Version 3.8.7

Ensure that LRandom restores rsi after the call to the C function on x64. (Chromium issue http://crbug.com/110509)

Fix include issues on *bsd when building with scons. (issue 1897)

Provide a switch to specify -fno-strict-aliasing. (issue 1887)
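
The Makefile and common.gypi changes below wire this switch into the build. A
minimal usage sketch (the ia32.release target is only an example taken from the
Makefile's own target list):

    # Ask GCC for -fno-strict-aliasing, e.g. as a workaround for GCC 4.5.
    make strictaliasing=off ia32.release

Under the hood this adds -Dv8_no_strict_aliasing=1 to GYPFLAGS, which
common.gypi turns into the -fno-strict-aliasing cflag.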

Move the WIN32 define from standalone.gypi to common.gypi. (issue 1760)

Fix a corner case in heap size estimation. (issue 1893)

Fix and enable NEW_NON_STRICT_FAST ArgumentsAccess stub on x64. (issue 1903)

Performance improvements and bug fixes.
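
The Makefile changes below also add an Android/ARM cross-compile target. A
rough usage sketch (the NDK path is a placeholder; the toolchain binaries are
expected under $(ANDROID_NDK_ROOT)/toolchain/bin/):

    export ANDROID_NDK_ROOT=/path/to/android-ndk
    make android          # builds $(OUTDIR)/android.release
    make android.clean    # removes the generated Android build files

If ANDROID_NDK_ROOT is not set, the must-set-ANDROID_NDK_ROOT target aborts the
build with an error.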

git-svn-id: http://v8.googlecode.com/svn/trunk@10447 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 1c1bddd..e614356 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,26 @@
+2012-01-19: Version 3.8.7
+
+        Ensure that LRandom restores rsi after call to the C function on x64.
+        (Chromium issue http://crbug.com/110509)
+
+        Fixing include issues on *bsd when building with scons.
+        (issue 1897)
+
+        Provide a switch to specify -fno-strict-aliasing
+        (issue 1887)
+
+        Move WIN32 define from standalone.gypi to common.gypi
+        (issue 1760)
+
+        Fix corner-case in heap size estimation.
+        (issue 1893)
+
+        Fix and enable NEW_NON_STRICT_FAST ArgumentsAccess stub on x64.
+        (issue 1903)
+
+        Performance improvements and bug fixes.
+
+
 2012-01-16: Version 3.8.6
 
         Add primitive WebGL array support to d8.
diff --git a/Makefile b/Makefile
index 09d1e8b..73e8421 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -33,6 +33,8 @@
 TESTJOBS ?= -j16
 GYPFLAGS ?=
 TESTFLAGS ?=
+ANDROID_NDK_ROOT ?=
+ANDROID_TOOL_PREFIX = $(ANDROID_NDK_ROOT)/toolchain/bin/arm-linux-androideabi
 
 # Special build flags. Use them like this: "make library=shared"
 
@@ -85,6 +87,10 @@
 ifeq ($(presubmit), no)
   TESTFLAGS += --no-presubmit
 endif
+# strictaliasing=off (workaround for GCC-4.5)
+ifeq ($(strictaliasing), off)
+  GYPFLAGS += -Dv8_no_strict_aliasing=1
+endif
 
 # ----------------- available targets: --------------------
 # - "dependencies": pulls in external dependencies (currently: GYP)
@@ -93,6 +99,7 @@
 # - every combination <arch>.<mode>, e.g. "ia32.release"
 # - "native": current host's architecture, release mode
 # - any of the above with .check appended, e.g. "ia32.release.check"
+# - "android": cross-compile for Android/ARM (release mode)
 # - default (no target specified): build all DEFAULT_ARCHES and MODES
 # - "check": build all targets and run all tests
 # - "<arch>.clean" for any <arch> in ARCHES
@@ -120,7 +127,8 @@
 
 .PHONY: all check clean dependencies $(ENVFILE).new native \
         $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
-        $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES))
+        $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
+        must-set-ANDROID_NDK_ROOT
 
 # Target definitions. "all" is the default.
 all: $(MODES)
@@ -144,6 +152,18 @@
 	         CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
 	         builddir="$(shell pwd)/$(OUTDIR)/$@"
 
+# TODO(jkummerow): add "android.debug" when we need it.
+android android.release: $(OUTDIR)/Makefile-android
+	@$(MAKE) -C "$(OUTDIR)" -f Makefile-android \
+	        CXX="$(ANDROID_TOOL_PREFIX)-g++" \
+	        AR="$(ANDROID_TOOL_PREFIX)-ar" \
+	        RANLIB="$(ANDROID_TOOL_PREFIX)-ranlib" \
+	        CC="$(ANDROID_TOOL_PREFIX)-gcc" \
+	        LD="$(ANDROID_TOOL_PREFIX)-ld" \
+	        LINK="$(ANDROID_TOOL_PREFIX)-g++" \
+	        BUILDTYPE=Release \
+	        builddir="$(shell pwd)/$(OUTDIR)/android.release"
+
 # Test targets.
 check: all
 	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
@@ -178,6 +198,11 @@
 	rm -rf $(OUTDIR)/native
 	find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete
 
+android.clean:
+	rm -f $(OUTDIR)/Makefile-android
+	rm -rf $(OUTDIR)/android.release
+	find $(OUTDIR) -regex '.*\(host\|target\)-android\.mk' -delete
+
 clean: $(addsuffix .clean,$(ARCHES)) native.clean
 
 # GYP file generation targets.
@@ -205,6 +230,18 @@
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS)
 
+$(OUTDIR)/Makefile-android: $(GYPFILES) $(ENVFILE) build/android.gypi \
+                            must-set-ANDROID_NDK_ROOT
+	CC="${ANDROID_TOOL_PREFIX}-gcc" \
+	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
+	              -Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
+	              -S-android $(GYPFLAGS)
+
+must-set-ANDROID_NDK_ROOT:
+ifndef ANDROID_NDK_ROOT
+	  $(error ANDROID_NDK_ROOT is not set)
+endif
+
 # Replaces the old with the new environment file if they're different, which
 # will trigger GYP to regenerate Makefiles.
 $(ENVFILE): $(ENVFILE).new
diff --git a/SConstruct b/SConstruct
index 3f12907..d4eaebe 100644
--- a/SConstruct
+++ b/SConstruct
@@ -33,6 +33,7 @@
 from os.path import join, dirname, abspath
 from types import DictType, StringTypes
 root_dir = dirname(File('SConstruct').rfile().abspath)
+src_dir = join(root_dir, 'src')
 sys.path.insert(0, join(root_dir, 'tools'))
 import js2c, utils
 
@@ -53,7 +54,7 @@
 
 LIBRARY_FLAGS = {
   'all': {
-    'CPPPATH': [join(root_dir, 'src')],
+    'CPPPATH': [src_dir],
     'regexp:interpreted': {
       'CPPDEFINES': ['V8_INTERPRETED_REGEXP']
     },
@@ -111,13 +112,13 @@
       }
     },
     'os:freebsd': {
-      'CPPPATH' : ['/usr/local/include'],
+      'CPPPATH' : [src_dir, '/usr/local/include'],
       'LIBPATH' : ['/usr/local/lib'],
       'CCFLAGS':      ['-ansi'],
       'LIBS': ['execinfo']
     },
     'os:openbsd': {
-      'CPPPATH' : ['/usr/local/include'],
+      'CPPPATH' : [src_dir, '/usr/local/include'],
       'LIBPATH' : ['/usr/local/lib'],
       'CCFLAGS':      ['-ansi'],
     },
@@ -125,12 +126,12 @@
       # On Solaris, to get isinf, INFINITY, fpclassify and other macros one
       # needs to define __C99FEATURES__.
       'CPPDEFINES': ['__C99FEATURES__'],
-      'CPPPATH' : ['/usr/local/include'],
+      'CPPPATH' : [src_dir, '/usr/local/include'],
       'LIBPATH' : ['/usr/local/lib'],
       'CCFLAGS':      ['-ansi'],
     },
     'os:netbsd': {
-      'CPPPATH' : ['/usr/pkg/include'],
+      'CPPPATH' : [src_dir, '/usr/pkg/include'],
       'LIBPATH' : ['/usr/pkg/lib'],
     },
     'os:win32': {
@@ -403,7 +404,7 @@
 
 CCTEST_EXTRA_FLAGS = {
   'all': {
-    'CPPPATH': [join(root_dir, 'src')],
+    'CPPPATH': [src_dir],
     'library:shared': {
       'CPPDEFINES': ['USING_V8_SHARED']
     },
@@ -460,7 +461,7 @@
 
 SAMPLE_FLAGS = {
   'all': {
-    'CPPPATH': [join(abspath('.'), 'include')],
+    'CPPPATH': [join(root_dir, 'include')],
     'library:shared': {
       'CPPDEFINES': ['USING_V8_SHARED']
     },
@@ -643,7 +644,7 @@
 
 PREPARSER_FLAGS = {
   'all': {
-    'CPPPATH': [join(abspath('.'), 'include'), join(abspath('.'), 'src')],
+    'CPPPATH': [join(root_dir, 'include'), src_dir],
     'library:shared': {
       'CPPDEFINES': ['USING_V8_SHARED']
     },
diff --git a/build/android.gypi b/build/android.gypi
new file mode 100644
index 0000000..ffd0648
--- /dev/null
+++ b/build/android.gypi
@@ -0,0 +1,225 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Definitions for building standalone V8 binaries to run on Android.
+# This is mostly excerpted from:
+# http://src.chromium.org/viewvc/chrome/trunk/src/build/common.gypi
+
+{
+  'variables': {
+    # Location of Android NDK.
+    'variables': {
+      'variables': {
+        'android_ndk_root%': '<!(/bin/echo -n $ANDROID_NDK_ROOT)',
+        'android_target_arch%': 'arm',  # target_arch in android terms.
+
+        # Switch between different build types, currently only '0' is
+        # supported.
+        'android_build_type%': 0,
+      },
+      'android_ndk_root%': '<(android_ndk_root)',
+      'android_ndk_sysroot': '<(android_ndk_root)/platforms/android-9/arch-<(android_target_arch)',
+      'android_build_type%': '<(android_build_type)',
+    },
+    'android_ndk_root%': '<(android_ndk_root)',
+    'android_ndk_sysroot': '<(android_ndk_sysroot)',
+    'android_ndk_include': '<(android_ndk_sysroot)/usr/include',
+    'android_ndk_lib': '<(android_ndk_sysroot)/usr/lib',
+    # Use the system stlport if enabled; otherwise the NDK stlport is
+    # linked statically.
+    'use_system_stlport%': '<(android_build_type)',
+    'android_stlport_library': 'stlport_static',
+    # Copy it out one scope.
+    'android_build_type%': '<(android_build_type)',
+
+    'OS': 'android',
+    'target_arch': 'arm',
+    'v8_target_arch': 'arm',
+    'armv7': 1,
+    'arm_neon': 0,
+    'arm_fpu': 'vfpv3',
+  },  # variables
+  'target_defaults': {
+    'defines': [
+      'ANDROID',
+      'V8_ANDROID_LOG_STDOUT',
+    ],
+    'configurations': {
+      'Release': {
+        'cflags!': [
+          '-O2',
+          '-Os',
+        ],
+        'cflags': [
+          '-fdata-sections',
+          '-ffunction-sections',
+          '-fomit-frame-pointer',
+          '-O3',
+        ],
+      },  # Release
+    },  # configurations
+    'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter',
+                '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions', ],
+    'target_conditions': [
+      ['_toolset=="target"', {
+        'cflags!': [
+          '-pthread',  # Not supported by Android toolchain.
+        ],
+        'cflags': [
+          '-U__linux__',  # Don't allow toolchain to claim -D__linux__
+          '-ffunction-sections',
+          '-funwind-tables',
+          '-fstack-protector',
+          '-fno-short-enums',
+          '-finline-limit=64',
+          '-Wa,--noexecstack',
+          '-Wno-error=non-virtual-dtor',  # TODO(michaelbai): Fix warnings.
+          # Note: This include is in cflags to ensure that it comes after
+          # all of the includes.
+          '-I<(android_ndk_include)',
+          '-march=armv7-a',
+          '-mtune=cortex-a8',
+          '-mfpu=vfp3',
+        ],
+        'defines': [
+          'ANDROID',
+          #'__GNU_SOURCE=1',  # Necessary for clone()
+          'USE_STLPORT=1',
+          '_STLP_USE_PTR_SPECIALIZATIONS=1',
+          'HAVE_OFF64_T',
+          'HAVE_SYS_UIO_H',
+          'ANDROID_BINSIZE_HACK', # Enable temporary hacks to reduce binsize.
+        ],
+        'ldflags!': [
+          '-pthread',  # Not supported by Android toolchain.
+        ],
+        'ldflags': [
+          '-nostdlib',
+          '-Wl,--no-undefined',
+          '-Wl,--icf=safe',  # Enable identical code folding to reduce size
+          # Don't export symbols from statically linked libraries.
+          '-Wl,--exclude-libs=ALL',
+        ],
+        'libraries!': [
+            '-lrt',  # librt is built into Bionic.
+            # Not supported by Android toolchain.
+            # Where do these come from?  Can't find references in
+            # any Chromium gyp or gypi file.  Maybe they come from
+            # gyp itself?
+            '-lpthread', '-lnss3', '-lnssutil3', '-lsmime3', '-lplds4', '-lplc4', '-lnspr4',
+          ],
+          'libraries': [
+            '-l<(android_stlport_library)',
+            # Manually link the libgcc.a that the cross compiler uses.
+            '<!($CC -print-libgcc-file-name)',
+            '-lc',
+            '-ldl',
+            '-lstdc++',
+            '-lm',
+        ],
+        'conditions': [
+          ['android_build_type==0', {
+            'ldflags': [
+              '-Wl,-rpath-link=<(android_ndk_lib)',
+              '-L<(android_ndk_lib)',
+            ],
+          }],
+          # NOTE: The stlport header include paths below are specified in
+          # cflags rather than include_dirs because they need to come
+          # after include_dirs. Think of them like system headers, but
+          # don't use '-isystem' because the arm-linux-androideabi-4.4.3
+          # toolchain (circa Gingerbread) will exhibit strange errors.
+          # The include ordering here is important; change with caution.
+          ['use_system_stlport==0', {
+            'cflags': [
+              '-I<(android_ndk_root)/sources/cxx-stl/stlport/stlport',
+            ],
+            'conditions': [
+              ['target_arch=="arm" and armv7==1', {
+                'ldflags': [
+                  '-L<(android_ndk_root)/sources/cxx-stl/stlport/libs/armeabi-v7a',
+                ],
+              }],
+              ['target_arch=="arm" and armv7==0', {
+                'ldflags': [
+                  '-L<(android_ndk_root)/sources/cxx-stl/stlport/libs/armeabi',
+                ],
+              }],
+              ['target_arch=="ia32"', {
+                'ldflags': [
+                  '-L<(android_ndk_root)/sources/cxx-stl/stlport/libs/x86',
+                ],
+              }],
+            ],
+          }],
+          ['target_arch=="ia32"', {
+            # The x86 toolchain currently has problems with stack-protector.
+            'cflags!': [
+              '-fstack-protector',
+            ],
+            'cflags': [
+              '-fno-stack-protector',
+            ],
+          }],
+        ],
+        'target_conditions': [
+          ['_type=="executable"', {
+            'ldflags': [
+              '-Bdynamic',
+              '-Wl,-dynamic-linker,/system/bin/linker',
+              '-Wl,--gc-sections',
+              '-Wl,-z,nocopyreloc',
+              # crtbegin_dynamic.o should be the last item in ldflags.
+              '<(android_ndk_lib)/crtbegin_dynamic.o',
+            ],
+            'libraries': [
+              # crtend_android.o needs to be the last item in libraries.
+              # Do not add any libraries after this!
+              '<(android_ndk_lib)/crtend_android.o',
+            ],
+          }],
+          ['_type=="shared_library"', {
+            'ldflags': [
+              '-Wl,-shared,-Bsymbolic',
+            ],
+          }],
+        ],
+      }],  # _toolset=="target"
+      # Settings for building host targets using the system toolchain.
+      ['_toolset=="host"', {
+        'cflags': [ '-m32', '-pthread' ],
+        'ldflags': [ '-m32', '-pthread' ],
+        'ldflags!': [
+          '-Wl,-z,noexecstack',
+          '-Wl,--gc-sections',
+          '-Wl,-O1',
+          '-Wl,--as-needed',
+        ],
+      }],
+    ],  # target_conditions
+  },  # target_defaults
+}
\ No newline at end of file
diff --git a/build/common.gypi b/build/common.gypi
index 7aab913..c1c2b42 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -73,6 +73,9 @@
     # Enable profiling support. Only required on Windows.
     'v8_enable_prof%': 0,
 
+    # Some versions of GCC 4.5 seem to need -fno-strict-aliasing.
+    'v8_no_strict_aliasing%': 0,
+
     # Chrome needs this definition unconditionally. For standalone V8 builds,
     # it's handled in build/standalone.gypi.
     'want_separate_host_toolset%': 1,
@@ -208,6 +211,11 @@
           'COMPRESS_STARTUP_DATA_BZ2',
         ],
       }],
+      ['OS=="win"', {
+        'defines': [
+          'WIN32',
+        ],
+      }],
       ['OS=="win" and v8_enable_prof==1', {
         'msvs_settings': {
           'VCLinkerTool': {
@@ -222,12 +230,15 @@
             'cflags': [ '-m32' ],
             'ldflags': [ '-m32' ],
           }],
-        ],
+          [ 'v8_no_strict_aliasing==1', {
+            'cflags': [ '-fno-strict-aliasing' ],
+          }],
+        ],  # conditions
       }],
       ['OS=="solaris"', {
         'defines': [ '__C99FEATURES__=1' ],  # isinf() etc.
       }],
-    ],
+    ],  # conditions
     'configurations': {
       'Debug': {
         'defines': [
@@ -268,10 +279,11 @@
                         '-Wnon-virtual-dtor' ],
           }],
         ],
-      },
+      },  # Debug
       'Release': {
         'conditions': [
-          ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
+          ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
+            or OS=="android"', {
             'cflags!': [
               '-O2',
               '-Os',
@@ -307,7 +319,7 @@
               # is specified explicitly.
               'GCC_STRICT_ALIASING': 'YES',
             },
-          }],
+          }],  # OS=="mac"
           ['OS=="win"', {
             'msvs_configuration_attributes': {
               'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
@@ -342,9 +354,9 @@
                 # 'StackReserveSize': '297152',
               },
             },
-          }],
-        ],
-      },
-    },
-  },
+          }],  # OS=="win"
+        ],  # conditions
+      },  # Release
+    },  # configurations
+  },  # target_defaults
 }
diff --git a/build/standalone.gypi b/build/standalone.gypi
index 86f6d46..e9b0565 100644
--- a/build/standalone.gypi
+++ b/build/standalone.gypi
@@ -106,7 +106,6 @@
     ['OS=="win"', {
       'target_defaults': {
         'defines': [
-          'WIN32',
           '_CRT_SECURE_NO_DEPRECATE',
           '_CRT_NONSTDC_NO_DEPRECATE',
         ],
diff --git a/src/api.cc b/src/api.cc
index bac3069..4146bd4 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -741,6 +741,7 @@
   i::Context* last_context =
       isolate->handle_scope_implementer()->RestoreContext();
   isolate->set_context(last_context);
+  isolate->set_context_exit_happened(true);
 }
 
 
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 15ef9bc..c33df5c 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -5821,7 +5821,7 @@
   __ b(eq, &return_r0);
 
   Label result_longer_than_two;
-  // Check for special case of two character ascii string, in which case
+  // Check for special case of two character ASCII string, in which case
   // we do a lookup in the symbol table first.
   __ cmp(r2, Operand(2));
   __ b(gt, &result_longer_than_two);
@@ -5951,7 +5951,7 @@
   __ tst(r1, Operand(kStringEncodingMask));
   __ b(eq, &two_byte_sequential);
 
-  // Allocate and copy the resulting ascii string.
+  // Allocate and copy the resulting ASCII string.
   __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
 
   // Locate first character of substring to copy.
@@ -6268,7 +6268,7 @@
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ cmp(r6, Operand(String::kMinNonFlatLength));
+  __ cmp(r6, Operand(ConsString::kMinLength));
   __ b(lt, &string_add_flat_result);
   // Handle exceptionally long strings in the runtime system.
   STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
@@ -6322,7 +6322,7 @@
   __ jmp(&allocated);
 
   // We cannot encounter sliced strings or cons strings here since:
-  STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
+  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
   // Handle creating a flat result from either external or sequential strings.
   // Locate the first characters' locations.
   // r0: first string
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 49b8db7..e767001 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -573,13 +573,13 @@
   // The naming of these accessor corresponds to figure A3-1.
   //
   // Two kind of accessors are declared:
-  // - <Name>Field() will return the raw field, ie the field's bits at their
+  // - <Name>Field() will return the raw field, i.e. the field's bits at their
   //   original place in the instruction encoding.
-  //   eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
-  //   ConditionField(instr) will return 0xC0000000.
+  //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+  //   0xC0810002 ConditionField(instr) will return 0xC0000000.
   // - <Name>Value() will return the field value, shifted back to bit 0.
-  //   eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
-  //   ConditionField(instr) will return 0xC.
+  //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+  //   0xC0810002 ConditionField(instr) will return 0xC.
 
 
   // Generally applicable fields
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 603b3cf..16b568c 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -473,7 +473,7 @@
       return 1;
     }
     case 'i': {  // 'i: immediate value from adjacent bits.
-      // Expects tokens in the form imm%02d@%02d, ie. imm05@07, imm10@16
+      // Expects tokens in the form imm%02d@%02d, i.e. imm05@07, imm10@16
       int width = (format[3] - '0') * 10 + (format[4] - '0');
       int lsb   = (format[6] - '0') * 10 + (format[7] - '0');
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 38999a8..6654263 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -115,7 +115,7 @@
 // function.
 //
 // The live registers are:
-//   o r1: the JS function object being called (ie, ourselves)
+//   o r1: the JS function object being called (i.e., ourselves)
 //   o cp: our context
 //   o fp: our caller's frame pointer
 //   o sp: stack pointer
@@ -3618,7 +3618,7 @@
 
   // One-character separator case
   __ bind(&one_char_separator);
-  // Replace separator with its ascii character value.
+  // Replace separator with its ASCII character value.
   __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
   // Jump into the loop after the code that copies the separator, so the first
   // element is not preceded by a separator
@@ -3629,7 +3629,7 @@
   //   result_pos: the position to which we are currently copying characters.
   //   element: Current array element.
   //   elements_end: Array end.
-  //   separator: Single separator ascii char (in lower byte).
+  //   separator: Single separator ASCII char (in lower byte).
 
   // Copy the separator character to the result.
   __ strb(separator, MemOperand(result_pos, 1, PostIndex));
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index b5ed517..0e050fe 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -262,7 +262,7 @@
 
 bool LCodeGen::GenerateDeoptJumpTable() {
   // Check that the jump table is accessible from everywhere in the function
-  // code, ie that offsets to the table can be encoded in the 24bit signed
+  // code, i.e. that offsets to the table can be encoded in the 24bit signed
   // immediate of a branch instruction.
   // To simplify we consider the code size from the first instruction to the
   // end of the jump table. We also don't consider the pc load delta.
@@ -2828,7 +2828,7 @@
       this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in receiver which is r0, as expected
   // by InvokeFunction.
-  v8::internal::ParameterCount actual(receiver);
+  ParameterCount actual(receiver);
   __ InvokeFunction(function, actual, CALL_FUNCTION,
                     safepoint_generator, CALL_AS_METHOD);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2883,31 +2883,41 @@
                                  int arity,
                                  LInstruction* instr,
                                  CallKind call_kind) {
-  // Change context if needed.
-  bool change_context =
-      (info()->closure()->context() != function->context()) ||
-      scope()->contains_with() ||
-      (scope()->num_heap_slots() > 0);
-  if (change_context) {
-    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-  }
-
-  // Set r0 to arguments count if adaption is not needed. Assumes that r0
-  // is available to write to at this point.
-  if (!function->NeedsArgumentsAdaption()) {
-    __ mov(r0, Operand(arity));
-  }
+  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+      function->shared()->formal_parameter_count() == arity;
 
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  // Invoke function.
-  __ SetCallKind(r5, call_kind);
-  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-  __ Call(ip);
+  if (can_invoke_directly) {
+    __ LoadHeapObject(r1, function);
+    // Change context if needed.
+    bool change_context =
+        (info()->closure()->context() != function->context()) ||
+        scope()->contains_with() ||
+        (scope()->num_heap_slots() > 0);
+    if (change_context) {
+      __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+    }
 
-  // Set up deoptimization.
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    // Set r0 to arguments count if adaption is not needed. Assumes that r0
+    // is available to write to at this point.
+    if (!function->NeedsArgumentsAdaption()) {
+      __ mov(r0, Operand(arity));
+    }
+
+    // Invoke function.
+    __ SetCallKind(r5, call_kind);
+    __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+    __ Call(ip);
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+  }
 
   // Restore context.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2916,7 +2926,6 @@
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
-  __ LoadHeapObject(r1, instr->function());
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
@@ -3351,7 +3360,6 @@
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
-  __ LoadHeapObject(r1, instr->target());
   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index fa97611..9894ff2 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -957,10 +957,12 @@
                                     Handle<Code> code_constant,
                                     Register code_reg,
                                     Label* done,
+                                    bool* definitely_mismatches,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
   bool definitely_matches = false;
+  *definitely_mismatches = false;
   Label regular_invoke;
 
   // Check whether the expected and actual arguments count match. If not,
@@ -991,6 +993,7 @@
         // arguments.
         definitely_matches = true;
       } else {
+        *definitely_mismatches = true;
         mov(r2, Operand(expected.immediate()));
       }
     }
@@ -1018,7 +1021,9 @@
       SetCallKind(r5, call_kind);
       Call(adaptor);
       call_wrapper.AfterCall();
-      b(done);
+      if (!*definitely_mismatches) {
+        b(done);
+      }
     } else {
       SetCallKind(r5, call_kind);
       Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -1038,23 +1043,26 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   Label done;
-
-  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+  bool definitely_mismatches = false;
+  InvokePrologue(expected, actual, Handle<Code>::null(), code,
+                 &done, &definitely_mismatches, flag,
                  call_wrapper, call_kind);
-  if (flag == CALL_FUNCTION) {
-    call_wrapper.BeforeCall(CallSize(code));
-    SetCallKind(r5, call_kind);
-    Call(code);
-    call_wrapper.AfterCall();
-  } else {
-    ASSERT(flag == JUMP_FUNCTION);
-    SetCallKind(r5, call_kind);
-    Jump(code);
-  }
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code));
+      SetCallKind(r5, call_kind);
+      Call(code);
+      call_wrapper.AfterCall();
+    } else {
+      ASSERT(flag == JUMP_FUNCTION);
+      SetCallKind(r5, call_kind);
+      Jump(code);
+    }
 
-  // Continue here if InvokePrologue does handle the invocation due to
-  // mismatched parameter counts.
-  bind(&done);
+    // Continue here if InvokePrologue does handle the invocation due to
+    // mismatched parameter counts.
+    bind(&done);
+  }
 }
 
 
@@ -1068,20 +1076,23 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   Label done;
-
-  InvokePrologue(expected, actual, code, no_reg, &done, flag,
+  bool definitely_mismatches = false;
+  InvokePrologue(expected, actual, code, no_reg,
+                 &done, &definitely_mismatches, flag,
                  NullCallWrapper(), call_kind);
-  if (flag == CALL_FUNCTION) {
-    SetCallKind(r5, call_kind);
-    Call(code, rmode);
-  } else {
-    SetCallKind(r5, call_kind);
-    Jump(code, rmode);
-  }
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      SetCallKind(r5, call_kind);
+      Call(code, rmode);
+    } else {
+      SetCallKind(r5, call_kind);
+      Jump(code, rmode);
+    }
 
-  // Continue here if InvokePrologue does handle the invocation due to
-  // mismatched parameter counts.
-  bind(&done);
+    // Continue here if InvokePrologue does handle the invocation due to
+    // mismatched parameter counts.
+    bind(&done);
+  }
 }
 
 
@@ -1116,6 +1127,7 @@
 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
+                                    const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
   // You can't call a function without a valid frame.
   ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -1129,7 +1141,7 @@
   // allow recompilation to take effect without changing any of the
   // call sites.
   ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-  InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
+  InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
 }
 
 
@@ -2387,7 +2399,7 @@
     b(gt, not_int32);
 
     // We know the exponent is smaller than 30 (biased).  If it is less than
-    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
     // it rounds to zero.
     const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
     sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 4b55a3b..60c2e6f 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -243,7 +243,7 @@
                       Register scratch3,
                       Label* object_is_white_and_not_data);
 
-  // Detects conservatively whether an object is data-only, ie it does need to
+  // Detects conservatively whether an object is data-only, i.e. it does not need to
   // be scanned by the garbage collector.
   void JumpIfDataObject(Register value,
                         Register scratch,
@@ -539,6 +539,7 @@
   void InvokeFunction(Handle<JSFunction> function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
+                      const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
   void IsObjectJSObjectType(Register heap_object,
@@ -606,7 +607,7 @@
   }
 
   // Check if the given instruction is a 'type' marker.
-  // ie. check if is is a mov r<type>, r<type> (referenced as nop(type))
+  // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
   // These instructions are generated to mark special location in the code,
   // like some special IC code.
   static inline bool IsMarkedCode(Instr instr, int type) {
@@ -810,7 +811,7 @@
   // Check if the map of an object is equal to a specified map and branch to
   // label if not. Skip the smi check if not required (object is known to be a
   // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
-  // against maps that are ElementsKind transition maps of the specificed map.
+  // against maps that are ElementsKind transition maps of the specified map.
   void CheckMap(Register obj,
                 Register scratch,
                 Handle<Map> map,
@@ -908,7 +909,7 @@
   // Truncates a double using a specific rounding mode.
   // Clears the z flag (ne condition) if an overflow occurs.
   // If exact_conversion is true, the z flag is also cleared if the conversion
-  // was inexact, ie. if the double value could not be converted exactly
+  // was inexact, i.e. if the double value could not be converted exactly
   // to a 32bit integer.
   void EmitVFPTruncate(VFPRoundingMode rounding_mode,
                        SwVfpRegister result,
@@ -1025,7 +1026,7 @@
 
   // Calls an API function.  Allocates HandleScope, extracts returned value
   // from handle and propagates exceptions.  Restores context.  stack_space
-  // - space to be unwound on exit (includes the call js arguments space and
+  // - space to be unwound on exit (includes the call JS arguments space and
   // the additional space allocated for the fast call).
   void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
 
@@ -1248,6 +1249,7 @@
                       Handle<Code> code_constant,
                       Register code_reg,
                       Label* done,
+                      bool* definitely_mismatches,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index c3a82ff..cf2d4e2 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -562,11 +562,11 @@
                                       int argc) {
   // ----------- S t a t e -------------
   //  -- sp[0]              : holder (set by CheckPrototypes)
-  //  -- sp[4]              : callee js function
+  //  -- sp[4]              : callee JS function
   //  -- sp[8]              : call data
-  //  -- sp[12]             : last js argument
+  //  -- sp[12]             : last JS argument
   //  -- ...
-  //  -- sp[(argc + 3) * 4] : first js argument
+  //  -- sp[(argc + 3) * 4] : first JS argument
   //  -- sp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
@@ -583,7 +583,7 @@
   } else {
     __ Move(r6, call_data);
   }
-  // Store js function and call data.
+  // Store JS function and call data.
   __ stm(ib, sp, r5.bit() | r6.bit());
 
   // r2 points to call data as expected by Arguments
@@ -738,7 +738,7 @@
           ? CALL_AS_FUNCTION
           : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
-                        JUMP_FUNCTION, call_kind);
+                        JUMP_FUNCTION, NullCallWrapper(), call_kind);
     }
 
     // Deferred code for fast API call case---clean preallocated space.
@@ -1904,7 +1904,8 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+  __ InvokeFunction(
+      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
@@ -1983,7 +1984,7 @@
   __ vmrs(r3);
   // Set custom FPCSR:
   //  - Set rounding mode to "Round towards Minus Infinity"
-  //    (ie bits [23:22] = 0b10).
+  //    (i.e. bits [23:22] = 0b10).
   //  - Clear vfp cumulative exception flags (bits [3:0]).
   //  - Make sure Flush-to-zero mode control bit is unset (bit 22).
   __ bic(r9, r3,
@@ -2049,7 +2050,8 @@
   __ bind(&slow);
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+  __ InvokeFunction(
+      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
@@ -2147,7 +2149,8 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+  __ InvokeFunction(
+      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
@@ -2325,7 +2328,8 @@
   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
+  __ InvokeFunction(
+      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
diff --git a/src/array.js b/src/array.js
index 3d8e278..16e37c5 100644
--- a/src/array.js
+++ b/src/array.js
@@ -204,7 +204,7 @@
   if (IS_NULL_OR_UNDEFINED(e)) {
     return '';
   } else {
-    // According to ES5, seciton 15.4.4.3, the toLocaleString conversion
+    // According to ES5, section 15.4.4.3, the toLocaleString conversion
     // must throw a TypeError if ToObject(e).toLocaleString isn't
     // callable.
     var e_obj = ToObject(e);
diff --git a/src/assembler.h b/src/assembler.h
index 8c705a8..540c15a 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -271,7 +271,7 @@
   INLINE(void apply(intptr_t delta));
 
   // Is the pointer this relocation info refers to coded like a plain pointer
-  // or is it strange in some way (eg relative or patched into a series of
+  // or is it strange in some way (e.g. relative or patched into a series of
   // instructions).
   bool IsCodedSpecially();
 
diff --git a/src/ast.cc b/src/ast.cc
index 2e26999..7c5e3a7 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -724,17 +724,11 @@
 }
 
 
-static bool CanCallWithoutIC(Handle<JSFunction> target, int arity) {
-  SharedFunctionInfo* info = target->shared();
-  // If the number of formal parameters of the target function does
-  // not match the number of arguments we're passing, we don't want to
-  // deal with it. Otherwise, we can call it directly.
-  return !target->NeedsArgumentsAdaption() ||
-      info->formal_parameter_count() == arity;
-}
-
-
 bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
+  // If there is an interceptor, we can't compute the target for
+  // a direct call.
+  if (type->has_named_interceptor()) return false;
+
   if (check_type_ == RECEIVER_MAP_CHECK) {
     // For primitive checks the holder is set up to point to the
     // corresponding prototype object, i.e. one step of the algorithm
@@ -754,7 +748,7 @@
       type = Handle<Map>(holder()->map());
     } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
       target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
-      return CanCallWithoutIC(target_, arguments()->length());
+      return true;
     } else {
       return false;
     }
@@ -774,8 +768,7 @@
     Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
     // If the function is in new space we assume it's more likely to
     // change and thus prefer the general IC code.
-    if (!HEAP->InNewSpace(*candidate) &&
-        CanCallWithoutIC(candidate, arguments()->length())) {
+    if (!HEAP->InNewSpace(*candidate)) {
       target_ = candidate;
       return true;
     }
diff --git a/src/bignum-dtoa.h b/src/bignum-dtoa.h
index ea1acbb..93ec1f7 100644
--- a/src/bignum-dtoa.h
+++ b/src/bignum-dtoa.h
@@ -44,7 +44,7 @@
   BIGNUM_DTOA_PRECISION
 };
 
-// Converts the given double 'v' to ascii.
+// Converts the given double 'v' to ASCII.
 // The result should be interpreted as buffer * 10^(point-length).
 // The buffer will be null-terminated.
 //
diff --git a/src/builtins.cc b/src/builtins.cc
index 69e5161..90a8d3e 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -411,7 +411,7 @@
   int size_delta = to_trim * kPointerSize;
   if (heap->marking()->TransferMark(elms->address(),
                                     elms->address() + size_delta)) {
-    MemoryChunk::IncrementLiveBytes(elms->address(), -size_delta);
+    MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
   }
 
   return FixedArray::cast(HeapObject::FromAddress(
diff --git a/src/d8-readline.cc b/src/d8-readline.cc
index 71be933..679c536 100644
--- a/src/d8-readline.cc
+++ b/src/d8-readline.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,10 +49,14 @@
 class ReadLineEditor: public LineEditor {
  public:
   ReadLineEditor() : LineEditor(LineEditor::READLINE, "readline") { }
-  virtual i::SmartArrayPointer<char> Prompt(const char* prompt);
+  virtual Handle<String> Prompt(const char* prompt);
   virtual bool Open();
   virtual bool Close();
   virtual void AddHistory(const char* str);
+
+  static const char* kHistoryFileName;
+  static const int kMaxHistoryEntries;
+
  private:
   static char** AttemptedCompletion(const char* text, int start, int end);
   static char* CompletionGenerator(const char* text, int state);
@@ -66,25 +70,34 @@
     '\0'};
 
 
+const char* ReadLineEditor::kHistoryFileName = ".d8_history";
+const int ReadLineEditor::kMaxHistoryEntries = 1000;
+
+
 bool ReadLineEditor::Open() {
   rl_initialize();
   rl_attempted_completion_function = AttemptedCompletion;
   rl_completer_word_break_characters = kWordBreakCharacters;
   rl_bind_key('\t', rl_complete);
   using_history();
-  stifle_history(Shell::kMaxHistoryEntries);
-  return read_history(Shell::kHistoryFileName) == 0;
+  stifle_history(kMaxHistoryEntries);
+  return read_history(kHistoryFileName) == 0;
 }
 
 
 bool ReadLineEditor::Close() {
-  return write_history(Shell::kHistoryFileName) == 0;
+  return write_history(kHistoryFileName) == 0;
 }
 
 
-i::SmartArrayPointer<char> ReadLineEditor::Prompt(const char* prompt) {
+Handle<String> ReadLineEditor::Prompt(const char* prompt) {
   char* result = readline(prompt);
-  return i::SmartArrayPointer<char>(result);
+  if (result != NULL) {
+    AddHistory(result);
+  } else {
+    return Handle<String>();
+  }
+  return String::New(result);
 }
 
 
@@ -118,10 +131,10 @@
   static unsigned current_index;
   static Persistent<Array> current_completions;
   if (state == 0) {
-    i::SmartArrayPointer<char> full_text(i::StrNDup(rl_line_buffer, rl_point));
     HandleScope scope;
+    Local<String> full_text = String::New(rl_line_buffer, rl_point);
     Handle<Array> completions =
-      Shell::GetCompletions(String::New(text), String::New(*full_text));
+      Shell::GetCompletions(String::New(text), full_text);
     current_completions = Persistent<Array>::New(completions);
     current_index = 0;
   }
diff --git a/src/d8.cc b/src/d8.cc
index 97828a4..b612c48 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -66,11 +66,7 @@
 
 namespace v8 {
 
-
-#ifndef V8_SHARED
 LineEditor *LineEditor::first_ = NULL;
-const char* Shell::kHistoryFileName = ".d8_history";
-const int Shell::kMaxHistoryEntries = 1000;
 
 
 LineEditor::LineEditor(Type type, const char* name)
@@ -96,31 +92,29 @@
 class DumbLineEditor: public LineEditor {
  public:
   DumbLineEditor() : LineEditor(LineEditor::DUMB, "dumb") { }
-  virtual i::SmartArrayPointer<char> Prompt(const char* prompt);
+  virtual Handle<String> Prompt(const char* prompt);
 };
 
 
 static DumbLineEditor dumb_line_editor;
 
 
-i::SmartArrayPointer<char> DumbLineEditor::Prompt(const char* prompt) {
-  static const int kBufferSize = 256;
-  char buffer[kBufferSize];
+Handle<String> DumbLineEditor::Prompt(const char* prompt) {
   printf("%s", prompt);
-  char* str = fgets(buffer, kBufferSize, stdin);
-  return i::SmartArrayPointer<char>(str ? i::StrDup(str) : str);
+  return Shell::ReadFromStdin();
 }
 
 
+#ifndef V8_SHARED
 CounterMap* Shell::counter_map_;
 i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
 CounterCollection Shell::local_counters_;
 CounterCollection* Shell::counters_ = &local_counters_;
 i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
 Persistent<Context> Shell::utility_context_;
-LineEditor* Shell::console = NULL;
 #endif  // V8_SHARED
 
+LineEditor* Shell::console = NULL;
 Persistent<Context> Shell::evaluation_context_;
 ShellOptions Shell::options;
 const char* Shell::kPrompt = "d8> ";
@@ -238,7 +232,7 @@
 }
 
 
-Handle<Value> Shell::ReadLine(const Arguments& args) {
+Handle<String> Shell::ReadFromStdin() {
   static const int kBufferSize = 256;
   char buffer[kBufferSize];
   Handle<String> accumulator = String::New("");
@@ -247,7 +241,7 @@
     // Continue reading if the line ends with an escape '\\' or the line has
     // not been fully read into the buffer yet (does not end with '\n').
     // If fgets gets an error, just give up.
-    if (fgets(buffer, kBufferSize, stdin) == NULL) return Null();
+    if (fgets(buffer, kBufferSize, stdin) == NULL) return Handle<String>();
     length = static_cast<int>(strlen(buffer));
     if (length == 0) {
       return accumulator;
@@ -1047,28 +1041,15 @@
   Context::Scope context_scope(evaluation_context_);
   HandleScope outer_scope;
   Handle<String> name = String::New("(d8)");
-#ifndef V8_SHARED
   console = LineEditor::Get();
   printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
   console->Open();
   while (true) {
-    i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt);
-    if (input.is_empty()) break;
-    console->AddHistory(*input);
     HandleScope inner_scope;
-    ExecuteString(String::New(*input), name, true, true);
+    Handle<String> input = console->Prompt(Shell::kPrompt);
+    if (input.IsEmpty()) break;
+    ExecuteString(input, name, true, true);
   }
-#else
-  printf("V8 version %s [D8 light using shared library]\n", V8::GetVersion());
-  static const int kBufferSize = 256;
-  while (true) {
-    char buffer[kBufferSize];
-    printf("%s", Shell::kPrompt);
-    if (fgets(buffer, kBufferSize, stdin) == NULL) break;
-    HandleScope inner_scope;
-    ExecuteString(String::New(buffer), name, true, true);
-  }
-#endif  // V8_SHARED
   printf("\n");
 }
 
diff --git a/src/d8.gyp b/src/d8.gyp
index a096af3..3b92d03 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -1,4 +1,4 @@
-# Copyright 2010 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
diff --git a/src/d8.h b/src/d8.h
index 6c7733c..e9b417b 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -116,14 +116,13 @@
 #endif  // V8_SHARED
 
 
-#ifndef V8_SHARED
 class LineEditor {
  public:
   enum Type { DUMB = 0, READLINE = 1 };
   LineEditor(Type type, const char* name);
   virtual ~LineEditor() { }
 
-  virtual i::SmartArrayPointer<char> Prompt(const char* prompt) = 0;
+  virtual Handle<String> Prompt(const char* prompt) = 0;
   virtual bool Open() { return true; }
   virtual bool Close() { return true; }
   virtual void AddHistory(const char* str) { }
@@ -136,7 +135,6 @@
   LineEditor* next_;
   static LineEditor* first_;
 };
-#endif  // V8_SHARED
 
 
 class SourceGroup {
@@ -287,7 +285,10 @@
   static Handle<Value> EnableProfiler(const Arguments& args);
   static Handle<Value> DisableProfiler(const Arguments& args);
   static Handle<Value> Read(const Arguments& args);
-  static Handle<Value> ReadLine(const Arguments& args);
+  static Handle<String> ReadFromStdin();
+  static Handle<Value> ReadLine(const Arguments& args) {
+    return ReadFromStdin();
+  }
   static Handle<Value> Load(const Arguments& args);
   static Handle<Value> ArrayBuffer(const Arguments& args);
   static Handle<Value> Int8Array(const Arguments& args);
@@ -335,11 +336,8 @@
   static Handle<Value> RemoveDirectory(const Arguments& args);
 
   static void AddOSMethods(Handle<ObjectTemplate> os_template);
-#ifndef V8_SHARED
-  static const char* kHistoryFileName;
-  static const int kMaxHistoryEntries;
+
   static LineEditor* console;
-#endif  // V8_SHARED
   static const char* kPrompt;
   static ShellOptions options;
 
diff --git a/src/dtoa.h b/src/dtoa.h
index a2d6fde..948a079 100644
--- a/src/dtoa.h
+++ b/src/dtoa.h
@@ -49,7 +49,7 @@
 // be at least kBase10MaximalLength + 1 characters long.
 const int kBase10MaximalLength = 17;
 
-// Converts the given double 'v' to ascii.
+// Converts the given double 'v' to ASCII.
 // The result should be interpreted as buffer * 10^(point-length).
 //
 // The output depends on the given mode:
diff --git a/src/execution.cc b/src/execution.cc
index 125241c..8a0242f 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -356,7 +356,7 @@
 
 void StackGuard::SetStackLimit(uintptr_t limit) {
   ExecutionAccess access(isolate_);
-  // If the current limits are special (eg due to a pending interrupt) then
+  // If the current limits are special (e.g. due to a pending interrupt) then
   // leave them alone.
   uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
   if (thread_local_.jslimit_ == thread_local_.real_jslimit_) {
diff --git a/src/factory.h b/src/factory.h
index 8725b67..e68cc7e 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -233,7 +233,7 @@
   Handle<FixedDoubleArray> CopyFixedDoubleArray(
       Handle<FixedDoubleArray> array);
 
-  // Numbers (eg, literals) are pretenured by the parser.
+  // Numbers (e.g. literals) are pretenured by the parser.
   Handle<Object> NewNumber(double value,
                            PretenureFlag pretenure = NOT_TENURED);
 
diff --git a/src/full-codegen.h b/src/full-codegen.h
index fbb6979..a1368c2 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -618,8 +618,8 @@
                              Label** if_false,
                              Label** fall_through) const = 0;
 
-    // Returns true if we are evaluating only for side effects (ie if the result
-    // will be discarded).
+    // Returns true if we are evaluating only for side effects (i.e. if the
+    // result will be discarded).
     virtual bool IsEffect() const { return false; }
 
     // Returns true if we are evaluating for the value (in accu/on stack).
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 87066fa..471f5a3 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -232,7 +232,7 @@
       VMState state(isolate, EXTERNAL);
       func(object, par);
     }
-    // Absense of explicit cleanup or revival of weak handle
+    // Absence of explicit cleanup or revival of weak handle
     // in most of the cases would lead to memory leak.
     ASSERT(state_ != NEAR_DEATH);
     return true;
diff --git a/src/heap.cc b/src/heap.cc
index 3c871e2..fff1319 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -872,6 +872,8 @@
   isolate_->counters()->objs_since_last_full()->Set(0);
 
   contexts_disposed_ = 0;
+
+  isolate_->set_context_exit_happened(false);
 }
 
 
@@ -1552,7 +1554,7 @@
 
     if (marks_handling == TRANSFER_MARKS) {
       if (Marking::TransferColor(source, target)) {
-        MemoryChunk::IncrementLiveBytes(target->address(), size);
+        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
       }
     }
   }
@@ -2923,8 +2925,8 @@
   bool is_ascii_data_in_two_byte_string = false;
   if (!is_ascii) {
     // At least one of the strings uses two-byte representation so we
-    // can't use the fast case code for short ascii strings below, but
-    // we can try to save memory if all chars actually fit in ascii.
+    // can't use the fast case code for short ASCII strings below, but
+    // we can try to save memory if all chars actually fit in ASCII.
     is_ascii_data_in_two_byte_string =
         first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
     if (is_ascii_data_in_two_byte_string) {
@@ -2933,9 +2935,9 @@
   }
 
   // If the resulting string is small make a flat string.
-  if (length < String::kMinNonFlatLength) {
+  if (length < ConsString::kMinLength) {
     // Note that neither of the two inputs can be a slice because:
-    STATIC_ASSERT(String::kMinNonFlatLength <= SlicedString::kMinLength);
+    STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
     ASSERT(first->IsFlat());
     ASSERT(second->IsFlat());
     if (is_ascii) {
@@ -3011,7 +3013,7 @@
                                      int end,
                                      PretenureFlag pretenure) {
   int length = end - start;
-  if (length == 0) {
+  if (length <= 0) {
     return empty_string();
   } else if (length == 1) {
     return LookupSingleCharacterStringFromCode(buffer->Get(start));
@@ -3635,8 +3637,8 @@
   // TODO(1240798): Initialize the object's body using valid initial values
   // according to the object's initial map.  For example, if the map's
   // instance type is JS_ARRAY_TYPE, the length field should be initialized
-  // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
-  // fixed array (eg, Heap::empty_fixed_array()).  Currently, the object
+  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
+  // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
   // verification code has to cope with (temporarily) invalid objects.  See
   // for example, JSArray::JSArrayVerify).
   Object* filler;
@@ -4103,7 +4105,7 @@
   ASSERT(chars >= 0);
   // Ensure the chars matches the number of characters in the buffer.
   ASSERT(static_cast<unsigned>(chars) == buffer->Length());
-  // Determine whether the string is ascii.
+  // Determine whether the string is ASCII.
   bool is_ascii = true;
   while (buffer->has_more()) {
     if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
@@ -5596,7 +5598,7 @@
   // goes wrong, just return false. The caller should check the results and
   // call Heap::TearDown() to release allocated memory.
   //
-  // If the heap is not yet configured (eg, through the API), configure it.
+  // If the heap is not yet configured (e.g. through the API), configure it.
   // Configuration is based on the flags new-space-size (really the semispace
   // size) and old-space-size if set or the initial values of semispace_size_
   // and old_generation_size_ otherwise.
diff --git a/src/heap.h b/src/heap.h
index a1a53db..937b034 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -690,7 +690,7 @@
       PretenureFlag pretenure = NOT_TENURED);
 
   // Computes a single character string where the character has code.
-  // A cache is used for ascii codes.
+  // A cache is used for ASCII codes.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed. Please note this does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* LookupSingleCharacterStringFromCode(
@@ -2376,7 +2376,7 @@
   intptr_t start_size_;  // Size of objects in heap set in constructor.
   GarbageCollector collector_;  // Type of collector.
 
-  // A count (including this one, eg, the first collection is 1) of the
+  // A count (including this one, e.g. the first collection is 1) of the
   // number of garbage collections.
   unsigned int gc_count_;
 
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 47dcc80..56da2dc 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -4266,14 +4266,6 @@
   HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
   bool fast_smi_only_elements = map->has_fast_smi_only_elements();
   bool fast_elements = map->has_fast_elements();
-  bool fast_double_elements = map->has_fast_double_elements();
-  if (!fast_smi_only_elements &&
-      !fast_elements &&
-      !fast_double_elements &&
-      !map->has_external_array_elements()) {
-    return is_store ? BuildStoreKeyedGeneric(object, key, val)
-                    : BuildLoadKeyedGeneric(object, key);
-  }
   HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
   if (is_store && (fast_elements || fast_smi_only_elements)) {
     AddInstruction(new(zone()) HCheckMap(
@@ -4290,7 +4282,9 @@
     return BuildExternalArrayElementAccess(external_elements, checked_key,
                                            val, map->elements_kind(), is_store);
   }
-  ASSERT(fast_smi_only_elements || fast_elements || fast_double_elements);
+  ASSERT(fast_smi_only_elements ||
+         fast_elements ||
+         map->has_fast_double_elements());
   if (map->instance_type() == JS_ARRAY_TYPE) {
     length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
   } else {
@@ -4362,8 +4356,14 @@
   // If only one map is left after transitioning, handle this case
   // monomorphically.
   if (num_untransitionable_maps == 1) {
-    HInstruction* instr = AddInstruction(BuildMonomorphicElementAccess(
-        object, key, val, untransitionable_map, is_store));
+    HInstruction* instr = NULL;
+    if (untransitionable_map->has_slow_elements_kind()) {
+      instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
+                                      : BuildLoadKeyedGeneric(object, key));
+    } else {
+      instr = AddInstruction(BuildMonomorphicElementAccess(
+          object, key, val, untransitionable_map, is_store));
+    }
     *has_side_effects |= instr->HasObservableSideEffects();
     instr->set_position(position);
     return is_store ? NULL : instr;
@@ -4499,8 +4499,13 @@
   HInstruction* instr = NULL;
   if (expr->IsMonomorphic()) {
     Handle<Map> map = expr->GetMonomorphicReceiverType();
-    AddInstruction(new(zone()) HCheckNonSmi(obj));
-    instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store);
+    if (map->has_slow_elements_kind()) {
+      instr = is_store ? BuildStoreKeyedGeneric(obj, key, val)
+                       : BuildLoadKeyedGeneric(obj, key);
+    } else {
+      AddInstruction(new(zone()) HCheckNonSmi(obj));
+      instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store);
+    }
   } else if (expr->GetReceiverTypes() != NULL &&
              !expr->GetReceiverTypes()->is_empty()) {
     return HandlePolymorphicElementAccess(
@@ -6167,6 +6172,15 @@
 }
 
 
+static bool IsLiteralCompareBool(HValue* left,
+                                 Token::Value op,
+                                 HValue* right) {
+  return op == Token::EQ_STRICT &&
+      ((left->IsConstant() && HConstant::cast(left)->handle()->IsBoolean()) ||
+       (right->IsConstant() && HConstant::cast(right)->handle()->IsBoolean()));
+}
+
+
 void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
@@ -6214,6 +6228,12 @@
   if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
     return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
   }
+  if (IsLiteralCompareBool(left, op, right)) {
+    HCompareObjectEqAndBranch* result =
+        new(zone()) HCompareObjectEqAndBranch(left, right);
+    result->set_position(expr->position());
+    return ast_context()->ReturnControl(result, expr->id());
+  }
 
   if (op == Token::INSTANCEOF) {
     // Check to see if the rhs of the instanceof is a global function not
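
The hydrogen.cc hunks above route maps with a slow elements kind to the generic keyed stubs and add IsLiteralCompareBool, which lets a strict equality against a boolean literal be lowered to HCompareObjectEqAndBranch. A sketch of that predicate with stand-in types (not the Hydrogen classes):

    enum class Token { EQ, EQ_STRICT, LT /* ... */ };

    struct Value {
      bool is_constant;
      bool constant_is_boolean;  // meaningful only when is_constant
    };

    static bool IsLiteralCompareBool(const Value& left, Token op,
                                     const Value& right) {
      return op == Token::EQ_STRICT &&
             ((left.is_constant && left.constant_is_boolean) ||
              (right.is_constant && right.constant_is_boolean));
    }

Because true and false are singleton heap objects, a strict comparison against either one only needs a pointer-identity check, which is exactly what the emitted HCompareObjectEqAndBranch performs.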
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index b654390..eded335 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -749,7 +749,7 @@
     // Exponent word in scratch, exponent part of exponent word in scratch2.
     // Zero in ecx.
     // We know the exponent is smaller than 30 (biased).  If it is less than
-    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
     // it rounds to zero.
     const uint32_t zero_exponent =
         (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
@@ -3723,7 +3723,7 @@
                kShortExternalStringMask);
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be a flat ascii string.  None of the following
+  // Any other flat string must be a flat ASCII string.  None of the following
   // string type tests will succeed if subject is not a string or a short
   // external string.
   __ and_(ebx, Immediate(kIsNotStringMask |
@@ -3772,16 +3772,16 @@
             kStringRepresentationMask | kStringEncodingMask);
   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be sequential ascii or external.
+  // Any other flat string must be sequential ASCII or external.
   __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
             kStringRepresentationMask);
   __ j(not_zero, &external_string);
 
   __ bind(&seq_ascii_string);
-  // eax: subject string (flat ascii)
+  // eax: subject string (flat ASCII)
   // ecx: RegExp data (FixedArray)
   __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
-  __ Set(ecx, Immediate(1));  // Type is ascii.
+  __ Set(ecx, Immediate(1));  // Type is ASCII.
   __ jmp(&check_code, Label::kNear);
 
   __ bind(&seq_two_byte_string);
@@ -3798,7 +3798,7 @@
 
   // eax: subject string
   // edx: code
-  // ecx: encoding of subject string (1 if ascii, 0 if two_byte);
+  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
   __ mov(ebx, Operand(esp, kPreviousIndexOffset));
@@ -3807,7 +3807,7 @@
   // eax: subject string
   // ebx: previous index
   // edx: code
-  // ecx: encoding of subject string (1 if ascii 0 if two_byte);
+  // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
   // All checks done. Now push arguments for native regexp code.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -3847,7 +3847,7 @@
   // esi: original subject string
   // eax: underlying subject string
   // ebx: previous index
-  // ecx: encoding of subject string (1 if ascii 0 if two_byte);
+  // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
   // edx: code
   // Argument 4: End of string data
   // Argument 3: Start of string data
@@ -4475,7 +4475,7 @@
   __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
                                          &check_unequal_objects);
 
-  // Inline comparison of ascii strings.
+  // Inline comparison of ASCII strings.
   if (cc_ == equal) {
     StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      edx,
@@ -5428,7 +5428,7 @@
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize == 1);
   STATIC_ASSERT(kSmiShiftSize == 0);
-  // At this point code register contains smi tagged ascii char code.
+  // At this point code register contains smi tagged ASCII char code.
   __ mov(result_, FieldOperand(result_,
                                code_, times_half_pointer_size,
                                FixedArray::kHeaderSize));
@@ -5548,7 +5548,7 @@
   __ cmp(ebx, Immediate(Smi::FromInt(2)));
   __ j(not_equal, &longer_than_two);
 
-  // Check that both strings are non-external ascii strings.
+  // Check that both strings are non-external ASCII strings.
   __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
 
   // Get the two characters forming the new string.
@@ -5585,11 +5585,11 @@
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ cmp(ebx, Immediate(Smi::FromInt(String::kMinNonFlatLength)));
+  __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
   __ j(below, &string_add_flat_result);
 
   // If result is not supposed to be flat allocate a cons string object. If both
-  // strings are ascii the result is an ascii cons string.
+  // strings are ASCII the result is an ASCII cons string.
   Label non_ascii, allocated, ascii_data;
   __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
@@ -5601,7 +5601,7 @@
   __ test(ecx, Immediate(kStringEncodingMask));
   __ j(zero, &non_ascii);
   __ bind(&ascii_data);
-  // Allocate an acsii cons string.
+  // Allocate an ASCII cons string.
   __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
@@ -5616,7 +5616,7 @@
   __ ret(2 * kPointerSize);
   __ bind(&non_ascii);
   // At least one of the strings is two-byte. Check whether it happens
-  // to contain only ascii characters.
+  // to contain only ASCII characters.
   // ecx: first instance type AND second instance type.
   // edi: second instance type.
   __ test(ecx, Immediate(kAsciiDataHintMask));
@@ -5633,7 +5633,7 @@
   __ jmp(&allocated);
 
   // We cannot encounter sliced strings or cons strings here since:
-  STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
+  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
   // Handle creating a flat result from either external or sequential strings.
   // Locate the first characters' locations.
   // eax: first string
@@ -5691,7 +5691,7 @@
   __ test_b(edi, kStringEncodingMask);
   __ j(zero, &non_ascii_string_add_flat_result);
 
-  // Both strings are ascii strings.
+  // Both strings are ASCII strings.
   // ebx: length of resulting flat string as a smi
   __ SmiUntag(ebx);
   __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
@@ -6001,7 +6001,7 @@
     __ push(mask);
     Register temp = mask;
 
-    // Check that the candidate is a non-external ascii string.
+    // Check that the candidate is a non-external ASCII string.
     __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
     __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
     __ JumpIfInstanceTypeIsNotSequentialAscii(
@@ -6042,6 +6042,7 @@
     __ mov(scratch, Operand::StaticArray(scratch,
                                          times_pointer_size,
                                          roots_array_start));
+    __ SmiUntag(scratch);
     __ add(scratch, character);
     __ mov(hash, scratch);
     __ shl(scratch, 10);
@@ -6280,7 +6281,7 @@
   __ test_b(ebx, kStringEncodingMask);
   __ j(zero, &two_byte_sequential);
 
-  // Sequential ascii string.  Allocate the result.
+  // Sequential ASCII string.  Allocate the result.
   __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
 
   // eax: result string
@@ -6493,10 +6494,10 @@
 
   __ bind(&not_same);
 
-  // Check that both objects are sequential ascii strings.
+  // Check that both objects are sequential ASCII strings.
   __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
 
-  // Compare flat ascii strings.
+  // Compare flat ASCII strings.
   // Drop arguments from the stack.
   __ pop(ecx);
   __ add(esp, Immediate(2 * kPointerSize));
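
The SmiUntag added in the symbol-table hash hunk above is the functional fix in this file: the hash seed is stored in the roots array as a Smi, so it must be untagged before being mixed with the first character code. A sketch of the intended computation (the two mixing lines are assumed from the shift visible in the hunk, not copied from the stub):

    #include <cstdint>

    const int kSmiTagSize = 1;  // ia32: a Smi is the value shifted left by one

    inline uint32_t SmiUntag(uint32_t tagged) { return tagged >> kSmiTagSize; }

    uint32_t HashInit(uint32_t tagged_seed, uint32_t first_char) {
      uint32_t hash = SmiUntag(tagged_seed) + first_char;  // untag before adding
      hash += hash << 10;  // mixing step (assumed)
      hash ^= hash >> 6;   // mixing step (assumed)
      return hash;
    }

Without the untag, the seed would be folded in with its tag bit still present, so the generated code would produce a different hash than one computed from the raw seed value.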
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 4f32744..ede810c 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -106,7 +106,7 @@
 // formal parameter count expected by the function.
 //
 // The live registers are:
-//   o edi: the JS function object being called (ie, ourselves)
+//   o edi: the JS function object being called (i.e. ourselves)
 //   o esi: our context
 //   o ebp: our caller's frame pointer
 //   o esp: stack pointer (pointing to return address)
@@ -227,7 +227,7 @@
            Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
     __ push(edx);
     __ SafePush(Immediate(Smi::FromInt(num_parameters)));
-    // Arguments to ArgumentsAccessStub and/or New...:
+    // Arguments to ArgumentsAccessStub:
     //   function, receiver address, parameter count.
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
@@ -3571,7 +3571,7 @@
 
   // One-character separator case
   __ bind(&one_char_separator);
-  // Replace separator with its ascii character value.
+  // Replace separator with its ASCII character value.
   __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ mov_b(separator_operand, scratch);
 
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 8d412fd..a5c96b0 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -2701,41 +2701,53 @@
                                  int arity,
                                  LInstruction* instr,
                                  CallKind call_kind) {
-  // Change context if needed.
-  bool change_context =
-      (info()->closure()->context() != function->context()) ||
-      scope()->contains_with() ||
-      (scope()->num_heap_slots() > 0);
-  if (change_context) {
-    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-  } else {
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  }
-
-  // Set eax to arguments count if adaption is not needed. Assumes that eax
-  // is available to write to at this point.
-  if (!function->NeedsArgumentsAdaption()) {
-    __ mov(eax, arity);
-  }
+  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+      function->shared()->formal_parameter_count() == arity;
 
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  // Invoke function.
-  __ SetCallKind(ecx, call_kind);
-  if (*function == *info()->closure()) {
-    __ CallSelf();
-  } else {
-    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
-  }
+  if (can_invoke_directly) {
+    __ LoadHeapObject(edi, function);
 
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+    // Change context if needed.
+    bool change_context =
+        (info()->closure()->context() != function->context()) ||
+        scope()->contains_with() ||
+        (scope()->num_heap_slots() > 0);
+
+    if (change_context) {
+      __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+    } else {
+      __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    }
+
+    // Set eax to arguments count if adaption is not needed. Assumes that eax
+    // is available to write to at this point.
+    if (!function->NeedsArgumentsAdaption()) {
+      __ mov(eax, arity);
+    }
+
+    // Invoke function directly.
+    __ SetCallKind(ecx, call_kind);
+    if (*function == *info()->closure()) {
+      __ CallSelf();
+    } else {
+      __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+    }
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    // We need to adapt arguments.
+    SafepointGenerator generator(
+        this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+  }
 }
 
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
-  __ LoadHeapObject(edi, instr->function());
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
@@ -3190,7 +3202,6 @@
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
-  __ LoadHeapObject(edi, instr->target());
   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
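
CallKnownFunction above now decides between a direct call and the arguments adaptor, and it loads the function into edi itself, which is why the two call sites shown drop their LoadHeapObject lines. The decision, as a plain C++ sketch with stand-in types:

    struct KnownFunction {
      bool needs_arguments_adaption;
      int formal_parameter_count;
    };

    enum class CallPath { kDirect, kThroughArgumentsAdaptor };

    CallPath ChooseCallPath(const KnownFunction& f, int arity) {
      // Direct invocation is safe when no adaption is needed, or when the
      // call site already passes exactly the declared number of arguments.
      bool can_invoke_directly =
          !f.needs_arguments_adaption || f.formal_parameter_count == arity;
      return can_invoke_directly ? CallPath::kDirect
                                 : CallPath::kThroughArgumentsAdaptor;
    }

In the adaptor case the code falls back to InvokeFunction, which emits the full invoke prologue instead of the hand-rolled direct call sequence.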
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 43f265c..d0d9e19 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1387,7 +1387,7 @@
   add(scratch1, Immediate(kObjectAlignmentMask));
   and_(scratch1, Immediate(~kObjectAlignmentMask));
 
-  // Allocate ascii string in new space.
+  // Allocate ASCII string in new space.
   AllocateInNewSpace(SeqAsciiString::kHeaderSize,
                      times_1,
                      scratch1,
@@ -1415,7 +1415,7 @@
                                          Label* gc_required) {
   ASSERT(length > 0);
 
-  // Allocate ascii string in new space.
+  // Allocate ASCII string in new space.
   AllocateInNewSpace(SeqAsciiString::SizeFor(length),
                      result,
                      scratch1,
@@ -1933,11 +1933,13 @@
                                     Handle<Code> code_constant,
                                     const Operand& code_operand,
                                     Label* done,
+                                    bool* definitely_mismatches,
                                     InvokeFlag flag,
                                     Label::Distance done_near,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
   bool definitely_matches = false;
+  *definitely_mismatches = false;
   Label invoke;
   if (expected.is_immediate()) {
     ASSERT(actual.is_immediate());
@@ -1953,6 +1955,7 @@
         // arguments.
         definitely_matches = true;
       } else {
+        *definitely_mismatches = true;
         mov(ebx, expected.immediate());
       }
     }
@@ -1990,7 +1993,9 @@
       SetCallKind(ecx, call_kind);
       call(adaptor, RelocInfo::CODE_TARGET);
       call_wrapper.AfterCall();
-      jmp(done, done_near);
+      if (!*definitely_mismatches) {
+        jmp(done, done_near);
+      }
     } else {
       SetCallKind(ecx, call_kind);
       jmp(adaptor, RelocInfo::CODE_TARGET);
@@ -2010,20 +2015,23 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   Label done;
+  bool definitely_mismatches = false;
   InvokePrologue(expected, actual, Handle<Code>::null(), code,
-                 &done, flag, Label::kNear, call_wrapper,
-                 call_kind);
-  if (flag == CALL_FUNCTION) {
-    call_wrapper.BeforeCall(CallSize(code));
-    SetCallKind(ecx, call_kind);
-    call(code);
-    call_wrapper.AfterCall();
-  } else {
-    ASSERT(flag == JUMP_FUNCTION);
-    SetCallKind(ecx, call_kind);
-    jmp(code);
+                 &done, &definitely_mismatches, flag, Label::kNear,
+                 call_wrapper, call_kind);
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code));
+      SetCallKind(ecx, call_kind);
+      call(code);
+      call_wrapper.AfterCall();
+    } else {
+      ASSERT(flag == JUMP_FUNCTION);
+      SetCallKind(ecx, call_kind);
+      jmp(code);
+    }
+    bind(&done);
   }
-  bind(&done);
 }
 
 
@@ -2039,19 +2047,22 @@
 
   Label done;
   Operand dummy(eax, 0);
-  InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
-                 call_wrapper, call_kind);
-  if (flag == CALL_FUNCTION) {
-    call_wrapper.BeforeCall(CallSize(code, rmode));
-    SetCallKind(ecx, call_kind);
-    call(code, rmode);
-    call_wrapper.AfterCall();
-  } else {
-    ASSERT(flag == JUMP_FUNCTION);
-    SetCallKind(ecx, call_kind);
-    jmp(code, rmode);
+  bool definitely_mismatches = false;
+  InvokePrologue(expected, actual, code, dummy, &done, &definitely_mismatches,
+                 flag, Label::kNear, call_wrapper, call_kind);
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code, rmode));
+      SetCallKind(ecx, call_kind);
+      call(code, rmode);
+      call_wrapper.AfterCall();
+    } else {
+      ASSERT(flag == JUMP_FUNCTION);
+      SetCallKind(ecx, call_kind);
+      jmp(code, rmode);
+    }
+    bind(&done);
   }
-  bind(&done);
 }
 
 
@@ -2464,7 +2475,7 @@
   movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
   movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
 
-  // Check that both are flat ascii strings.
+  // Check that both are flat ASCII strings.
   const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
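
The InvokePrologue change above threads a definitely_mismatches out-parameter back to its callers: when both argument counts are immediates and known to differ, control always goes through the arguments adaptor, so the caller skips emitting the direct call/jump and the done label. A simplified sketch of that contract (it omits the don't-adapt sentinel case handled by the real prologue):

    struct ArgCount {
      bool is_immediate;
      int immediate;  // meaningful only when is_immediate
    };

    // Returns true if the caller should still emit the direct invoke sequence.
    bool EmitInvokePrologue(const ArgCount& expected, const ArgCount& actual,
                            bool* definitely_mismatches) {
      *definitely_mismatches = false;
      if (expected.is_immediate && actual.is_immediate &&
          expected.immediate != actual.immediate) {
        // Counts are known to differ: every call goes through the adaptor,
        // so any inline call sequence emitted after it would be unreachable.
        *definitely_mismatches = true;
      }
      return !*definitely_mismatches;
    }

This is also why the jmp(done, done_near) after the adaptor call is now guarded: in the mismatch case the done label is never bound, so the jump would target an unbound label and the code it skips over is never emitted.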
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index c969a6f..0fcb94f 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -368,7 +368,7 @@
   // Check if the map of an object is equal to a specified map and branch to
   // label if not. Skip the smi check if not required (object is known to be a
   // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
-  // against maps that are ElementsKind transition maps of the specificed map.
+  // against maps that are ElementsKind transition maps of the specified map.
   void CheckMap(Register obj,
                 Handle<Map> map,
                 Label* fail,
@@ -791,7 +791,7 @@
   // ---------------------------------------------------------------------------
   // String utilities.
 
-  // Check whether the instance type represents a flat ascii string. Jump to the
+  // Check whether the instance type represents a flat ASCII string. Jump to the
   // label if not. If the instance type can be scratched specify same register
   // for both instance type and scratch.
   void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
@@ -827,6 +827,7 @@
                       Handle<Code> code_constant,
                       const Operand& code_operand,
                       Label* done,
+                      bool* definitely_mismatches,
                       InvokeFlag flag,
                       Label::Distance done_distance,
                       const CallWrapper& call_wrapper = NullCallWrapper(),
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index dbf01ab..e613a06 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -210,7 +210,7 @@
                                                bool check_end_of_string) {
 #ifdef DEBUG
   // If input is ASCII, don't even bother calling here if the string to
-  // match contains a non-ascii character.
+  // match contains a non-ASCII character.
   if (mode_ == ASCII) {
     ASSERT(String::IsAscii(str.start(), str.length()));
   }
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 0da51c8..0adb101 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -992,7 +992,7 @@
 
   __ push(scratch3);  // Restore return address.
 
-  // 3 elements array for v8::Agruments::values_, handler for name and pointer
+  // 3 elements array for v8::Arguments::values_, handler for name and pointer
   // to the values (it considered as smi in GC).
   const int kStackSpace = 5;
   const int kApiArgc = 2;
diff --git a/src/ic.cc b/src/ic.cc
index 9024605..4361ba2 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
diff --git a/src/incremental-marking-inl.h b/src/incremental-marking-inl.h
index 7ae2c99..3e3d6c4 100644
--- a/src/incremental-marking-inl.h
+++ b/src/incremental-marking-inl.h
@@ -95,7 +95,7 @@
   ASSERT(IsMarking());
   Marking::BlackToGrey(mark_bit);
   int obj_size = obj->Size();
-  MemoryChunk::IncrementLiveBytes(obj->address(), -obj_size);
+  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), -obj_size);
   bytes_scanned_ -= obj_size;
   int64_t old_bytes_rescanned = bytes_rescanned_;
   bytes_rescanned_ = old_bytes_rescanned + obj_size;
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index f6d5a59..6248524 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -225,8 +225,8 @@
     MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
     if (mark_bit.data_only()) {
       if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
-        MemoryChunk::IncrementLiveBytes(heap_object->address(),
-                                        heap_object->Size());
+        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
+                                              heap_object->Size());
       }
     } else if (Marking::IsWhite(mark_bit)) {
       incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
@@ -263,8 +263,8 @@
     MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
     if (mark_bit.data_only()) {
       if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
-          MemoryChunk::IncrementLiveBytes(heap_object->address(),
-                                          heap_object->Size());
+          MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
+                                                heap_object->Size());
       }
     } else {
       if (Marking::IsWhite(mark_bit)) {
@@ -491,8 +491,8 @@
     HeapObject* heap_obj = HeapObject::cast(obj);
     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
     if (Marking::IsBlack(mark_bit)) {
-      MemoryChunk::IncrementLiveBytes(heap_obj->address(),
-                                      -heap_obj->Size());
+      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
+                                            -heap_obj->Size());
     }
     Marking::AnyToGrey(mark_bit);
   }
@@ -658,7 +658,7 @@
       MarkBit mark_bit = Marking::MarkBitFrom(obj);
       ASSERT(!Marking::IsBlack(mark_bit));
       Marking::MarkBlack(mark_bit);
-      MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
     }
     state_ = COMPLETE;
     if (FLAG_trace_incremental_marking) {
@@ -671,8 +671,8 @@
   if (FLAG_cleanup_code_caches_at_gc) {
     PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
     Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
-    MemoryChunk::IncrementLiveBytes(poly_cache->address(),
-                                    PolymorphicCodeCache::kSize);
+    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
+                                          PolymorphicCodeCache::kSize);
   }
 
   Object* context = heap_->global_contexts_list();
@@ -685,7 +685,7 @@
       MarkBit mark_bit = Marking::MarkBitFrom(cache);
       if (Marking::IsGrey(mark_bit)) {
         Marking::GreyToBlack(mark_bit);
-        MemoryChunk::IncrementLiveBytes(cache->address(), cache->Size());
+        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
       }
     }
     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
@@ -819,7 +819,7 @@
       SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
                   (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
       Marking::MarkBlack(obj_mark_bit);
-      MemoryChunk::IncrementLiveBytes(obj->address(), size);
+      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
     }
     if (marking_deque_.IsEmpty()) MarkingComplete();
   }
diff --git a/src/isolate.cc b/src/isolate.cc
index 35e9e28..82af337 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -1219,7 +1219,7 @@
   ASSERT(has_pending_exception());
   PropagatePendingExceptionToExternalTryCatch();
 
-  // Allways reschedule out of memory exceptions.
+  // Always reschedule out of memory exceptions.
   if (!is_out_of_memory()) {
     bool is_termination_exception =
         pending_exception() == heap_.termination_exception();
@@ -1454,7 +1454,8 @@
       has_installed_extensions_(false),
       string_tracker_(NULL),
       regexp_stack_(NULL),
-      embedder_data_(NULL) {
+      embedder_data_(NULL),
+      context_exit_happened_(false) {
   TRACE_ISOLATE(constructor);
 
   memset(isolate_addresses_, 0,
diff --git a/src/isolate.h b/src/isolate.h
index 4e5c7db..89793f8 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -258,7 +258,7 @@
 #endif
 #endif  // USE_SIMULATOR
 
-  Address js_entry_sp_;  // the stack pointer of the bottom js entry frame
+  Address js_entry_sp_;  // the stack pointer of the bottom JS entry frame
   Address external_callback_;  // the external callback we're currently in
   StateTag current_vm_state_;
 
@@ -485,7 +485,7 @@
   bool IsDefaultIsolate() const { return this == default_isolate_; }
 
   // Ensures that process-wide resources and the default isolate have been
-  // allocated. It is only necessary to call this method in rare casses, for
+  // allocated. It is only necessary to call this method in rare cases, for
   // example if you are using V8 from within the body of a static initializer.
   // Safe to call multiple times.
   static void EnsureDefaultIsolate();
@@ -635,7 +635,7 @@
   void* formal_count_address() { return &thread_local_top_.formal_count_; }
 
   // Returns the global object of the current context. It could be
-  // a builtin object, or a js global object.
+  // a builtin object, or a JS global object.
   Handle<GlobalObject> global() {
     return Handle<GlobalObject>(context()->global());
   }
@@ -1023,6 +1023,13 @@
     thread_local_top_.top_lookup_result_ = top;
   }
 
+  bool context_exit_happened() {
+    return context_exit_happened_;
+  }
+  void set_context_exit_happened(bool context_exit_happened) {
+    context_exit_happened_ = context_exit_happened;
+  }
+
  private:
   Isolate();
 
@@ -1188,6 +1195,10 @@
   unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
   void* embedder_data_;
 
+  // The garbage collector should be a little more aggressive when it knows
+  // that a context was recently exited.
+  bool context_exit_happened_;
+
 #if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
     defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
   bool simulator_initialized_;
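
The new context_exit_happened_ flag above is read by the mark-compact collector (see the flush_monomorphic_ics_ hunk further down) to decide whether monomorphic ICs might be keeping a dead Context alive; the isolate.cc hunk initializes it to false in the constructor. A sketch of the accessor pattern and its assumed producer; the setter call site is not part of this diff, so treating the context-exit path as the caller is an assumption:

    class IsolateSketch {
     public:
      bool context_exit_happened() const { return context_exit_happened_; }
      void set_context_exit_happened(bool happened) {
        context_exit_happened_ = happened;
      }

     private:
      bool context_exit_happened_ = false;
    };

    void OnContextExit(IsolateSketch* isolate) {
      // Assumed producer: record that a context was just left so the next
      // full collection can be more aggressive about context-bound code.
      isolate->set_context_exit_happened(true);
    }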
diff --git a/src/json-parser.h b/src/json-parser.h
index 2b7077e..d22cd0d 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -130,7 +130,7 @@
   // An object literal is a squiggly-braced and comma separated sequence
   // (possibly empty) of key/value pairs, where the key is a JSON string
   // literal, the value is a JSON value, and the two are separated by a colon.
-  // A JSON array dosn't allow numbers and identifiers as keys, like a
+  // A JSON array doesn't allow numbers and identifiers as keys, like a
   // JavaScript array.
   Handle<Object> ParseJsonObject();
 
@@ -177,7 +177,7 @@
 
   // Set initial position right before the string.
   position_ = -1;
-  // Advance to the first character (posibly EOS)
+  // Advance to the first character (possibly EOS)
   AdvanceSkipWhitespace();
   Handle<Object> result = ParseJsonValue();
   if (result.is_null() || c0_ != kEndOfString) {
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 15b80d9..18b86ba 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -704,7 +704,7 @@
 //   the virtualized backtrack stack and some register changes.  When a node is
 //   to be emitted it can flush the Trace or update it.  Flushing the Trace
 //   will emit code to bring the actual state into line with the virtual state.
-//   Avoiding flushing the state can postpone some work (eg updates of capture
+//   Avoiding flushing the state can postpone some work (e.g. updates of capture
 //   registers).  Postponing work can save time when executing the regular
 //   expression since it may be found that the work never has to be done as a
 //   failure to match can occur.  In addition it is much faster to jump to a
diff --git a/src/jsregexp.h b/src/jsregexp.h
index df110d1..0e01849 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -634,7 +634,7 @@
   static const int kNodeIsTooComplexForGreedyLoops = -1;
   virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
   Label* label() { return &label_; }
-  // If non-generic code is generated for a node (ie the node is not at the
+  // If non-generic code is generated for a node (i.e. the node is not at the
   // start of the trace) then it cannot be reused.  This variable sets a limit
   // on how often we allow that to happen before we insist on starting a new
   // trace and generating generic code for a node that can be reused by flushing
diff --git a/src/list.h b/src/list.h
index 57504e0..adddea4 100644
--- a/src/list.h
+++ b/src/list.h
@@ -67,7 +67,7 @@
 
   // Returns a reference to the element at index i.  This reference is
   // not safe to use after operations that can change the list's
-  // backing store (eg, Add).
+  // backing store (e.g. Add).
   inline T& operator[](int i) const {
     ASSERT(0 <= i);
     ASSERT(i < length_);
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index c94a3ee..abfb0f6 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -581,7 +581,7 @@
   // children of unchanged functions are ignored.
   function MarkChangedFunctions(code_info_tree, chunks) {
 
-    // A convenient interator over diff chunks that also translates
+    // A convenient iterator over diff chunks that also translates
     // positions from old to new in a current non-changed part of script.
     var chunk_it = new function() {
       var chunk_index = 0;
diff --git a/src/liveobjectlist.cc b/src/liveobjectlist.cc
index 436204e..1aabc59 100644
--- a/src/liveobjectlist.cc
+++ b/src/liveobjectlist.cc
@@ -462,7 +462,7 @@
   char prev_ch = 0;
   while (*dst != '\0') {
     char ch = *src++;
-    // We will treat non-ascii chars as '?'.
+    // We will treat non-ASCII chars as '?'.
     if ((ch & 0x80) != 0) {
       ch = '?';
     }
diff --git a/src/macros.py b/src/macros.py
index 34b07ab..8e9c62d 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -26,7 +26,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 # Dictionary that is passed as defines for js2c.py.
-# Used for defines that must be defined for all native js files.
+# Used for defines that must be defined for all native JS files.
 
 const NONE        = 0;
 const READ_ONLY   = 1;
diff --git a/src/mark-compact-inl.h b/src/mark-compact-inl.h
index 573715e..64faf82 100644
--- a/src/mark-compact-inl.h
+++ b/src/mark-compact-inl.h
@@ -49,11 +49,18 @@
 }
 
 
+void MarkCompactCollector::ClearCacheOnMap(Map* map) {
+  if (FLAG_cleanup_code_caches_at_gc) {
+    map->ClearCodeCache(heap());
+  }
+}
+
+
 void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
   ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
   if (!mark_bit.Get()) {
     mark_bit.Set();
-    MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+    MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
     ProcessNewlyMarkedObject(obj);
   }
 }
@@ -63,7 +70,10 @@
   ASSERT(!mark_bit.Get());
   ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
   mark_bit.Set();
-  MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+  if (obj->IsMap()) {
+    ClearCacheOnMap(Map::cast(obj));
+  }
 }
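
The SetMark hunk above now clears a map's code cache at marking time via the new ClearCacheOnMap helper, instead of relying solely on the later per-object processing. A compact sketch of the factored-out behavior (stand-in types; the flag name matches the patch):

    struct Map {
      void ClearCodeCache() { /* drop cached stubs/handlers for this map */ }
    };

    struct HeapObject {
      virtual ~HeapObject() {}
      virtual Map* AsMap() { return nullptr; }  // non-null only for map objects
    };

    bool FLAG_cleanup_code_caches_at_gc = true;

    void ClearCacheOnMap(Map* map) {
      if (FLAG_cleanup_code_caches_at_gc) map->ClearCodeCache();
    }

    void SetMark(HeapObject* obj) {
      // mark_bit.Set(); IncrementLiveBytesFromGC(...);  -- as in the hunk above
      if (Map* map = obj->AsMap()) {
        ClearCacheOnMap(map);
      }
    }

The mark-compact.cc hunk below replaces the open-coded flag check on marked maps with a call to the same helper.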
 
 
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 6d7fbdf..0ca1bfd 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -63,6 +63,7 @@
       compacting_(false),
       was_marked_incrementally_(false),
       collect_maps_(FLAG_collect_maps),
+      flush_monomorphic_ics_(false),
       tracer_(NULL),
       migration_slots_buffer_(NULL),
       heap_(NULL),
@@ -515,6 +516,12 @@
   // order which is not implemented for incremental marking.
   collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
 
+  // Monomorphic ICs are preserved when possible, but need to be flushed
+  // when they might be keeping a Context alive, or when the heap is about
+  // to be serialized.
+  flush_monomorphic_ics_ =
+      heap()->isolate()->context_exit_happened() || Serializer::enabled();
+
   // Rather than passing the tracer around we stash it in a static member
   // variable.
   tracer_ = tracer;
@@ -737,7 +744,7 @@
   // it in place to its left substring.  Return the updated value.
   //
   // Here we assume that if we change *p, we replace it with a heap object
-  // (ie, the left substring of a cons string is always a heap object).
+  // (i.e., the left substring of a cons string is always a heap object).
   //
   // The check performed is:
   //   object->IsConsString() && !object->IsSymbol() &&
@@ -881,7 +888,9 @@
   static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
     Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) {
+    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
+        && (target->ic_state() == MEGAMORPHIC ||
+            heap->mark_compact_collector()->flush_monomorphic_ics_)) {
       IC::Clear(rinfo->pc());
       target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     } else {
@@ -1196,7 +1205,7 @@
       return;
     }
     JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
-    // Flush code or set age on both ascii and two byte code.
+    // Flush code or set age on both ASCII and two byte code.
     UpdateRegExpCodeAgeAndFlush(heap, re, true);
     UpdateRegExpCodeAgeAndFlush(heap, re, false);
     // Visit the fields of the RegExp, including the updated FixedArray.
@@ -1614,9 +1623,7 @@
   ASSERT(HEAP->Contains(object));
   if (object->IsMap()) {
     Map* map = Map::cast(object);
-    if (FLAG_cleanup_code_caches_at_gc) {
-      map->ClearCodeCache(heap());
-    }
+    ClearCacheOnMap(map);
 
     // When map collection is enabled we have to mark through map's transitions
     // in a special way to make transition links weak.
@@ -1641,8 +1648,8 @@
   MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
   if (!mark.Get()) {
     mark.Set();
-    MemoryChunk::IncrementLiveBytes(prototype_transitions->address(),
-                                    prototype_transitions->Size());
+    MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
+                                          prototype_transitions->Size());
   }
 
   Object** raw_descriptor_array_slot =
@@ -1756,7 +1763,7 @@
     MarkBit markbit = Marking::MarkBitFrom(object);
     if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
       Marking::GreyToBlack(markbit);
-      MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
       marking_deque->PushBlack(object);
       if (marking_deque->IsFull()) return;
     }
@@ -1808,7 +1815,7 @@
       Marking::GreyToBlack(markbit);
       Address addr = cell_base + offset * kPointerSize;
       HeapObject* object = HeapObject::FromAddress(addr);
-      MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
       marking_deque->PushBlack(object);
       if (marking_deque->IsFull()) return;
       offset += 2;
@@ -2309,28 +2316,21 @@
               cached_map,
               SKIP_WRITE_BARRIER);
         }
+        new_number_of_transitions++;
       }
+    }
 
-      // Fill slots that became free with undefined value.
-      Object* undefined = heap()->undefined_value();
-      for (int i = new_number_of_transitions * step;
-           i < number_of_transitions * step;
-           i++) {
-        // The undefined object is on a page that is never compacted and never
-        // in new space so it is OK to skip the write barrier.  Also it's a
-        // root.
-        prototype_transitions->set_unchecked(heap_,
-                                             header + i,
-                                             undefined,
-                                             SKIP_WRITE_BARRIER);
-
-        Object** undefined_slot =
-            prototype_transitions->data_start() + i;
-        RecordSlot(undefined_slot, undefined_slot, undefined);
-      }
+    if (new_number_of_transitions != number_of_transitions) {
       map->SetNumberOfProtoTransitions(new_number_of_transitions);
     }
 
+    // Fill slots that became free with undefined value.
+    for (int i = new_number_of_transitions * step;
+         i < number_of_transitions * step;
+         i++) {
+      prototype_transitions->set_undefined(heap_, header + i);
+    }
+
     // Follow the chain of back pointers to find the prototype.
     Map* current = map;
     while (current->IsMap()) {
@@ -3630,6 +3630,9 @@
           PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
                  reinterpret_cast<intptr_t>(p));
         }
+        // Adjust unswept free bytes because releasing a page expects said
+        // counter to be accurate for unswept pages.
+        space->IncreaseUnsweptFreeBytes(p);
         space->ReleasePage(p);
         continue;
       }
@@ -3641,7 +3644,7 @@
         PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
                reinterpret_cast<intptr_t>(p));
       }
-      space->MarkPageForLazySweeping(p);
+      space->IncreaseUnsweptFreeBytes(p);
       continue;
     }
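
The mark-compact.cc changes above introduce flush_monomorphic_ics_ and consult it in VisitCodeTarget, so monomorphic ICs survive an ordinary full GC but are cleared when a context was recently exited or when the heap is about to be serialized. The policy as a small sketch (booleans stand in for the collector and heap state):

    struct IcFlushPolicy {
      bool cleanup_code_caches_at_gc;  // FLAG_cleanup_code_caches_at_gc
      bool context_exit_happened;      // isolate()->context_exit_happened()
      bool serializer_enabled;         // Serializer::enabled()

      bool flush_monomorphic_ics() const {
        return context_exit_happened || serializer_enabled;
      }

      // Mirrors the condition added to VisitCodeTarget above.
      bool ShouldClearIC(bool is_inline_cache_stub, bool is_megamorphic) const {
        return cleanup_code_caches_at_gc && is_inline_cache_stub &&
               (is_megamorphic || flush_monomorphic_ics());
      }
    };

Keeping monomorphic ICs is a throughput win, but an IC that embeds objects from an exited context would otherwise keep that context, and everything it references, alive until the IC itself is flushed.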
 
diff --git a/src/mark-compact.h b/src/mark-compact.h
index e0a7d94..85a4a3b 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -198,7 +198,7 @@
     ASSERT(object->IsHeapObject());
     if (IsFull()) {
       Marking::BlackToGrey(object);
-      MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
       SetOverflowed();
     } else {
       array_[top_] = object;
@@ -407,7 +407,7 @@
   // object from the forwarding address of the previous live object in the
   // page as input, and is updated to contain the offset to be used for the
   // next live object in the same page.  For spaces using a different
-  // encoding (ie, contiguous spaces), the offset parameter is ignored.
+  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
   typedef void (*EncodingFunction)(Heap* heap,
                                    HeapObject* old_object,
                                    int object_size,
@@ -580,6 +580,8 @@
 
   bool collect_maps_;
 
+  bool flush_monomorphic_ics_;
+
   // A pointer to the current stack-allocated GC tracer object during a full
   // collection (NULL before and after).
   GCTracer* tracer_;
@@ -622,10 +624,16 @@
 
   void AfterMarking();
 
+  // Marks the object black and pushes it on the marking stack.
+  // This is for non-incremental marking.
   INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
 
+  // Marks the object black.  This is for non-incremental marking.
   INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
 
+  // Clears the cache of ICs related to this map.
+  INLINE(void ClearCacheOnMap(Map* map));
+
   void ProcessNewlyMarkedObject(HeapObject* obj);
 
   // Creates back pointers for all map transitions, stores them in
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 0788e73..cc21509 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 
 #ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
@@ -78,6 +78,16 @@
 }
 
 
+int FPURegister::ToAllocationIndex(FPURegister reg) {
+  ASSERT(reg.code() % 2 == 0);
+  ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
+  ASSERT(reg.is_valid());
+  ASSERT(!reg.is(kDoubleRegZero));
+  ASSERT(!reg.is(kLithiumScratchDouble));
+  return (reg.code() / 2);
+}
+
+
 // -----------------------------------------------------------------------------
 // RelocInfo.
 
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index b66ea0d..b1ffc45 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 
 
 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_
@@ -182,12 +182,7 @@
       kNumReservedRegisters;
 
 
-  static int ToAllocationIndex(FPURegister reg) {
-    ASSERT(reg.code() % 2 == 0);
-    ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
-    ASSERT(reg.is_valid());
-    return (reg.code() / 2);
-  }
+  inline static int ToAllocationIndex(FPURegister reg);
 
   static FPURegister FromAllocationIndex(int index) {
     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
@@ -302,6 +297,14 @@
 const FPURegister f30 = { 30 };
 const FPURegister f31 = { 31 };
 
+// Register aliases.
+// cp is assumed to be a callee saved register.
+static const Register& kLithiumScratchReg = s3;  // Scratch register.
+static const Register& kLithiumScratchReg2 = s4;  // Scratch register.
+static const Register& kRootRegister = s6;  // Roots array pointer.
+static const Register& cp = s7;     // JavaScript context pointer.
+static const Register& fp = s8_fp;  // Alias for fp.
+static const DoubleRegister& kLithiumScratchDouble = f30;
 static const FPURegister& kDoubleRegZero = f28;
 
 // FPU (coprocessor 1) control registers.
@@ -667,7 +670,7 @@
   // Never use the int16_t b(l)cond version with a branch offset
   // instead of using the Label* version.
 
-  // Jump targets must be in the current 256 MB-aligned region. ie 28 bits.
+  // Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits.
   void j(int32_t target);
   void jal(int32_t target);
   void jalr(Register rs, Register rd = ra);
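
FPURegister::ToAllocationIndex above moves out of line into assembler-mips-inl.h, presumably so its new asserts can refer to the register aliases introduced here (kDoubleRegZero and kLithiumScratchDouble). The mapping itself is unchanged: MIPS doubles occupy even/odd pairs of 32-bit FPU registers, so only even codes are allocatable and the allocator index is code / 2. A sketch with plain asserts standing in for ASSERT:

    #include <cassert>

    const int kDoubleRegZeroCode = 28;         // f28 in the patch
    const int kLithiumScratchDoubleCode = 30;  // f30 in the patch

    int ToAllocationIndex(int fpu_reg_code) {
      assert(fpu_reg_code % 2 == 0);                      // doubles use even codes
      assert(fpu_reg_code != kDoubleRegZeroCode);         // reserved: zero constant
      assert(fpu_reg_code != kLithiumScratchDoubleCode);  // reserved: scratch
      return fpu_reg_code / 2;
    }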
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 9e108c9..8461342 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -895,7 +895,7 @@
 
       // Initialize the FixedArray.
       // a1: constructor
-      // a3: number of elements in properties array (un-tagged)
+      // a3: number of elements in properties array (untagged)
       // t4: JSObject
       // t5: start of next object
       __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
@@ -1099,7 +1099,7 @@
   // ----------- S t a t e -------------
   //  -- a0: code entry
   //  -- a1: function
-  //  -- a2: reveiver_pointer
+  //  -- a2: receiver_pointer
   //  -- a3: argc
   //  -- s0: argv
   // -----------------------------------
@@ -1121,7 +1121,7 @@
 
     // Copy arguments to the stack in a loop.
     // a3: argc
-    // s0: argv, ie points to first arg
+    // s0: argv, i.e. points to first arg
     Label loop, entry;
     __ sll(t0, a3, kPointerSizeLog2);
     __ addu(t2, s0, t0);
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 57091ef..96f406f 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -4061,7 +4061,7 @@
   // Registers:
   // a0: entry address
   // a1: function
-  // a2: reveiver
+  // a2: receiver
   // a3: argc
   //
   // Stack:
@@ -4103,7 +4103,7 @@
   // Registers:
   // a0: entry_address
   // a1: function
-  // a2: reveiver_pointer
+  // a2: receiver_pointer
   // a3: argc
   // s0: argv
   //
@@ -4170,7 +4170,7 @@
   // Registers:
   // a0: entry_address
   // a1: function
-  // a2: reveiver_pointer
+  // a2: receiver_pointer
   // a3: argc
   // s0: argv
   //
@@ -5006,9 +5006,9 @@
   STATIC_ASSERT(kAsciiStringTag == 4);
   STATIC_ASSERT(kTwoByteStringTag == 0);
   // Find the code object based on the assumptions above.
-  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ascii.
+  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ASCII.
   __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
-  __ sra(a3, a0, 2);  // a3 is 1 for ascii, 0 for UC16 (usyed below).
+  __ sra(a3, a0, 2);  // a3 is 1 for ASCII, 0 for UC16 (used below).
   __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
   __ movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
 
@@ -6033,7 +6033,7 @@
 
 
   Label result_longer_than_two;
-  // Check for special case of two character ascii string, in which case
+  // Check for special case of two character ASCII string, in which case
   // we do a lookup in the symbol table first.
   __ li(t0, 2);
   __ Branch(&result_longer_than_two, gt, a2, Operand(t0));
@@ -6164,7 +6164,7 @@
   __ And(t0, a1, Operand(kStringEncodingMask));
   __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
 
-  // Allocate and copy the resulting ascii string.
+  // Allocate and copy the resulting ASCII string.
   __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
 
   // Locate first character of substring to copy.
@@ -6491,7 +6491,7 @@
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
   __ Branch(&string_add_flat_result, lt, t2,
-           Operand(String::kMinNonFlatLength));
+           Operand(ConsString::kMinLength));
   // Handle exceptionally long strings in the runtime system.
   STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
   ASSERT(IsPowerOf2(String::kMaxLength + 1));
@@ -6508,7 +6508,7 @@
   }
   Label non_ascii, allocated, ascii_data;
   STATIC_ASSERT(kTwoByteStringTag == 0);
-  // Branch to non_ascii if either string-encoding field is zero (non-ascii).
+  // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
   __ And(t4, t0, Operand(t1));
   __ And(t4, t4, Operand(kStringEncodingMask));
   __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
@@ -6543,7 +6543,7 @@
   __ Branch(&allocated);
 
   // We cannot encounter sliced strings or cons strings here since:
-  STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
+  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
   // Handle creating a flat result from either external or sequential strings.
   // Locate the first characters' locations.
   // a0: first string
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
index d0a7af5..7d654f6 100644
--- a/src/mips/constants-mips.cc
+++ b/src/mips/constants-mips.cc
@@ -302,7 +302,7 @@
           return kRegisterType;
       };
       break;
-    // 16 bits Immediate type instructions. eg: addi dest, src, imm16.
+    // 16-bit immediate type instructions, e.g. addi dest, src, imm16.
     case REGIMM:
     case BEQ:
     case BNE:
@@ -337,7 +337,7 @@
     case SWC1:
     case SDC1:
       return kImmediateType;
+    // 26-bit immediate type instructions, e.g. j imm26.
+    // 26 bits immediate type instructions. e.g.: j imm26.
     case J:
     case JAL:
       return kJumpType;
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index 210becb..d62a890 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -747,7 +747,7 @@
 
   // Say if the instruction should not be used in a branch delay slot.
   bool IsForbiddenInBranchDelay() const;
-  // Say if the instruction 'links'. eg: jal, bal.
+  // Say if the instruction 'links', e.g. jal, bal.
   bool IsLinkingInstruction() const;
   // Say if the instruction is a break or a trap.
   bool IsTrap() const;
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 7394077..da3be4c 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -125,7 +125,7 @@
 // function.
 //
 // The live registers are:
-//   o a1: the JS function object being called (ie, ourselves)
+//   o a1: the JS function object being called (i.e. ourselves)
 //   o cp: our context
 //   o fp: our caller's frame pointer
 //   o sp: stack pointer
@@ -3655,7 +3655,7 @@
 
   // One-character separator case.
   __ bind(&one_char_separator);
-  // Replace separator with its ascii character value.
+  // Replace separator with its ASCII character value.
   __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
   // Jump into the loop after the code that copies the separator, so the first
   // element is not preceded by a separator.
@@ -3666,7 +3666,7 @@
   //   result_pos: the position to which we are currently copying characters.
   //   element: Current array element.
   //   elements_end: Array end.
-  //   separator: Single separator ascii char (in lower byte).
+  //   separator: Single separator ASCII char (in lower byte).
 
   // Copy the separator character to the result.
   __ sb(separator, MemOperand(result_pos));
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index de64007..0f31c2a 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -1862,9 +1862,8 @@
 }
 
 
-// Branches to a label or falls through with this instance class-name adr
-// returned in temp reg, available for comparison by the caller. Trashes the
-// temp registers, but not the input. Only input and temp2 may alias.
+// Branches to a label or falls through with the answer in flags.  Trashes
+// the temp registers, but not the input.
 void LCodeGen::EmitClassOfTest(Label* is_true,
                                Label* is_false,
                                Handle<String>class_name,
@@ -1872,7 +1871,9 @@
                                Register temp,
                                Register temp2) {
   ASSERT(!input.is(temp));
-  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
+  ASSERT(!input.is(temp2));
+  ASSERT(!temp.is(temp2));
+
   __ JumpIfSmi(input, is_false);
 
   if (class_name->IsEqualTo(CStrVector("Function"))) {
@@ -2884,7 +2885,7 @@
   __ mov(result, input);
   ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
   __ subu(result, zero_reg, input);
-  // Overflow if result is still negative, ie 0x80000000.
+  // Overflow if result is still negative, i.e. 0x80000000.
   DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
   __ bind(&done);
 }
@@ -3816,6 +3817,7 @@
 void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 DoubleRegister result_reg,
                                 bool deoptimize_on_undefined,
+                                bool deoptimize_on_minus_zero,
                                 LEnvironment* env) {
   Register scratch = scratch0();
 
@@ -3845,6 +3847,12 @@
   }
   // Heap number to double register conversion.
   __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+  if (deoptimize_on_minus_zero) {
+    __ mfc1(at, result_reg.low());
+    __ Branch(&done, ne, at, Operand(zero_reg));
+    __ mfc1(scratch, result_reg.high());
+    DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+  }
   __ Branch(&done);
 
   // Smi to double register conversion
@@ -3976,6 +3984,7 @@
 
   EmitNumberUntagD(input_reg, result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
+                   instr->hydrogen()->deoptimize_on_minus_zero(),
                    instr->environment());
 }
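
[Editor's note] The new deoptimize_on_minus_zero path above relies on the IEEE 754 bit pattern of -0.0: its low 32 bits are zero and its high 32 bits equal HeapNumber::kSignMask (0x80000000), which is exactly what the two mfc1 reads check (assuming the usual little-endian word layout of the double register pair). A minimal standalone C++ sketch of the same bit-level test, not V8 code:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Returns true only for -0.0: sign bit set, every other bit clear.
    bool IsMinusZero(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));            // raw IEEE 754 bits
      uint32_t lo = static_cast<uint32_t>(bits);        // low mantissa word
      uint32_t hi = static_cast<uint32_t>(bits >> 32);  // sign/exponent word
      return lo == 0 && hi == 0x80000000u;              // kSignMask in the high word
    }

    int main() {
      assert(IsMinusZero(-0.0));
      assert(!IsMinusZero(0.0));
      assert(!IsMinusZero(-1.0));
    }
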
 
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index 2a54681..e90fc4c 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -280,6 +280,7 @@
   void EmitNumberUntagD(Register input,
                         DoubleRegister result,
                         bool deoptimize_on_undefined,
+                        bool deoptimize_on_minus_zero,
                         LEnvironment* env);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
diff --git a/src/mips/lithium-gap-resolver-mips.cc b/src/mips/lithium-gap-resolver-mips.cc
index 279a95e..41b060d 100644
--- a/src/mips/lithium-gap-resolver-mips.cc
+++ b/src/mips/lithium-gap-resolver-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,7 +34,6 @@
 namespace internal {
 
 static const Register kSavedValueRegister = kLithiumScratchReg;
-static const DoubleRegister kSavedDoubleValueRegister = kLithiumScratchDouble;
 
 LGapResolver::LGapResolver(LCodeGen* owner)
     : cgen_(owner),
@@ -175,9 +174,9 @@
   } else if (source->IsStackSlot()) {
     __ lw(kSavedValueRegister, cgen_->ToMemOperand(source));
   } else if (source->IsDoubleRegister()) {
-    __ mov_d(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
+    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
   } else if (source->IsDoubleStackSlot()) {
-    __ ldc1(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
+    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
   } else {
     UNREACHABLE();
   }
@@ -190,16 +189,16 @@
   ASSERT(in_cycle_);
   ASSERT(saved_destination_ != NULL);
 
-  // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
+  // Spilled value is in kSavedValueRegister or kLithiumScratchDouble.
   if (saved_destination_->IsRegister()) {
     __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
   } else if (saved_destination_->IsStackSlot()) {
     __ sw(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
   } else if (saved_destination_->IsDoubleRegister()) {
     __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
-            kSavedDoubleValueRegister);
+            kLithiumScratchDouble);
   } else if (saved_destination_->IsDoubleStackSlot()) {
-    __ sdc1(kSavedDoubleValueRegister,
+    __ sdc1(kLithiumScratchDouble,
             cgen_->ToMemOperand(saved_destination_));
   } else {
     UNREACHABLE();
@@ -239,8 +238,8 @@
           // Therefore we can't use 'at'.  It is OK if the read from the source
           // destroys 'at', since that happens before the value is read.
           // This uses only a single reg of the double reg-pair.
-          __ lwc1(kSavedDoubleValueRegister, source_operand);
-          __ swc1(kSavedDoubleValueRegister, destination_operand);
+          __ lwc1(kLithiumScratchDouble, source_operand);
+          __ swc1(kLithiumScratchDouble, destination_operand);
         } else {
           __ lw(at, source_operand);
           __ sw(at, destination_operand);
@@ -291,7 +290,7 @@
       ASSERT(destination->IsDoubleStackSlot());
       MemOperand destination_operand = cgen_->ToMemOperand(destination);
       if (in_cycle_) {
-        // kSavedDoubleValueRegister was used to break the cycle,
+        // kLithiumScratchDouble was used to break the cycle,
         // but kSavedValueRegister is free.
         MemOperand source_high_operand =
             cgen_->ToHighMemOperand(source);
@@ -302,8 +301,8 @@
         __ lw(kSavedValueRegister, source_high_operand);
         __ sw(kSavedValueRegister, destination_high_operand);
       } else {
-        __ ldc1(kSavedDoubleValueRegister, source_operand);
-        __ sdc1(kSavedDoubleValueRegister, destination_operand);
+        __ ldc1(kLithiumScratchDouble, source_operand);
+        __ sdc1(kLithiumScratchDouble, destination_operand);
       }
     }
   } else {
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index b66b98f..2c098fe 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1554,7 +1554,7 @@
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+  return new LClassOfTestAndBranch(UseRegister(instr->value()),
                                    TempRegister());
 }
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 4517fe1..141d2a8 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -444,8 +444,10 @@
   xor_(reg0, reg0, at);
 
   // hash = hash * 2057;
-  li(scratch, Operand(2057));
-  mul(reg0, reg0, scratch);
+  sll(scratch, reg0, 11);
+  sll(at, reg0, 3);
+  addu(reg0, reg0, at);
+  addu(reg0, reg0, scratch);
 
   // hash = hash ^ (hash >> 16);
   srl(at, reg0, 16);
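
[Editor's note] The macro-assembler change above is a strength reduction: since 2057 = 2^11 + 2^3 + 1, the multiply in the hash computation can be replaced by two shifts and two adds, which is cheaper than mul on many MIPS cores. A quick standalone check of the identity (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t hash = 0; hash < 100000; ++hash) {
        // hash * 2057 == hash + (hash << 3) + (hash << 11), because
        // 2057 == 1 + 8 + 2048.  Both sides wrap modulo 2^32 identically.
        uint32_t via_mul = hash * 2057u;
        uint32_t via_shifts = hash + (hash << 3) + (hash << 11);
        assert(via_mul == via_shifts);
      }
    }
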
@@ -1176,7 +1178,7 @@
   Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
 
   // We know the exponent is smaller than 30 (biased).  If it is less than
-  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
   // it rounds to zero.
   const uint32_t zero_exponent =
       (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index eb9cf6e..cdcee00 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -51,16 +51,6 @@
 // MIPS generated code calls C code, it must be via t9 register.
 
 
-// Register aliases.
-// cp is assumed to be a callee saved register.
-const Register kLithiumScratchReg = s3;  // Scratch register.
-const Register kLithiumScratchReg2 = s4;  // Scratch register.
-const Register kCondReg = s5;  // Simulated (partial) condition code for mips.
-const Register kRootRegister = s6;  // Roots array pointer.
-const Register cp = s7;     // JavaScript context pointer.
-const Register fp = s8_fp;  // Alias for fp.
-const DoubleRegister kLithiumScratchDouble = f30;  // Double scratch register.
-
 // Flags used for the AllocateInNewSpace functions.
 enum AllocationFlags {
   // No special flags.
@@ -340,7 +330,7 @@
                       Register scratch3,
                       Label* object_is_white_and_not_data);
 
-  // Detects conservatively whether an object is data-only, ie it does need to
+  // Detects conservatively whether an object is data-only, i.e. it does not need to
   // be scanned by the garbage collector.
   void JumpIfDataObject(Register value,
                         Register scratch,
@@ -421,7 +411,7 @@
   }
 
   // Check if the given instruction is a 'type' marker.
-  // ie. check if it is a sll zero_reg, zero_reg, <type> (referenced as
+  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
   // nop(type)). These instructions are generated to mark special location in
   // the code, like some special IC code.
   static inline bool IsMarkedCode(Instr instr, int type) {
@@ -1132,7 +1122,7 @@
 
   // Calls an API function.  Allocates HandleScope, extracts returned value
   // from handle and propagates exceptions.  Restores context.  stack_space
-  // - space to be unwound on exit (includes the call js arguments space and
+  // - space to be unwound on exit (includes the call JS arguments space and
   // the additional space allocated for the fast call).
   void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
 
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 191c2ca..a158f04 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -2291,7 +2291,7 @@
 }
 
 
-// Type 2: instructions using a 16 bytes immediate. (eg: addi, beq).
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
 void Simulator::DecodeTypeImmediate(Instruction* instr) {
   // Instruction fields.
   Opcode   op     = instr->OpcodeFieldRaw();
@@ -2614,7 +2614,7 @@
 }
 
 
-// Type 3: instructions using a 26 bytes immediate. (eg: j, jal).
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
 void Simulator::DecodeTypeJump(Instruction* instr) {
   // Get current pc.
   int32_t current_pc = get_pc();
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index bf01861..97a58c7 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -565,11 +565,11 @@
                                       int argc) {
   // ----------- S t a t e -------------
   //  -- sp[0]              : holder (set by CheckPrototypes)
-  //  -- sp[4]              : callee js function
+  //  -- sp[4]              : callee JS function
   //  -- sp[8]              : call data
-  //  -- sp[12]             : last js argument
+  //  -- sp[12]             : last JS argument
   //  -- ...
-  //  -- sp[(argc + 3) * 4] : first js argument
+  //  -- sp[(argc + 3) * 4] : first JS argument
   //  -- sp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
@@ -587,7 +587,7 @@
     __ li(t2, call_data);
   }
 
-  // Store js function and call data.
+  // Store JS function and call data.
   __ sw(t1, MemOperand(sp, 1 * kPointerSize));
   __ sw(t2, MemOperand(sp, 2 * kPointerSize));
 
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 5c68ddf..3a667a4 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -388,7 +388,7 @@
   CHECK(this->first()->IsString());
   CHECK(this->second() == GetHeap()->empty_string() ||
         this->second()->IsString());
-  CHECK(this->length() >= String::kMinNonFlatLength);
+  CHECK(this->length() >= ConsString::kMinLength);
   if (this->IsFlat()) {
     // A flat cons can only be created by String::SlowTryFlatten.
     // Afterwards, the first part may be externalized.
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 2e9ccc1..7308fb2 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1391,11 +1391,11 @@
     case JS_VALUE_TYPE:
       return JSValue::kSize;
     case JS_ARRAY_TYPE:
-      return JSValue::kSize;
+      return JSArray::kSize;
     case JS_WEAK_MAP_TYPE:
       return JSWeakMap::kSize;
     case JS_REGEXP_TYPE:
-      return JSValue::kSize;
+      return JSRegExp::kSize;
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
       return JSObject::kHeaderSize;
     case JS_MESSAGE_OBJECT_TYPE:
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index e558e58..eca9bab 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -627,7 +627,7 @@
 
 
 // This method is only meant to be called from gdb for debugging purposes.
-// Since the string can also be in two-byte encoding, non-ascii characters
+// Since the string can also be in two-byte encoding, non-ASCII characters
 // will be ignored in the output.
 char* String::ToAsciiArray() {
   // Static so that subsequent calls frees previously allocated space.
diff --git a/src/objects.cc b/src/objects.cc
index abeeec9..0941e79 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -993,7 +993,8 @@
   int new_size = this->Size();  // Byte size of the external String object.
   heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
   if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
-    MemoryChunk::IncrementLiveBytes(this->address(), new_size - size);
+    MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
+                                               new_size - size);
   }
   return true;
 }
@@ -1037,7 +1038,8 @@
   int new_size = this->Size();  // Byte size of the external String object.
   heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
   if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
-    MemoryChunk::IncrementLiveBytes(this->address(), new_size - size);
+    MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
+                                               new_size - size);
   }
   return true;
 }
@@ -3460,7 +3462,8 @@
   current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
                                      instance_size_delta);
   if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
-    MemoryChunk::IncrementLiveBytes(this->address(), -instance_size_delta);
+    MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
+                                               -instance_size_delta);
   }
 
 
@@ -10885,7 +10888,7 @@
 
   // Optimized for symbol key. Knowledge of the key type allows:
   // 1. Move the check if the key is a symbol out of the loop.
-  // 2. Avoid comparing hash codes in symbol to symbol comparision.
+  // 2. Avoid comparing hash codes in symbol to symbol comparison.
   // 3. Detect a case when a dictionary key is not a symbol but the key is.
   //    In case of positive result the dictionary key may be replaced by
   //    the symbol with minimal performance penalty. It gives a chance to
diff --git a/src/objects.h b/src/objects.h
index 791aeb3..0d5bec5 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -217,7 +217,7 @@
 // encoding is considered TWO_BYTE.  It is not mentioned in the name.  ASCII
 // encoding is mentioned explicitly in the name.  Likewise, the default
 // representation is considered sequential.  It is not mentioned in the
-// name.  The other representations (eg, CONS, EXTERNAL) are explicitly
+// name.  The other representations (e.g. CONS, EXTERNAL) are explicitly
 // mentioned.  Finally, the string is either a SYMBOL_TYPE (if it is a
 // symbol) or a STRING_TYPE (if it is not a symbol).
 //
@@ -492,7 +492,7 @@
 STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask) && kSlicedNotConsMask != 0);
 
 // If bit 7 is clear, then bit 3 indicates whether this two-byte
-// string actually contains ascii data.
+// string actually contains ASCII data.
 const uint32_t kAsciiDataHintMask = 0x08;
 const uint32_t kAsciiDataHintTag = 0x08;
 
@@ -1086,7 +1086,7 @@
 
 
 // Heap objects typically have a map pointer in their first word.  However,
-// during GC other data (eg, mark bits, forwarding addresses) is sometimes
+// during GC other data (e.g. mark bits, forwarding addresses) is sometimes
 // encoded in the first word.  The class MapWord is an abstraction of the
 // value in a heap object's first word.
 class MapWord BASE_EMBEDDED {
@@ -1105,7 +1105,7 @@
 
   // True if this map word is a forwarding address for a scavenge
   // collection.  Only valid during a scavenge collection (specifically,
-  // when all map words are heap object pointers, ie. not during a full GC).
+  // when all map words are heap object pointers, i.e. not during a full GC).
   inline bool IsForwardingAddress();
 
   // Create a map word from a forwarding address.
@@ -4492,6 +4492,11 @@
     return elements_kind() == DICTIONARY_ELEMENTS;
   }
 
+  inline bool has_slow_elements_kind() {
+    return elements_kind() == DICTIONARY_ELEMENTS
+        || elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+  }
+
   static bool IsValidElementsTransition(ElementsKind from_kind,
                                         ElementsKind to_kind);
 
@@ -6489,7 +6494,7 @@
   inline String* GetUnderlying();
 
   // Mark the string as an undetectable object. It only applies to
-  // ascii and two byte string types.
+  // ASCII and two byte string types.
   bool MarkAsUndetectable();
 
   // Return a substring.
@@ -6586,14 +6591,11 @@
   // value into an array index.
   static const int kMaxArrayIndexSize = 10;
 
-  // Max ascii char code.
+  // Max ASCII char code.
   static const int kMaxAsciiCharCode = unibrow::Utf8::kMaxOneByteChar;
   static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar;
   static const int kMaxUC16CharCode = 0xffff;
 
-  // Minimum length for a cons string.
-  static const int kMinNonFlatLength = 13;
-
   // Mask constant for checking if a string has a computed hash code
   // and if it is an array index.  The least significant bit indicates
   // whether a hash code has been computed.  If the hash code has been
@@ -6772,8 +6774,8 @@
 };
 
 
-// The AsciiString class captures sequential ascii string objects.
-// Each character in the AsciiString is an ascii character.
+// The AsciiString class captures sequential ASCII string objects.
+// Each character in the AsciiString is an ASCII character.
 class SeqAsciiString: public SeqString {
  public:
   static const bool kHasAsciiEncoding = true;
diff --git a/src/parser.cc b/src/parser.cc
index 777436e..35cc1c9 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -3757,7 +3757,7 @@
                              RelocInfo::kNoPosition,
                              FunctionLiteral::ANONYMOUS_EXPRESSION,
                              CHECK_OK);
-    // Allow any number of parameters for compatiabilty with JSC.
+    // Allow any number of parameters for compatibility with JSC.
     // Specification only allows zero parameters for get and one for set.
     ObjectLiteral::Property* property =
         new(zone()) ObjectLiteral::Property(is_getter, value);
diff --git a/src/parser.h b/src/parser.h
index 146d7bb..16c2eff 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -186,7 +186,7 @@
 // ----------------------------------------------------------------------------
 // REGEXP PARSING
 
-// A BuffferedZoneList is an automatically growing list, just like (and backed
+// A BufferedZoneList is an automatically growing list, just like (and backed
 // by) a ZoneList, that is optimized for the case of adding and removing
 // a single element. The last element added is stored outside the backing list,
 // and if no more than one element is ever added, the ZoneList isn't even
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 9b34de9..f4c20ae 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -114,7 +114,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low and and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 7d0d8d0..65bd720 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -128,7 +128,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low and and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index a3cdc03..5a1c0d1 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -326,7 +326,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low and and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -1151,6 +1151,9 @@
     // occuring during signal delivery.
     useconds_t interval = interval_ * 1000 - 100;
     if (full_or_half == HALF_INTERVAL) interval /= 2;
+#if defined(ANDROID)
+    usleep(interval);
+#else
     int result = usleep(interval);
 #ifdef DEBUG
     if (result != 0 && errno != EINTR) {
@@ -1160,8 +1163,9 @@
               errno);
       ASSERT(result == 0 || errno == EINTR);
     }
-#endif
+#endif  // DEBUG
     USE(result);
+#endif  // ANDROID
   }
 
   const int vm_tgid_;
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 417fb11..369c3e4 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -75,7 +75,7 @@
 namespace v8 {
 namespace internal {
 
-// 0 is never a valid thread id on MacOSX since a ptread_t is
+// 0 is never a valid thread id on MacOSX since a pthread_t is
 // a pointer.
 static const pthread_t kNoThread = (pthread_t) 0;
 
@@ -103,7 +103,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low and and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 6f582d4..a01c08d 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -146,7 +146,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low and and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index d55ea89..08bec93 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -139,7 +139,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low and and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index c439ab9..5c000e6 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -198,7 +198,7 @@
 
 // ----------------------------------------------------------------------------
 // The Time class represents time on win32. A timestamp is represented as
-// a 64-bit integer in 100 nano-seconds since January 1, 1601 (UTC). JavaScript
+// a 64-bit count of 100-nanosecond intervals since January 1, 1601 (UTC). JavaScript
 // timestamps are represented as a doubles in milliseconds since 00:00:00 UTC,
 // January 1, 1970.
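
[Editor's note] Converting between the two representations in this comment is a fixed-scale affair: FILETIME counts 100-nanosecond ticks from 1601-01-01, JavaScript counts milliseconds from 1970-01-01, and the two epochs are 11644473600 seconds apart. A small sketch of the arithmetic (illustrative helper names, not the V8 Time class):

    #include <cstdint>

    const int64_t kEpochDeltaMs = 11644473600000LL;  // 1601-01-01 to 1970-01-01
    const int64_t kTicksPerMs   = 10000;             // 100 ns ticks per millisecond

    // FILETIME-style tick count -> JavaScript milliseconds since the Unix epoch.
    int64_t TicksToJsMs(int64_t ticks_since_1601) {
      return ticks_since_1601 / kTicksPerMs - kEpochDeltaMs;
    }

    // JavaScript milliseconds -> FILETIME-style tick count.
    int64_t JsMsToTicks(int64_t ms_since_1970) {
      return (ms_since_1970 + kEpochDeltaMs) * kTicksPerMs;
    }
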
 
@@ -776,7 +776,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, ie, not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low and and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 99f3a37..b6fb3c5 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -133,7 +133,7 @@
     subject_ptr = slice->parent();
     slice_offset = slice->offset();
   }
-  // Ensure that an underlying string has the same ascii-ness.
+  // Ensure that an underlying string has the same ASCII-ness.
   bool is_ascii = subject_ptr->IsAsciiRepresentation();
   ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
   // String is now either Sequential or External
diff --git a/src/regexp.js b/src/regexp.js
index 596c185..00dd7f1 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -436,8 +436,8 @@
   // value is set in SpiderMonkey, the value it is set to is coerced to a
   // boolean.  We mimic that behavior with a slight difference: in SpiderMonkey
   // the value of the expression 'RegExp.multiline = null' (for instance) is the
-  // boolean false (ie, the value after coercion), while in V8 it is the value
-  // null (ie, the value before coercion).
+  // boolean false (i.e., the value after coercion), while in V8 it is the value
+  // null (i.e., the value before coercion).
 
   // Getter and setter for multiline.
   var multiline = false;
diff --git a/src/runtime.cc b/src/runtime.cc
index fb36213..b9ec719 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -3178,7 +3178,7 @@
   Address end_of_string = answer->address() + string_size;
   isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
   if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) {
-    MemoryChunk::IncrementLiveBytes(answer->address(), -delta);
+    MemoryChunk::IncrementLiveBytesFromMutator(answer->address(), -delta);
   }
 
   return *answer;
@@ -3233,6 +3233,79 @@
 }
 
 
+Handle<String> Runtime::StringReplaceOneCharWithString(Isolate* isolate,
+                                                       Handle<String> subject,
+                                                       Handle<String> search,
+                                                       Handle<String> replace,
+                                                       bool* found,
+                                                       int recursion_limit) {
+  if (recursion_limit == 0) return Handle<String>::null();
+  if (subject->IsConsString()) {
+    ConsString* cons = ConsString::cast(*subject);
+    Handle<String> first = Handle<String>(cons->first());
+    Handle<String> second = Handle<String>(cons->second());
+    Handle<String> new_first =
+        StringReplaceOneCharWithString(isolate,
+                                       first,
+                                       search,
+                                       replace,
+                                       found,
+                                       recursion_limit - 1);
+    if (*found) return isolate->factory()->NewConsString(new_first, second);
+    if (new_first.is_null()) return new_first;
+
+    Handle<String> new_second =
+        StringReplaceOneCharWithString(isolate,
+                                       second,
+                                       search,
+                                       replace,
+                                       found,
+                                       recursion_limit - 1);
+    if (*found) return isolate->factory()->NewConsString(first, new_second);
+    if (new_second.is_null()) return new_second;
+
+    return subject;
+  } else {
+    int index = StringMatch(isolate, subject, search, 0);
+    if (index == -1) return subject;
+    *found = true;
+    Handle<String> first = isolate->factory()->NewSubString(subject, 0, index);
+    Handle<String> cons1 = isolate->factory()->NewConsString(first, replace);
+    Handle<String> second =
+        isolate->factory()->NewSubString(subject, index + 1, subject->length());
+    return isolate->factory()->NewConsString(cons1, second);
+  }
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) {
+  ASSERT(args.length() == 3);
+  HandleScope scope(isolate);
+  CONVERT_ARG_CHECKED(String, subject, 0);
+  CONVERT_ARG_CHECKED(String, search, 1);
+  CONVERT_ARG_CHECKED(String, replace, 2);
+
+  // If the cons string tree is too deep, we simply abort the recursion and
+  // retry with a flattened subject string.
+  const int kRecursionLimit = 0x1000;
+  bool found = false;
+  Handle<String> result =
+      Runtime::StringReplaceOneCharWithString(isolate,
+                                              subject,
+                                              search,
+                                              replace,
+                                              &found,
+                                              kRecursionLimit);
+  if (!result.is_null()) return *result;
+  return *Runtime::StringReplaceOneCharWithString(isolate,
+                                                  FlattenGetString(subject),
+                                                  search,
+                                                  replace,
+                                                  &found,
+                                                  kRecursionLimit);
+}
+
+
 // Perform string match of pattern on subject, starting at start index.
 // Caller must ensure that 0 <= start_index <= sub->length(),
 // and should check that pat->length() + start_index <= sub->length().
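
[Editor's note] Runtime_StringReplaceOneCharWithString above replaces the first occurrence of a one-character search string without flattening the subject: on a flat string it splits at the match and rebuilds the result as cons(cons(prefix, replace), suffix); on a ConsString it recurses into the two halves up to kRecursionLimit and retries on a flattened copy if the tree is too deep. A minimal sketch of the flat-string splitting step, using std::string instead of V8 handles (in V8 the three pieces stay shared via NewSubString/NewConsString, so no characters are copied; the concatenation here only shows the shape of the result):

    #include <optional>
    #include <string>

    // Replace the first occurrence of a single character, or report "not found".
    std::optional<std::string> ReplaceOneChar(const std::string& subject,
                                              char search,
                                              const std::string& replace) {
      std::string::size_type index = subject.find(search);
      if (index == std::string::npos) return std::nullopt;   // caller keeps subject
      // prefix + replace + suffix, i.e. cons(cons(prefix, replace), suffix) in V8.
      return subject.substr(0, index) + replace + subject.substr(index + 1);
    }
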
@@ -5926,8 +5999,8 @@
   //
   // Allocate the resulting string.
   //
-  // NOTE: This assumes that the upper/lower case of an ascii
-  // character is also ascii.  This is currently the case, but it
+  // NOTE: This assumes that the upper/lower case of an ASCII
+  // character is also ASCII.  This is currently the case, but it
   // might break in the future if we implement more context and locale
   // dependent upper/lower conversions.
   Object* o;
@@ -6027,9 +6100,9 @@
 // This function is only useful when it can be inlined and the
 // boundaries are statically known.
 // Requires: all bytes in the input word and the boundaries must be
-// ascii (less than 0x7F).
+// ASCII (less than 0x7F).
 static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
-  // Every byte in an ascii string is less than or equal to 0x7F.
+  // Every byte in an ASCII string is less than or equal to 0x7F.
   ASSERT((w & (kOneInEveryByte * 0x7F)) == w);
   // Use strict inequalities since in edge cases the function could be
   // further simplified.
@@ -6157,10 +6230,10 @@
   // Assume that the string is not empty; we need this assumption later
   if (length == 0) return s;
 
-  // Simpler handling of ascii strings.
+  // Simpler handling of ASCII strings.
   //
-  // NOTE: This assumes that the upper/lower case of an ascii
-  // character is also ascii.  This is currently the case, but it
+  // NOTE: This assumes that the upper/lower case of an ASCII
+  // character is also ASCII.  This is currently the case, but it
   // might break in the future if we implement more context and locale
   // dependent upper/lower conversions.
   if (s->IsSeqAsciiString()) {
@@ -6323,7 +6396,7 @@
 }
 
 
-// Copies ascii characters to the given fixed array looking up
+// Copies ASCII characters to the given fixed array looking up
 // one-char strings in the cache. Gives up on the first char that is
 // not in the cache and fills the remainder with smi zeros. Returns
 // the length of the successfully copied prefix.
@@ -7422,7 +7495,7 @@
 }
 
 // Fast version of Math.pow if we know that y is not an integer and y is not
-// -0.5 or 0.5.  Used as slow case from fullcodegen.
+// -0.5 or 0.5.  Used as slow case from full codegen.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
@@ -7465,7 +7538,7 @@
 
   // We compare with kSmiValueSize - 2 because (2^30 - 0.1) has exponent 29 and
   // should be rounded to 2^30, which is not smi (for 31-bit smis, similar
-  // agument holds for 32-bit smis).
+  // argument holds for 32-bit smis).
   if (!sign && exponent < kSmiValueSize - 2) {
     return Smi::FromInt(static_cast<int>(value + 0.5));
   }
@@ -9355,7 +9428,7 @@
   CONVERT_ARG_CHECKED(String, source, 0);
 
   source = Handle<String>(source->TryFlattenGetString());
-  // Optimized fast case where we only have ascii characters.
+  // Optimized fast case where we only have ASCII characters.
   Handle<Object> result;
   if (source->IsSeqAsciiString()) {
     result = JsonParser<true>::Parse(source);
@@ -10257,7 +10330,7 @@
 
 
 // DefineAccessor takes an optional final argument which is the
-// property attributes (eg, DONT_ENUM, DONT_DELETE).  IMPORTANT: due
+// property attributes (e.g. DONT_ENUM, DONT_DELETE).  IMPORTANT: due
 // to the way accessors are implemented, it is set for both the getter
 // and setter on the first call to DefineAccessor and ignored on
 // subsequent calls.
@@ -11089,7 +11162,7 @@
   Handle<SharedFunctionInfo> shared(context->closure()->shared());
   Handle<ScopeInfo> scope_info(shared->scope_info());
 
-  // Allocate and initialize a JSObject with all the content of theis function
+  // Allocate and initialize a JSObject with all the content of this function
   // closure.
   Handle<JSObject> closure_scope =
       isolate->factory()->NewJSObject(isolate->object_function());
@@ -12257,7 +12330,7 @@
     // because using
     //   instances->set(i, *GetScriptWrapper(script))
     // is unsafe as GetScriptWrapper might call GC and the C++ compiler might
-    // already have deferenced the instances handle.
+    // already have dereferenced the instances handle.
     Handle<JSValue> wrapper = GetScriptWrapper(script);
     instances->set(i, *wrapper);
   }
diff --git a/src/runtime.h b/src/runtime.h
index c915cf3..c0c7b13 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -197,6 +197,7 @@
   F(StringLocaleCompare, 2, 1) \
   F(SubString, 3, 1) \
   F(StringReplaceRegExpWithString, 4, 1) \
+  F(StringReplaceOneCharWithString, 3, 1) \
   F(StringMatch, 3, 1) \
   F(StringTrim, 3, 1) \
   F(StringToArray, 2, 1) \
@@ -629,6 +630,13 @@
   // Get the intrinsic function with the given FunctionId.
   static const Function* FunctionForId(FunctionId id);
 
+  static Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
+                                                       Handle<String> subject,
+                                                       Handle<String> search,
+                                                       Handle<String> replace,
+                                                       bool* found,
+                                                       int recursion_limit);
+
   // General-purpose helper functions for runtime system.
   static int StringMatch(Isolate* isolate,
                          Handle<String> sub,
diff --git a/src/serialize.cc b/src/serialize.cc
index d0a1a63..e9be249 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -1364,6 +1364,13 @@
   CHECK(o->IsHeapObject());
   HeapObject* heap_object = HeapObject::cast(o);
 
+  if (heap_object->IsMap()) {
+    // The code-caches link to context-specific code objects, which
+    // the startup and context serializers cannot currently handle.
+    ASSERT(Map::cast(heap_object)->code_cache() ==
+           heap_object->GetHeap()->raw_unchecked_empty_fixed_array());
+  }
+
   int root_index;
   if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
     PutRoot(root_index, heap_object, how_to_code, where_to_point);
diff --git a/src/serialize.h b/src/serialize.h
index ff10905..6efb8ee 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -228,7 +228,7 @@
     kFromStart = 0x20,              // Object is described relative to start.
     // 0x21-0x28                       One per space.
     // 0x29-0x2f                       Free.
-    // 0x30-0x3f                       Used by misc tags below.
+    // 0x30-0x3f                       Used by misc. tags below.
     kPointedToMask = 0x3f
   };
 
@@ -359,8 +359,8 @@
   // Fills in some heap data in an area from start to end (non-inclusive).  The
   // space id is used for the write barrier.  The object_address is the address
   // of the object we are writing into, or NULL if we are not writing into an
-  // object, ie if we are writing a series of tagged values that are not on the
-  // heap.
+  // object, i.e. if we are writing a series of tagged values that are not on
+  // the heap.
   void ReadChunk(
       Object** start, Object** end, int space, Address object_address);
   HeapObject* GetAddressFromStart(int space);
@@ -632,7 +632,7 @@
   // Serialize the current state of the heap.  The order is:
   // 1) Strong references.
   // 2) Partial snapshot cache.
-  // 3) Weak references (eg the symbol table).
+  // 3) Weak references (e.g. the symbol table).
   virtual void SerializeStrongReferences();
   virtual void SerializeObject(Object* o,
                                HowToCode how_to_code,
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 1cfdc13..d0cddeb 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -248,7 +248,7 @@
 
 
 // Try linear allocation in the page of alloc_info's allocation top.  Does
-// not contain slow case logic (eg, move to the next page or try free list
+// not contain slow case logic (e.g. move to the next page or try free list
 // allocation) so it can be used by all the allocation functions and for all
 // the paged spaces.
 HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
@@ -332,7 +332,7 @@
   string->set_length(length);
   if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
     int delta = static_cast<int>(old_top - allocation_info_.top);
-    MemoryChunk::IncrementLiveBytes(string->address(), -delta);
+    MemoryChunk::IncrementLiveBytesFromMutator(string->address(), -delta);
   }
 }
 
diff --git a/src/spaces.cc b/src/spaces.cc
index c8e94dd..d5b4d81 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -649,6 +649,17 @@
 #endif
 
 // -----------------------------------------------------------------------------
+// MemoryChunk implementation
+
+void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
+    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+  }
+  chunk->IncrementLiveBytes(by);
+}
+
+// -----------------------------------------------------------------------------
 // PagedSpace implementation
 
 PagedSpace::PagedSpace(Heap* heap,
@@ -765,6 +776,8 @@
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
     ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
+  } else {
+    DecreaseUnsweptFreeBytes(page);
   }
 
   if (Page::FromAllocationTop(allocation_info_.top) == page) {
@@ -2112,7 +2125,7 @@
         PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
                reinterpret_cast<intptr_t>(p));
       }
-      unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes());
+      DecreaseUnsweptFreeBytes(p);
       freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
     }
     p = next_page;
@@ -2513,7 +2526,7 @@
     MarkBit mark_bit = Marking::MarkBitFrom(object);
     if (mark_bit.Get()) {
       mark_bit.Clear();
-      MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
       previous = current;
       current = current->next_page();
     } else {
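
[Editor's note] The spaces.cc change above splits live-byte accounting into two entry points: IncrementLiveBytesFromGC only touches the per-chunk counter, while IncrementLiveBytesFromMutator also keeps the owning space's unswept_free_bytes_ estimate consistent when the mutator shrinks an object on a page the sweeper has not visited yet (the helpers it uses are added to spaces.h below). A simplified sketch of that bookkeeping, with hypothetical types and the new-space check omitted, not the V8 classes:

    struct Page {
      int live_bytes = 0;
      bool swept = false;
    };

    struct Space {
      // Estimated free bytes on pages the sweeper has not processed yet.
      int unswept_free_bytes = 0;
    };

    // GC-side update: the collector maintains unswept_free_bytes itself.
    void IncrementLiveBytesFromGC(Page* page, int by) {
      page->live_bytes += by;
    }

    // Mutator-side update: shrinking a live object on an unswept page also
    // grows that page's share of unswept free bytes, so adjust the estimate
    // (by is negative when the object shrinks).
    void IncrementLiveBytesFromMutator(Space* space, Page* page, int by) {
      if (!page->swept) space->unswept_free_bytes -= by;
      page->live_bytes += by;
    }
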
diff --git a/src/spaces.h b/src/spaces.h
index 41c3ef9..f49873a 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -295,7 +295,7 @@
 
 // MemoryChunk represents a memory region owned by a specific space.
 // It is divided into the header and the body. Chunk start is always
-// 1MB aligned. Start of the body is aligned so it can accomodate
+// 1MB aligned. Start of the body is aligned so it can accommodate
 // any heap object.
 class MemoryChunk {
  public:
@@ -472,10 +472,13 @@
     ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
     return live_byte_count_;
   }
-  static void IncrementLiveBytes(Address address, int by) {
+
+  static void IncrementLiveBytesFromGC(Address address, int by) {
     MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
   }
 
+  static void IncrementLiveBytesFromMutator(Address address, int by);
+
   static const intptr_t kAlignment =
       (static_cast<uintptr_t>(1) << kPageSizeBits);
 
@@ -1181,11 +1184,11 @@
 
 
 // An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (ie, not
+// The 'capacity' of a space is the number of object-area bytes (i.e., not
 // including page bookkeeping structures) currently in the space. The 'size'
 // of a space is the number of allocated bytes, the 'waste' in the space is
 // the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (eg, small blocks due
+// allocation without reorganizing the space via a GC (e.g. small blocks due
 // to internal fragmentation, top of page areas in map space), and the bytes
 // 'available' is the number of unallocated bytes that are not waste.  The
 // capacity is the sum of size, waste, and available.
@@ -1198,7 +1201,7 @@
  public:
   AllocationStats() { Clear(); }
 
-  // Zero out all the allocation statistics (ie, no capacity).
+  // Zero out all the allocation statistics (i.e., no capacity).
   void Clear() {
     capacity_ = 0;
     size_ = 0;
@@ -1210,7 +1213,7 @@
     waste_ = 0;
   }
 
-  // Reset the allocation statistics (ie, available = capacity with no
+  // Reset the allocation statistics (i.e., available = capacity with no
   // wasted or allocated bytes).
   void Reset() {
     size_ = 0;
@@ -1341,7 +1344,7 @@
   // starting at 'start' is placed on the free list.  The return value is the
   // number of bytes that have been lost due to internal fragmentation by
   // freeing the block.  Bookkeeping information will be written to the block,
-  // ie, its contents will be destroyed.  The start address should be word
+  // i.e., its contents will be destroyed.  The start address should be word
   // aligned, and the size should be a non-zero multiple of the word size.
   int Free(Address start, int size_in_bytes);
 
@@ -1563,10 +1566,20 @@
     first_unswept_page_ = first;
   }
 
-  void MarkPageForLazySweeping(Page* p) {
+  void IncrementUnsweptFreeBytes(int by) {
+    unswept_free_bytes_ += by;
+  }
+
+  void IncreaseUnsweptFreeBytes(Page* p) {
+    ASSERT(ShouldBeSweptLazily(p));
     unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes());
   }
 
+  void DecreaseUnsweptFreeBytes(Page* p) {
+    ASSERT(ShouldBeSweptLazily(p));
+    unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes());
+  }
+
   bool AdvanceSweeper(intptr_t bytes_to_sweep);
 
   bool IsSweepingComplete() {
diff --git a/src/string.js b/src/string.js
index 3608bac..2d68961 100644
--- a/src/string.js
+++ b/src/string.js
@@ -244,6 +244,15 @@
 
   // Convert the search argument to a string and search for it.
   search = TO_STRING_INLINE(search);
+  if (search.length == 1 &&
+      subject.length > 0xFF &&
+      IS_STRING(replace) &&
+      %StringIndexOf(replace, '$', 0) < 0) {
+    // Searching by traversing a cons string tree and replacing with a cons of
+    // slices only works when the search string is a single character being
+    // replaced by a simple string, and it only pays off for long strings.
+    return %StringReplaceOneCharWithString(subject, search, replace);
+  }
   var start = %StringIndexOf(subject, search, 0);
   if (start < 0) return subject;
   var end = start + search.length;
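
[Editor's note] The guard added above routes a call through the new runtime function only when the cons-of-slices trick applies and is worthwhile: a one-character search string, a subject longer than 0xFF characters, and a plain string replacement with no '$' substitution patterns. Restated as a hypothetical C++ predicate (illustrative names only, mirroring the JavaScript condition):

    #include <string>

    bool UseOneCharReplaceFastPath(const std::string& subject,
                                   const std::string& search,
                                   const std::string& replace) {
      return search.length() == 1 &&                       // single-character search
             subject.length() > 0xFF &&                    // only pays off when long
             replace.find('$') == std::string::npos;       // no $-substitution syntax
    }
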
diff --git a/src/unicode.cc b/src/unicode.cc
index 6e0ac1a..147f716 100644
--- a/src/unicode.cc
+++ b/src/unicode.cc
@@ -210,7 +210,7 @@
 uchar Utf8::CalculateValue(const byte* str,
                            unsigned length,
                            unsigned* cursor) {
-  // We only get called for non-ascii characters.
+  // We only get called for non-ASCII characters.
   if (length == 1) {
     *cursor += 1;
     return kBadChar;
@@ -286,8 +286,8 @@
   }
   const byte* data = reinterpret_cast<const byte*>(str.data());
   if (data[offset] <= kMaxOneByteChar) {
-    // The next character is an ascii char so we scan forward over
-    // the following ascii characters and return the next pure ascii
+    // The next character is an ASCII char so we scan forward over
+    // the following ASCII characters and return the next pure ASCII
     // substring
     const byte* result = data + offset;
     offset++;
@@ -297,13 +297,13 @@
     *offset_ptr = offset;
     return result;
   } else {
-    // The next character is non-ascii so we just fill the buffer
+    // The next character is non-ASCII so we just fill the buffer
     unsigned cursor = 0;
     unsigned chars_read = 0;
     while (offset < str.length()) {
       uchar c = data[offset];
       if (c <= kMaxOneByteChar) {
-        // Fast case for ascii characters
+        // Fast case for ASCII characters
         if (!CharacterStream::EncodeAsciiCharacter(c,
                                                    buffer,
                                                    capacity,
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 3881d66..fd8d536 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -154,7 +154,7 @@
 
 bool ThreadManager::RestoreThread() {
   ASSERT(IsLockedByCurrentThread());
-  // First check whether the current thread has been 'lazily archived', ie
+  // First check whether the current thread has been 'lazily archived', i.e.
   // not archived at all.  If that is the case we put the state storage we
   // had prepared back in the free list, since we didn't need it after all.
   if (lazily_archived_thread_.Equals(ThreadId::Current())) {
diff --git a/src/v8utils.cc b/src/v8utils.cc
index bf0e05d..042a60f 100644
--- a/src/v8utils.cc
+++ b/src/v8utils.cc
@@ -316,7 +316,7 @@
   for (const char* p = data_; p < end; p++) {
     char c = *p;
     if ((c & 0x80) != 0) {
-      // Non-ascii detected:
+      // Non-ASCII detected:
       is_ascii = false;
 
       // Report the error and abort if appropriate:
@@ -329,7 +329,7 @@
                c, filename_, line_no, char_no);
 
         // Allow for some context up to kNumberOfLeadingContextChars chars
-        // before the offending non-ascii char to help the user see where
+        // before the offending non-ASCII char to help the user see where
         // the offending char is.
         const int kNumberOfLeadingContextChars = 10;
         const char* err_context = p - kNumberOfLeadingContextChars;
@@ -345,7 +345,7 @@
         OS::Abort();
       }
 
-      break;  // Non-ascii detected.  No need to continue scanning.
+      break;  // Non-ASCII detected.  No need to continue scanning.
     }
     if (c == '\n') {
       start_of_line = p;
diff --git a/src/version.cc b/src/version.cc
index 9553585..adf007c 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     8
-#define BUILD_NUMBER      6
+#define BUILD_NUMBER      7
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 03a5170..d306101 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -2357,6 +2357,7 @@
   const int kParameterMapHeaderSize =
       FixedArray::kHeaderSize + 2 * kPointerSize;
   Label no_parameter_map;
+  __ xor_(r8, r8);
   __ testq(rbx, rbx);
   __ j(zero, &no_parameter_map, Label::kNear);
   __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
@@ -2450,16 +2451,13 @@
   Label parameters_loop, parameters_test;
 
   // Load tagged parameter count into r9.
-  __ movq(r9, Operand(rsp, 1 * kPointerSize));
+  __ Integer32ToSmi(r9, rbx);
   __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
-  __ addq(r8, Operand(rsp, 3 * kPointerSize));
+  __ addq(r8, Operand(rsp, 1 * kPointerSize));
   __ subq(r8, r9);
   __ Move(r11, factory->the_hole_value());
   __ movq(rdx, rdi);
-  __ SmiToInteger64(kScratchRegister, r9);
-  __ lea(rdi, Operand(rdi, kScratchRegister,
-                      times_pointer_size,
-                      kParameterMapHeaderSize));
+  __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
   // r9 = loop variable (tagged)
   // r8 = mapping index (tagged)
   // r11 = the hole value
@@ -2495,9 +2493,8 @@
   Label arguments_loop, arguments_test;
   __ movq(r8, rbx);
   __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-  // Untag rcx and r8 for the loop below.
+  // Untag rcx for the loop below.
   __ SmiToInteger64(rcx, rcx);
-  __ SmiToInteger64(r8, r8);
   __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
   __ subq(rdx, kScratchRegister);
   __ jmp(&arguments_test, Label::kNear);
@@ -2771,7 +2768,7 @@
                          kShortExternalStringMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be a flat ascii string.  None of the following
+  // Any other flat string must be a flat ASCII string.  None of the following
   // string type tests will succeed if subject is not a string or a short
   // external string.
   __ andb(rbx, Immediate(kIsNotStringMask |
@@ -2822,16 +2819,16 @@
            Immediate(kStringRepresentationMask | kStringEncodingMask));
   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be sequential ascii or external.
+  // Any other flat string must be sequential ASCII or external.
   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
            Immediate(kStringRepresentationMask));
   __ j(not_zero, &external_string);
 
   __ bind(&seq_ascii_string);
-  // rdi: subject string (sequential ascii)
+  // rdi: subject string (sequential ASCII)
   // rax: RegExp data (FixedArray)
   __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
-  __ Set(rcx, 1);  // Type is ascii.
+  __ Set(rcx, 1);  // Type is ASCII.
   __ jmp(&check_code, Label::kNear);
 
   __ bind(&seq_two_byte_string);
@@ -2847,7 +2844,7 @@
   __ JumpIfSmi(r11, &runtime);
 
   // rdi: subject string
-  // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
+  // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
   // r11: code
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
@@ -2855,7 +2852,7 @@
 
   // rdi: subject string
   // rbx: previous index
-  // rcx: encoding of subject string (1 if ascii 0 if two_byte);
+  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
   // r11: code
   // All checks done. Now push arguments for native regexp code.
   Counters* counters = masm->isolate()->counters();
@@ -2912,7 +2909,7 @@
   // Keep track on aliasing between argX defined above and the registers used.
   // rdi: subject string
   // rbx: previous index
-  // rcx: encoding of subject string (1 if ascii 0 if two_byte);
+  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
   // r11: code
   // r14: slice offset
   // r15: original subject string
@@ -3483,7 +3480,7 @@
   __ JumpIfNotBothSequentialAsciiStrings(
       rdx, rax, rcx, rbx, &check_unequal_objects);
 
-  // Inline comparison of ascii strings.
+  // Inline comparison of ASCII strings.
   if (cc_ == equal) {
     StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      rdx,
@@ -4518,7 +4515,7 @@
   __ SmiCompare(rbx, Smi::FromInt(2));
   __ j(not_equal, &longer_than_two);
 
-  // Check that both strings are non-external ascii strings.
+  // Check that both strings are non-external ASCII strings.
   __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
                                                   &call_runtime);
 
@@ -4550,7 +4547,7 @@
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
+  __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
   __ j(below, &string_add_flat_result);
   // Handle exceptionally long strings in the runtime system.
   STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
@@ -4558,7 +4555,7 @@
   __ j(above, &call_runtime);
 
   // If result is not supposed to be flat, allocate a cons string object. If
-  // both strings are ascii the result is an ascii cons string.
+  // both strings are ASCII the result is an ASCII cons string.
   // rax: first string
   // rbx: length of resulting flat string
   // rdx: second string
@@ -4572,7 +4569,7 @@
   __ testl(rcx, Immediate(kStringEncodingMask));
   __ j(zero, &non_ascii);
   __ bind(&ascii_data);
-  // Allocate an acsii cons string.
+  // Allocate an ASCII cons string.
   __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
@@ -4586,7 +4583,7 @@
   __ ret(2 * kPointerSize);
   __ bind(&non_ascii);
   // At least one of the strings is two-byte. Check whether it happens
-  // to contain only ascii characters.
+  // to contain only ASCII characters.
   // rcx: first instance type AND second instance type.
   // r8: first instance type.
   // r9: second instance type.
@@ -4602,7 +4599,7 @@
   __ jmp(&allocated);
 
   // We cannot encounter sliced strings or cons strings here since:
-  STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
+  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
   // Handle creating a flat result from either external or sequential strings.
   // Locate the first characters' locations.
   // rax: first string
@@ -4660,7 +4657,7 @@
   __ j(zero, &non_ascii_string_add_flat_result);
 
   __ bind(&make_flat_ascii_string);
-  // Both strings are ascii strings. As they are short they are both flat.
+  // Both strings are ASCII strings. As they are short they are both flat.
   __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
   // rax: result string
   // Locate first character of result.
@@ -4677,7 +4674,7 @@
   __ ret(2 * kPointerSize);
 
   __ bind(&non_ascii_string_add_flat_result);
-  // Both strings are ascii strings. As they are short they are both flat.
+  // Both strings are ASCII strings. As they are short they are both flat.
   __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
   // rax: result string
   // Locate first character of result.
@@ -4931,7 +4928,7 @@
     // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly
     Register temp = kScratchRegister;
 
-    // Check that the candidate is a non-external ascii string.
+    // Check that the candidate is a non-external ASCII string.
     __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
     __ JumpIfInstanceTypeIsNotSequentialAscii(
         temp, temp, &next_probe[i]);
@@ -5411,7 +5408,7 @@
   // Check that both are sequential ASCII strings.
   __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
 
-  // Inline comparison of ascii strings.
+  // Inline comparison of ASCII strings.
   __ IncrementCounter(counters->string_compare_native(), 1);
   // Drop arguments from the stack
   __ pop(rcx);
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index eeef0e9..4387a32 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -106,7 +106,7 @@
 // formal parameter count expected by the function.
 //
 // The live registers are:
-//   o rdi: the JS function object being called (ie, ourselves)
+//   o rdi: the JS function object being called (i.e. ourselves)
 //   o rsi: our context
 //   o rbp: our caller's frame pointer
 //   o rsp: stack pointer (pointing to return address)
@@ -226,9 +226,15 @@
     //   function, receiver address, parameter count.
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
-    ArgumentsAccessStub stub(
-        is_classic_mode() ? ArgumentsAccessStub::NEW_NON_STRICT_SLOW
-                          : ArgumentsAccessStub::NEW_STRICT);
+    ArgumentsAccessStub::Type type;
+    if (!is_classic_mode()) {
+      type = ArgumentsAccessStub::NEW_STRICT;
+    } else if (function()->has_duplicate_parameters()) {
+      type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+    } else {
+      type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+    }
+    ArgumentsAccessStub stub(type);
     __ CallStub(&stub);
 
     SetVar(arguments, rax, rbx, rdx);
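For reference, a rough JavaScript sketch of the three cases the stub selection above distinguishes (the function names are made up for illustration): strict code gets NEW_STRICT, classic-mode code with unique parameter names becomes eligible for NEW_NON_STRICT_FAST, and duplicate parameter names force NEW_NON_STRICT_SLOW.

  // Strict mode: arguments is unmapped, so the NEW_STRICT stub applies.
  function strictArgs(a, b) { "use strict"; return arguments[0]; }

  // Classic mode, unique parameter names: eligible for NEW_NON_STRICT_FAST.
  function fastArgs(a, b) { return arguments[0]; }

  // Classic mode with a duplicate parameter name: falls back to NEW_NON_STRICT_SLOW.
  function slowArgs(a, a) { return arguments[1]; }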
@@ -3530,7 +3536,7 @@
 
   // One-character separator case
   __ bind(&one_char_separator);
-  // Get the separator ascii character value.
+  // Get the separator ASCII character value.
   // Register "string" holds the separator.
   __ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ Set(index, 0);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 392c74d..350ff6f 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -2565,7 +2565,7 @@
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
       this, pointers, Safepoint::kLazyDeopt);
-  v8::internal::ParameterCount actual(rax);
+  ParameterCount actual(rax);
   __ InvokeFunction(function, actual, CALL_FUNCTION,
                     safepoint_generator, CALL_AS_METHOD);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2615,34 +2615,47 @@
                                  int arity,
                                  LInstruction* instr,
                                  CallKind call_kind) {
-  // Change context if needed.
-  bool change_context =
-      (info()->closure()->context() != function->context()) ||
-      scope()->contains_with() ||
-      (scope()->num_heap_slots() > 0);
-  if (change_context) {
-    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-  }
-
-  // Set rax to arguments count if adaption is not needed. Assumes that rax
-  // is available to write to at this point.
-  if (!function->NeedsArgumentsAdaption()) {
-    __ Set(rax, arity);
-  }
+  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+      function->shared()->formal_parameter_count() == arity;
 
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  // Invoke function.
-  __ SetCallKind(rcx, call_kind);
-  if (*function == *info()->closure()) {
-    __ CallSelf();
-  } else {
-    __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-  }
+  if (can_invoke_directly) {
+    __ LoadHeapObject(rdi, function);
 
-  // Set up deoptimization.
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+    // Change context if needed.
+    bool change_context =
+        (info()->closure()->context() != function->context()) ||
+        scope()->contains_with() ||
+        (scope()->num_heap_slots() > 0);
+    if (change_context) {
+      __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+    }
+
+    // Set rax to arguments count if adaption is not needed. Assumes that rax
+    // is available to write to at this point.
+    if (!function->NeedsArgumentsAdaption()) {
+      __ Set(rax, arity);
+    }
+
+    // Invoke function.
+    __ SetCallKind(rcx, call_kind);
+    if (*function == *info()->closure()) {
+      __ CallSelf();
+    } else {
+      __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+    }
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+  } else {
+    // We need to adapt arguments.
+    SafepointGenerator generator(
+        this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+  }
 
   // Restore context.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
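The rewritten CallKnownFunction above only takes the direct-call path when no argument adaptation is needed or when the call arity matches the callee's formal parameter count; otherwise it falls back to InvokeFunction and the arguments adaptor. A minimal JavaScript illustration of the two situations (the function name is invented):

  function known(a, b) { return a + b; }

  known(1, 2);     // Arity matches the formal parameter count: a direct invoke is possible.
  known(1, 2, 3);  // Extra argument: the call goes through the arguments adaptor.
  known(1);        // Missing argument: likewise adapted, with b set to undefined.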
@@ -2651,7 +2664,6 @@
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
-  __ LoadHeapObject(rdi, instr->function());
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
@@ -2950,6 +2962,7 @@
   __ movq(global_object,
           FieldOperand(global_object, GlobalObject::kGlobalContextOffset));
   __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 
   // Convert 32 random bits in rax to 0.(32 random bits) in a double
   // by computing:
@@ -3094,7 +3107,6 @@
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
-  __ LoadHeapObject(rdi, instr->target());
   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 4755369..2d6bd08 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2114,7 +2114,7 @@
   movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
   movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
 
-  // Check that both are flat ascii strings.
+  // Check that both are flat ASCII strings.
   ASSERT(kNotStringTag != 0);
   const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
@@ -2160,7 +2160,7 @@
   movq(scratch1, first_object_instance_type);
   movq(scratch2, second_object_instance_type);
 
-  // Check that both are flat ascii strings.
+  // Check that both are flat ASCII strings.
   ASSERT(kNotStringTag != 0);
   const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
@@ -3030,26 +3030,30 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   Label done;
+  bool definitely_mismatches = false;
   InvokePrologue(expected,
                  actual,
                  Handle<Code>::null(),
                  code,
                  &done,
+                 &definitely_mismatches,
                  flag,
                  Label::kNear,
                  call_wrapper,
                  call_kind);
-  if (flag == CALL_FUNCTION) {
-    call_wrapper.BeforeCall(CallSize(code));
-    SetCallKind(rcx, call_kind);
-    call(code);
-    call_wrapper.AfterCall();
-  } else {
-    ASSERT(flag == JUMP_FUNCTION);
-    SetCallKind(rcx, call_kind);
-    jmp(code);
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code));
+      SetCallKind(rcx, call_kind);
+      call(code);
+      call_wrapper.AfterCall();
+    } else {
+      ASSERT(flag == JUMP_FUNCTION);
+      SetCallKind(rcx, call_kind);
+      jmp(code);
+    }
+    bind(&done);
   }
-  bind(&done);
 }
 
 
@@ -3064,27 +3068,31 @@
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   Label done;
+  bool definitely_mismatches = false;
   Register dummy = rax;
   InvokePrologue(expected,
                  actual,
                  code,
                  dummy,
                  &done,
+                 &definitely_mismatches,
                  flag,
                  Label::kNear,
                  call_wrapper,
                  call_kind);
-  if (flag == CALL_FUNCTION) {
-    call_wrapper.BeforeCall(CallSize(code));
-    SetCallKind(rcx, call_kind);
-    Call(code, rmode);
-    call_wrapper.AfterCall();
-  } else {
-    ASSERT(flag == JUMP_FUNCTION);
-    SetCallKind(rcx, call_kind);
-    Jump(code, rmode);
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code));
+      SetCallKind(rcx, call_kind);
+      Call(code, rmode);
+      call_wrapper.AfterCall();
+    } else {
+      ASSERT(flag == JUMP_FUNCTION);
+      SetCallKind(rcx, call_kind);
+      Jump(code, rmode);
+    }
+    bind(&done);
   }
-  bind(&done);
 }
 
 
@@ -3136,11 +3144,13 @@
                                     Handle<Code> code_constant,
                                     Register code_register,
                                     Label* done,
+                                    bool* definitely_mismatches,
                                     InvokeFlag flag,
                                     Label::Distance near_jump,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
   bool definitely_matches = false;
+  *definitely_mismatches = false;
   Label invoke;
   if (expected.is_immediate()) {
     ASSERT(actual.is_immediate());
@@ -3156,6 +3166,7 @@
         // arguments.
         definitely_matches = true;
       } else {
+        *definitely_mismatches = true;
         Set(rbx, expected.immediate());
       }
     }
@@ -3192,7 +3203,9 @@
       SetCallKind(rcx, call_kind);
       Call(adaptor, RelocInfo::CODE_TARGET);
       call_wrapper.AfterCall();
-      jmp(done, near_jump);
+      if (!*definitely_mismatches) {
+        jmp(done, near_jump);
+      }
     } else {
       SetCallKind(rcx, call_kind);
       Jump(adaptor, RelocInfo::CODE_TARGET);
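The new definitely_mismatches flag marks call sites where both the expected and the actual argument counts are known immediates and differ, so the arguments adaptor is always taken and the inline call sequence behind the done label can be skipped. Roughly, a JavaScript call site of this shape (function name invented for illustration) always hits that case:

  function pair(a, b) { return [a, b]; }

  // Both counts are statically known here (2 formal parameters, 3 arguments)
  // and differ, so the invocation always goes through the arguments adaptor.
  pair(1, 2, 3);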
@@ -3825,7 +3838,7 @@
     subq(scratch1, Immediate(kHeaderAlignment));
   }
 
-  // Allocate ascii string in new space.
+  // Allocate ASCII string in new space.
   AllocateInNewSpace(SeqAsciiString::kHeaderSize,
                      times_1,
                      scratch1,
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 8596852..aad76bc 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -203,7 +203,7 @@
                    Label* on_black,
                    Label::Distance on_black_distance = Label::kFar);
 
-  // Detects conservatively whether an object is data-only, ie it does need to
+  // Detects conservatively whether an object is data-only, i.e. it does not need to
   // be scanned by the garbage collector.
   void JumpIfDataObject(Register value,
                         Register scratch,
@@ -745,7 +745,7 @@
       Label* on_not_both_flat_ascii,
       Label::Distance near_jump = Label::kFar);
 
-  // Check whether the instance type represents a flat ascii string. Jump to the
+  // Check whether the instance type represents a flat ASCII string. Jump to the
   // label if not. If the instance type can be scratched specify same register
   // for both instance type and scratch.
   void JumpIfInstanceTypeIsNotSequentialAscii(
@@ -901,7 +901,7 @@
   // Check if the map of an object is equal to a specified map and branch to
   // label if not. Skip the smi check if not required (object is known to be a
   // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
-  // against maps that are ElementsKind transition maps of the specificed map.
+  // against maps that are ElementsKind transition maps of the specified map.
   void CheckMap(Register obj,
                 Handle<Map> map,
                 Label* fail,
@@ -1309,6 +1309,7 @@
                       Handle<Code> code_constant,
                       Register code_register,
                       Label* done,
+                      bool* definitely_mismatches,
                       InvokeFlag flag,
                       Label::Distance near_jump = Label::kFar,
                       const CallWrapper& call_wrapper = NullCallWrapper(),
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 1e0cd6a..16730d2 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -226,7 +226,7 @@
                                               bool check_end_of_string) {
 #ifdef DEBUG
   // If input is ASCII, don't even bother calling here if the string to
-  // match contains a non-ascii character.
+  // match contains a non-ASCII character.
   if (mode_ == ASCII) {
     ASSERT(String::IsAscii(str.start(), str.length()));
   }
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 3633fbb..47c2177 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -982,7 +982,7 @@
   __ movq(name_arg, rsp);
   __ push(scratch2);  // Restore return address.
 
-  // 3 elements array for v8::Agruments::values_ and handler for name.
+  // 3-element array for v8::Arguments::values_ and handler for name.
   const int kStackSpace = 4;
 
   // Allocate v8::AccessorInfo in non-GCed stack space.
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index d001e65..8525f38 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -8912,17 +8912,6 @@
 }
 
 
-static v8::Handle<Value> call_ic_function5;
-static v8::Handle<Value> InterceptorCallICGetter5(Local<String> name,
-                                                  const AccessorInfo& info) {
-  ApiTestFuzzer::Fuzz();
-  if (v8_str("x")->Equals(name))
-    return call_ic_function5;
-  else
-    return Local<Value>();
-}
-
-
 // This test checks that if interceptor doesn't provide a function,
 // cached constant function is used
 THREADED_TEST(InterceptorCallICConstantFunctionUsed) {
@@ -8943,6 +8932,17 @@
 }
 
 
+static v8::Handle<Value> call_ic_function5;
+static v8::Handle<Value> InterceptorCallICGetter5(Local<String> name,
+                                                  const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  if (v8_str("x")->Equals(name))
+    return call_ic_function5;
+  else
+    return Local<Value>();
+}
+
+
 // This test checks that if interceptor provides a function,
 // even if we cached constant function, interceptor's function
 // is invoked
@@ -8966,6 +8966,48 @@
 }
 
 
+static v8::Handle<Value> call_ic_function6;
+static v8::Handle<Value> InterceptorCallICGetter6(Local<String> name,
+                                                  const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  if (v8_str("x")->Equals(name))
+    return call_ic_function6;
+  else
+    return Local<Value>();
+}
+
+
+// Same test as above, except the code is wrapped in a function
+// to test the optimized compiler.
+THREADED_TEST(InterceptorCallICConstantFunctionNotNeededWrapped) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(InterceptorCallICGetter6);
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ->NewInstance());
+  call_ic_function6 =
+      v8_compile("function f(x) { return x - 1; }; f")->Run();
+  v8::Handle<Value> value = CompileRun(
+    "function inc(x) { return x + 1; };"
+    "inc(1);"
+    "o.x = inc;"
+    "function test() {"
+    "  var result = 0;"
+    "  for (var i = 0; i < 1000; i++) {"
+    "    result = o.x(42);"
+    "  }"
+    "  return result;"
+    "};"
+    "test();"
+    "test();"
+    "test();"
+    "%OptimizeFunctionOnNextCall(test);"
+    "test()");
+  CHECK_EQ(41, value->Int32Value());
+}
+
+
 // Test the case when we stored constant function into
 // a stub, but it got invalidated later on
 THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
@@ -13725,6 +13767,10 @@
   TestResource* resource2 = new TestResource(two_byte_string);
   v8::Local<v8::String> string2 = v8::String::NewExternal(resource2);
 
+  // Use string1 and string2 to avoid warnings in GCC 4.7.
+  CHECK(string1->IsExternal());
+  CHECK(string2->IsExternal());
+
   VisitorImpl visitor(resource1, resource2);
   v8::V8::VisitExternalResources(&visitor);
   visitor.CheckVisitedResources();
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 1e4e332..7015a1e 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -1572,3 +1572,38 @@
   HEAP->incremental_marking()->set_should_hurry(true);
   HEAP->CollectGarbage(OLD_POINTER_SPACE);
 }
+
+
+TEST(PrototypeTransitionClearing) {
+  InitializeVM();
+  v8::HandleScope scope;
+
+  CompileRun(
+      "var base = {};"
+      "var live = [];"
+      "for (var i = 0; i < 10; i++) {"
+      "  var object = {};"
+      "  var prototype = {};"
+      "  object.__proto__ = prototype;"
+      "  if (i >= 3) live.push(object, prototype);"
+      "}");
+
+  Handle<JSObject> baseObject =
+      v8::Utils::OpenHandle(
+          *v8::Handle<v8::Object>::Cast(
+              v8::Context::GetCurrent()->Global()->Get(v8_str("base"))));
+
+  // Verify that only dead prototype transitions are cleared.
+  CHECK_EQ(10, baseObject->map()->NumberOfProtoTransitions());
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  CHECK_EQ(10 - 3, baseObject->map()->NumberOfProtoTransitions());
+
+  // Verify that prototype transitions array was compacted.
+  FixedArray* trans = baseObject->map()->prototype_transitions();
+  for (int i = 0; i < 10 - 3; i++) {
+    int j = Map::kProtoTransitionHeaderSize +
+        i * Map::kProtoTransitionElementsPerEntry;
+    CHECK(trans->get(j + Map::kProtoTransitionMapOffset)->IsMap());
+    CHECK(trans->get(j + Map::kProtoTransitionPrototypeOffset)->IsJSObject());
+  }
+}
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 93f7588..e11349b 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -355,7 +355,7 @@
 
   // Make sure we cover all always-flat lengths and at least one above.
   static const int kMaxLength = 20;
-  CHECK_GT(kMaxLength, i::String::kMinNonFlatLength);
+  CHECK_GT(kMaxLength, i::ConsString::kMinLength);
 
   // Allocate two JavaScript arrays for holding short strings.
   v8::Handle<v8::Array> ascii_external_strings =
diff --git a/test/mjsunit/regress/regress-110509.js b/test/mjsunit/regress/regress-110509.js
new file mode 100644
index 0000000..132bd23
--- /dev/null
+++ b/test/mjsunit/regress/regress-110509.js
@@ -0,0 +1,41 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Verify that LRandom preserves rsi correctly.
+
+function foo() {
+  Math.random();
+  new Function("");
+}
+
+foo();
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/test/mjsunit/string-replace-one-char.js b/test/mjsunit/string-replace-one-char.js
new file mode 100644
index 0000000..f153acc
--- /dev/null
+++ b/test/mjsunit/string-replace-one-char.js
@@ -0,0 +1,92 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Make sure the strings are long enough to trigger the one-char string replace.
+var prefix1024 = "0123456789ABCDEF";
+for (var i = 0; i < 6; i++) prefix1024 += prefix1024;
+
+function test_replace(result, expected, search, replace) {
+  assertEquals(expected, result.replace(search, replace));
+}
+
+// '$' in the replace string.
+test_replace(prefix1024 + "abcdefghijklmnopqrstuvwxyz",
+             prefix1024 + "abcdefghijk#l#mnopqrstuvwxyz",
+             "l", "#$&#");
+
+test_replace(prefix1024 + "abcdefghijklmnopqrstuvwxyz\u1234",
+             prefix1024 + "abcdefghijk\u2012l\u2012mnopqrstuvwxyz\u1234",
+             "l", "\u2012$&\u2012");
+
+test_replace(prefix1024 + "abcdefghijklmnopqrstuvwxyz",
+             prefix1024 + "abcdefghijk$mnopqrstuvwxyz",
+             "l", "$$");
+
+test_replace(prefix1024 + "abcdefghijklmnopqrstuvwxyz\u1234",
+             prefix1024 + "abcdefghijk$mnopqrstuvwxyz\u1234",
+             "l", "$$");
+
+// Zero length replace string.
+test_replace(prefix1024 + "abcdefghijklmnopqrstuvwxyz",
+             prefix1024 + "abcdefghijklmnopqstuvwxyz",
+             "r", "");
+
+test_replace(prefix1024 + "abcdefghijklmnopq\u1234stuvwxyz",
+             prefix1024 + "abcdefghijklmnopqstuvwxyz",
+             "\u1234", "");
+
+// Search char not found.
+var not_found_1 = prefix1024 + "abcdefghijklmnopqrstuvwxyz";
+test_replace(not_found_1, not_found_1, "+", "-");
+
+var not_found_2 = prefix1024 + "abcdefghijklm\u1234nopqrstuvwxyz";
+test_replace(not_found_2, not_found_2, "+", "---");
+
+var not_found_3 = prefix1024 + "abcdefghijklmnopqrstuvwxyz";
+test_replace(not_found_3, not_found_3, "\u1234", "ZZZ");
+
+// Deep cons tree.
+var nested_1 = "";
+for (var i = 0; i < 1000000; i++) nested_1 += "y";
+var nested_1_result = prefix1024 + nested_1 + "aa";
+nested_1 = prefix1024 + nested_1 + "z";
+test_replace(nested_1, nested_1_result, "z", "aa");
+
+var nested_2 = "\u2244";
+for (var i = 0; i < 1000000; i++) nested_2 += "y";
+var nested_2_result = prefix1024 + nested_2 + "aa";
+nested_2 = prefix1024 + nested_2 + "\u2012";
+test_replace(nested_2, nested_2_result, "\u2012", "aa");
+
+// Sliced string as input.  A cons string is always flattened before it is sliced.
+var slice_1 = ("ab" + prefix1024 + "cdefghijklmnopqrstuvwxyz").slice(1, -1);
+var slice_1_result = "b" + prefix1024 + "cdefghijklmnopqrstuvwxQ";
+test_replace(slice_1, slice_1_result, "y", "Q");
+
+var slice_2 = (prefix1024 + "abcdefghijklmno\u1234\u1234p").slice(1, -1);
+var slice_2_result = prefix1024.substr(1) + "abcdefghijklmnoQ\u1234";
+test_replace(slice_2, slice_2_result, "\u1234", "Q");
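For reference, the '$'-patterns exercised above follow the standard String.prototype.replace substitution rules: "$&" inserts the matched substring and "$$" inserts a literal "$". A minimal standalone example:

  "abc".replace("b", "#$&#");  // "a#b#c"
  "abc".replace("b", "$$");    // "a$c"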