Version 3.5.9.

Made FromPropertyDescriptor not trigger inherited setters.

Fixed .gyp files to work on the ARM simulator.

Fixed shared library build warnings for MSVS.
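
This patch also reorganizes the GYP-based build: platform defaults move from
build/common.gypi into a new build/standalone.gypi, and the Makefile gains
per-architecture/per-mode targets plus build-flag options. As an illustrative
sketch only (the flag and target names are the ones documented in the Makefile
comments further down), typical invocations would look like:

    # Build one architecture/mode combination, then run its tests.
    make ia32.release
    make ia32.release.check

    # Build all ARCHES x MODES and run all tests.
    make check

    # Pass a build option through to GYP, e.g. a shared-library build.
    make library=shared

    # Clean one architecture, or everything.
    make arm.clean
    make clean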


git-svn-id: http://v8.googlecode.com/svn/trunk@9041 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 6b45064..2deff90 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,12 @@
+2011-08-29: Version 3.5.9
+
+        Made FromPropertyDescriptor not trigger inherited setters.
+
+        Fixed .gyp files to work on the ARM simulator.
+
+        Fixed shared library build warnings for MSVS.
+
+
 2011-08-24: Version 3.5.8
 
         Added V8EXPORT attributes for v8::Array::CheckCast and
diff --git a/Makefile b/Makefile
index 85a8f6f..3008779 100644
--- a/Makefile
+++ b/Makefile
@@ -31,40 +31,86 @@
 LINK ?= "g++"
 OUTDIR ?= out
 TESTJOBS ?= -j16
-GYPFLAGS ?= -Dv8_can_use_vfp_instructions=true
+GYPFLAGS ?=
 
-# Architectures and modes to be compiled.
+# Special build flags. Use them like this: "make library=shared"
+
+# library=shared || component=shared_library
+ifeq ($(library), shared)
+  GYPFLAGS += -Dcomponent=shared_library
+endif
+ifdef component
+  GYPFLAGS += -Dcomponent=$(component)
+endif
+# console=readline
+ifdef console
+  GYPFLAGS += -Dconsole=$(console)
+endif
+# disassembler=on
+ifeq ($(disassembler), on)
+  GYPFLAGS += -Dv8_enable_disassembler=1
+endif
+# snapshot=off
+ifeq ($(snapshot), off)
+  GYPFLAGS += -Dv8_use_snapshot='false'
+endif
+# gdbjit=on
+ifeq ($(gdbjit), on)
+  GYPFLAGS += -Dv8_enable_gdbjit=1
+endif
+# liveobjectlist=on
+ifeq ($(liveobjectlist), on)
+  GYPFLAGS += -Dv8_use_liveobjectlist=true
+endif
+# vfp3=off
+ifeq ($(vfp3), off)
+  GYPFLAGS += -Dv8_can_use_vfp_instructions=false
+else
+  GYPFLAGS += -Dv8_can_use_vfp_instructions=true
+endif
+
+# ----------------- available targets: --------------------
+# - any arch listed in ARCHES (see below)
+# - any mode listed in MODES
+# - every combination <arch>.<mode>, e.g. "ia32.release"
+# - any of the above with .check appended, e.g. "ia32.release.check"
+# - default (no target specified): build all ARCHES and MODES
+# - "check": build all targets and run all tests
+# - "<arch>.clean" for any <arch> in ARCHES
+# - "clean": clean all ARCHES
+
+# ----------------- internal stuff ------------------------
+
+# Architectures and modes to be compiled. Consider these to be internal
+# variables; don't override them (use the targets instead).
 ARCHES = ia32 x64 arm
 MODES = release debug
 
 # List of files that trigger Makefile regeneration:
-GYPFILES = build/all.gyp build/common.gypi build/v8-features.gypi \
+GYPFILES = build/all.gyp build/common.gypi build/standalone.gypi \
            preparser/preparser.gyp samples/samples.gyp src/d8.gyp \
            test/cctest/cctest.gyp tools/gyp/v8.gyp
 
 # Generates all combinations of ARCHES and MODES, e.g. "ia32.release".
 BUILDS = $(foreach mode,$(MODES),$(addsuffix .$(mode),$(ARCHES)))
-CHECKS = $(addsuffix .check,$(BUILDS))
 # Generates corresponding test targets, e.g. "ia32.release.check".
+CHECKS = $(addsuffix .check,$(BUILDS))
+# File where previously used GYPFLAGS are stored.
+ENVFILE = $(OUTDIR)/environment
 
-.PHONY: all release debug ia32 x64 arm $(BUILDS)
+.PHONY: all clean $(ENVFILE).new \
+        $(ARCHES) $(MODES) $(BUILDS) $(addsuffix .clean,$(ARCHES))
 
-# Target definitions. "all" is the default, you can specify any others on the
-# command line, e.g. "make ia32". Targets defined in $(BUILDS), e.g.
-# "ia32.debug", can also be specified.
-all: release debug
+# Target definitions. "all" is the default.
+all: $(MODES)
 
-release: $(addsuffix .release,$(ARCHES))
-
-debug: $(addsuffix .debug,$(ARCHES))
-
-ia32: $(addprefix ia32.,$(MODES))
-
-x64: $(addprefix x64.,$(MODES))
-
-arm: $(addprefix arm.,$(MODES))
-
+# Compile targets. MODES and ARCHES are convenience targets.
 .SECONDEXPANSION:
+$(MODES): $(addsuffix .$$@,$(ARCHES))
+
+$(ARCHES): $(addprefix $$@.,$(MODES))
+
+# Defines how to build a particular target (e.g. ia32.release).
 $(BUILDS): $(OUTDIR)/Makefile-$$(basename $$@)
 	@$(MAKE) -C "$(OUTDIR)" -f Makefile-$(basename $@) \
 	         CXX="$(CXX)" LINK="$(LINK)" \
@@ -77,34 +123,49 @@
 	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)
 
 $(addsuffix .check,$(MODES)): $$(basename $$@)
-	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) --mode=$(basename $@)
+	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	    --mode=$(basename $@)
 
 $(addsuffix .check,$(ARCHES)): $$(basename $$@)
-	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) --arch=$(basename $@)
+	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	    --arch=$(basename $@)
 
 $(CHECKS): $$(basename $$@)
-	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) --arch-and-mode=$(basename $@)
+	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	    --arch-and-mode=$(basename $@)
 
 # Clean targets. You can clean each architecture individually, or everything.
 $(addsuffix .clean,$(ARCHES)):
 	rm -f $(OUTDIR)/Makefile-$(basename $@)
 	rm -rf $(OUTDIR)/$(basename $@).release
 	rm -rf $(OUTDIR)/$(basename $@).debug
+	find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete
 
 clean: $(addsuffix .clean,$(ARCHES))
 
 # GYP file generation targets.
-$(OUTDIR)/Makefile-ia32: $(GYPFILES)
+$(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-	              -Ibuild/common.gypi --depth=. -Dtarget_arch=ia32 -S-ia32 \
-	              $(GYPFLAGS)
+	              -Ibuild/standalone.gypi --depth=. -Dtarget_arch=ia32 \
+	              -S-ia32 $(GYPFLAGS)
 
-$(OUTDIR)/Makefile-x64: $(GYPFILES)
+$(OUTDIR)/Makefile-x64: $(GYPFILES) $(ENVFILE)
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-	              -Ibuild/common.gypi --depth=. -Dtarget_arch=x64 -S-x64 \
-	              $(GYPFLAGS)
+	              -Ibuild/standalone.gypi --depth=. -Dtarget_arch=x64 \
+	              -S-x64 $(GYPFLAGS)
 
-$(OUTDIR)/Makefile-arm: $(GYPFILES) build/armu.gypi
+$(OUTDIR)/Makefile-arm: $(GYPFILES) $(ENVFILE)
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
-	              -Ibuild/common.gypi --depth=. -Ibuild/armu.gypi -S-arm \
-	              $(GYPFLAGS)
+	              -Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
+	              -S-arm $(GYPFLAGS)
+
+# Replaces the old environment file with the new one if they differ, which
+# triggers GYP to regenerate the Makefiles.
+$(ENVFILE): $(ENVFILE).new
+	@if test -r $(ENVFILE) && cmp $(ENVFILE).new $(ENVFILE) >/dev/null; \
+	    then rm $(ENVFILE).new; \
+	    else mv $(ENVFILE).new $(ENVFILE); fi
+
+# Stores current GYPFLAGS in a file.
+$(ENVFILE).new:
+	@mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS)" > $(ENVFILE).new;
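
A rough worked example of the flag handling above, for orientation (the option
names are those documented in the Makefile; the exact expansion is
illustrative):

    # Combining two of the documented options:
    make library=shared gdbjit=on
    # composes, in the order of the ifeq blocks above,
    #   GYPFLAGS = -Dcomponent=shared_library -Dv8_enable_gdbjit=1 \
    #              -Dv8_can_use_vfp_instructions=true
    # and records that value in $(OUTDIR)/environment (out/environment by
    # default), so a later run with different flags regenerates the GYP
    # Makefiles.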
diff --git a/build/common.gypi b/build/common.gypi
index c41b2b2..834516f 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -25,184 +25,266 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# Shared definitions for all V8-related targets.
+
 {
   'variables': {
-    'library%': 'static_library',
-    'component%': 'static_library',
-    'visibility%': 'hidden',
-    'msvs_multi_core_compile%': '1',
-    'variables': {
-      'variables': {
-        'conditions': [
-          [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
-            # This handles the Linux platforms we generally deal with. Anything
-            # else gets passed through, which probably won't work very well; such
-            # hosts should pass an explicit target_arch to gyp.
-            'host_arch%':
-              '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
-          }, {  # OS!="linux" and OS!="freebsd" and OS!="openbsd"
-            'host_arch%': 'ia32',
-          }],
-        ],
-      },
-      'host_arch%': '<(host_arch)',
-      'target_arch%': '<(host_arch)',
-      'v8_target_arch%': '<(target_arch)',
-    },
-    'host_arch%': '<(host_arch)',
-    'target_arch%': '<(target_arch)',
-    'v8_target_arch%': '<(v8_target_arch)',
+    'use_system_v8%': 0,
+    'msvs_use_common_release': 0,
+    'gcc_version%': 'unknown',
+    'v8_compress_startup_data%': 'off',
+    'v8_target_arch%': '<(target_arch)',
+
+    # Setting 'v8_can_use_unaligned_accesses' to 'true' will allow the code
+    # generated by V8 to do unaligned memory access, and setting it to 'false'
+    # will ensure that the generated code will always do aligned memory
+    # accesses. The default value of 'default' will try to determine the correct
+    # setting. Note that for Intel architectures (ia32 and x64) unaligned memory
+    # access is allowed for all CPUs.
+    'v8_can_use_unaligned_accesses%': 'default',
+
+    # Setting 'v8_can_use_vfp_instructions' to 'true' will enable use of ARM VFP
+    # instructions in the V8 generated code. VFP instructions will be enabled
+    # both for the snapshot and for the ARM target. Leaving the default value
+    # of 'false' will avoid VFP instructions in the snapshot and use CPU feature
+    # probing when running on the target.
+    'v8_can_use_vfp_instructions%': 'false',
+
+    # Setting v8_use_arm_eabi_hardfloat to true will turn on V8 support for ARM
+    # EABI calling convention where double arguments are passed in VFP
+    # registers. Note that the GCC flag '-mfloat-abi=hard' should be used as
+    # well when compiling for the ARM target.
+    'v8_use_arm_eabi_hardfloat%': 'false',
+
     'v8_enable_debugger_support%': 1,
-    'conditions': [
-      ['(v8_target_arch=="arm" and host_arch!="arm") or \
-        (v8_target_arch=="x64" and host_arch!="x64")', {
-        'want_separate_host_toolset': 1,
-      }, {
-        'want_separate_host_toolset': 0,
-      }],
-    ],
+
+    'v8_enable_disassembler%': 0,
+
+    'v8_enable_gdbjit%': 0,
+
+    # Enable profiling support. Only required on Windows.
+    'v8_enable_prof%': 0,
+
+    # Chrome needs this definition unconditionally. For standalone V8 builds,
+    # it's handled in build/standalone.gypi.
+    'want_separate_host_toolset%': 1,
+
+    'v8_use_snapshot%': 'true',
+    'host_os%': '<(OS)',
+    'v8_use_liveobjectlist%': 'false',
   },
   'target_defaults': {
-    'default_configuration': 'Debug',
     'conditions': [
       ['v8_enable_debugger_support==1', {
-          'defines': ['ENABLE_DEBUGGER_SUPPORT',],
+        'defines': ['ENABLE_DEBUGGER_SUPPORT',],
+      }],
+      ['v8_enable_disassembler==1', {
+        'defines': ['ENABLE_DISASSEMBLER',],
+      }],
+      ['v8_enable_gdbjit==1', {
+        'defines': ['ENABLE_GDB_JIT_INTERFACE',],
+      }],
+      ['OS!="mac"', {
+        # TODO(mark): The OS!="mac" conditional is temporary. It can be
+        # removed once the Mac Chromium build stops setting target_arch to
+        # ia32 and instead sets it to mac. Other checks in this file for
+        # OS=="mac" can be removed at that time as well. This can be cleaned
+        # up once http://crbug.com/44205 is fixed.
+        'conditions': [
+          ['v8_target_arch=="arm"', {
+            'defines': [
+              'V8_TARGET_ARCH_ARM',
+            ],
+            'conditions': [
+              [ 'v8_can_use_unaligned_accesses=="true"', {
+                'defines': [
+                  'CAN_USE_UNALIGNED_ACCESSES=1',
+                ],
+              }],
+              [ 'v8_can_use_unaligned_accesses=="false"', {
+                'defines': [
+                  'CAN_USE_UNALIGNED_ACCESSES=0',
+                ],
+              }],
+              [ 'v8_can_use_vfp_instructions=="true"', {
+                'defines': [
+                  'CAN_USE_VFP_INSTRUCTIONS',
+                ],
+              }],
+              [ 'v8_use_arm_eabi_hardfloat=="true"', {
+                'defines': [
+                  'USE_EABI_HARDFLOAT=1',
+                  'CAN_USE_VFP_INSTRUCTIONS',
+                ],
+                'cflags': [
+                  '-mfloat-abi=hard',
+                ],
+              }, {
+                'defines': [
+                  'USE_EABI_HARDFLOAT=0',
+                ],
+              }],
+              # The ARM assembler assumes the host is 32 bits,
+              # so force building 32-bit host tools.
+              ['host_arch=="x64"', {
+                'target_conditions': [
+                  ['_toolset=="host"', {
+                    'cflags': ['-m32'],
+                    'ldflags': ['-m32'],
+                  }],
+                ],
+              }],
+            ],
+          }],
+          ['v8_target_arch=="ia32"', {
+            'defines': [
+              'V8_TARGET_ARCH_IA32',
+            ],
+          }],
+          ['v8_target_arch=="mips"', {
+            'defines': [
+              'V8_TARGET_ARCH_MIPS',
+            ],
+          }],
+          ['v8_target_arch=="x64"', {
+            'defines': [
+              'V8_TARGET_ARCH_X64',
+            ],
+          }],
+        ],
+      }],
+      ['v8_use_liveobjectlist=="true"', {
+        'defines': [
+          'ENABLE_DEBUGGER_SUPPORT',
+          'INSPECTOR',
+          'OBJECT_PRINT',
+          'LIVEOBJECTLIST',
+        ],
+      }],
+      ['v8_compress_startup_data=="bz2"', {
+        'defines': [
+          'COMPRESS_STARTUP_DATA_BZ2',
+        ],
+      }],
+      ['OS=="win" and v8_enable_prof==1', {
+        'msvs_settings': {
+          'VCLinkerTool': {
+            'GenerateMapFile': 'true',
+          },
         },
-      ],
+      }],
     ],
     'configurations': {
       'Debug': {
-        'cflags': [ '-g', '-O0' ],
-        'defines': [ 'ENABLE_DISASSEMBLER', 'DEBUG', 'V8_ENABLE_CHECKS',
-                     'OBJECT_PRINT' ],
+        'defines': [
+          'DEBUG',
+          'ENABLE_DISASSEMBLER',
+          'V8_ENABLE_CHECKS',
+          'OBJECT_PRINT',
+        ],
+        'msvs_settings': {
+          'VCCLCompilerTool': {
+            'Optimization': '0',
+
+            'conditions': [
+              ['OS=="win" and component=="shared_library"', {
+                'RuntimeLibrary': '3',  # /MDd
+              }, {
+                'RuntimeLibrary': '1',  # /MTd
+              }],
+            ],
+          },
+          'VCLinkerTool': {
+            'LinkIncremental': '2',
+            # For future reference, the stack size needs to be increased
+            # when building for Windows 64-bit, otherwise some test cases
+            # can cause stack overflow.
+            # 'StackReserveSize': '297152',
+          },
+        },
+        'conditions': [
+          ['OS=="freebsd" or OS=="openbsd"', {
+            'cflags': [ '-I/usr/local/include' ],
+          }],
+          ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
+            'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
+                        '-Wnon-virtual-dtor' ],
+          }],
+        ],
       },
       'Release': {
-        'cflags': [ '-O3', '-fomit-frame-pointer', '-fdata-sections',
-                    '-ffunction-sections' ],
+        'conditions': [
+          ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
+            'cflags!': [
+              '-O2',
+              '-Os',
+            ],
+            'cflags': [
+              '-fdata-sections',
+              '-ffunction-sections',
+              '-fomit-frame-pointer',
+              '-O3',
+            ],
+            'conditions': [
+              [ 'gcc_version==44', {
+                'cflags': [
+                  # Avoid crashes with gcc 4.4 in the v8 test suite.
+                  '-fno-tree-vrp',
+                ],
+              }],
+            ],
+          }],
+          ['OS=="freebsd" or OS=="openbsd"', {
+            'cflags': [ '-I/usr/local/include' ],
+          }],
+          ['OS=="mac"', {
+            'xcode_settings': {
+              'GCC_OPTIMIZATION_LEVEL': '3',  # -O3
+
+              # -fstrict-aliasing.  Mainline gcc
+              # enables this at -O2 and above,
+              # but Apple gcc does not unless it
+              # is specified explicitly.
+              'GCC_STRICT_ALIASING': 'YES',
+            },
+          }],
+          ['OS=="win"', {
+            'msvs_configuration_attributes': {
+              'OutputDirectory': '$(SolutionDir)$(ConfigurationName)',
+              'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
+              'CharacterSet': '1',
+            },
+            'msvs_settings': {
+              'VCCLCompilerTool': {
+                'Optimization': '2',
+                'InlineFunctionExpansion': '2',
+                'EnableIntrinsicFunctions': 'true',
+                'FavorSizeOrSpeed': '0',
+                'OmitFramePointers': 'true',
+                'StringPooling': 'true',
+
+                'conditions': [
+                  ['OS=="win" and component=="shared_library"', {
+                    'RuntimeLibrary': '2',  #/MD
+                  }, {
+                    'RuntimeLibrary': '0',  #/MT
+                  }],
+                ],
+              },
+              'VCLinkerTool': {
+                'LinkIncremental': '1',
+                'OptimizeReferences': '2',
+                'OptimizeForWindows98': '1',
+                'EnableCOMDATFolding': '2',
+                # For future reference, the stack size needs to be
+                # increased when building for Windows 64-bit, otherwise
+                # some test cases can cause stack overflow.
+                # 'StackReserveSize': '297152',
+              },
+            },
+          }],
+        ],
       },
     },
   },
-  'conditions': [
-    [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
-      'target_defaults': {
-        'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
-                    '-Wnon-virtual-dtor', '-pthread', '-fno-rtti',
-                    '-fno-exceptions', '-pedantic' ],
-        'ldflags': [ '-pthread', ],
-        'conditions': [
-          [ 'target_arch=="ia32"', {
-            'cflags': [ '-m32' ],
-            'ldflags': [ '-m32' ],
-          }],
-          [ 'OS=="linux"', {
-            'cflags': [ '-ansi' ],
-          }],
-          [ 'visibility=="hidden"', {
-            'cflags': [ '-fvisibility=hidden' ],
-          }],
-        ],
-      },
-    }],  # 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"'
-    ['OS=="win"', {
-      'target_defaults': {
-        'defines': [
-          'WIN32',
-          '_CRT_SECURE_NO_DEPRECATE',
-          '_CRT_NONSTDC_NO_DEPRECATE',
-        ],
-        'conditions': [
-          ['component=="static_library"', {
-            'defines': [
-              '_HAS_EXCEPTIONS=0',
-            ],
-          }],
-        ],        
-        'msvs_cygwin_dirs': ['<(DEPTH)/third_party/cygwin'],
-        'msvs_disabled_warnings': [4355, 4800],
-        'msvs_settings': {
-          'VCCLCompilerTool': {
-            'MinimalRebuild': 'false',
-            'BufferSecurityCheck': 'true',
-            'EnableFunctionLevelLinking': 'true',
-            'RuntimeTypeInfo': 'false',
-            'WarningLevel': '3',
-            'WarnAsError': 'true',
-            'DebugInformationFormat': '3',
-            'Detect64BitPortabilityProblems': 'false',
-            'conditions': [
-              [ 'msvs_multi_core_compile', {
-                'AdditionalOptions': ['/MP'],
-              }],
-              ['component=="shared_library"', {
-                'ExceptionHandling': '1',  # /EHsc
-              }, {
-                'ExceptionHandling': '0',
-              }],
-            ],
-          },
-          'VCLibrarianTool': {
-            'AdditionalOptions': ['/ignore:4221'],
-          },
-          'VCLinkerTool': {
-            'AdditionalDependencies': [
-              'ws2_32.lib',
-            ],
-            'GenerateDebugInformation': 'true',
-            'MapFileName': '$(OutDir)\\$(TargetName).map',
-            'ImportLibrary': '$(OutDir)\\lib\\$(TargetName).lib',
-            'FixedBaseAddress': '1',
-            # LinkIncremental values:
-            #   0 == default
-            #   1 == /INCREMENTAL:NO
-            #   2 == /INCREMENTAL
-            'LinkIncremental': '1',
-            # SubSystem values:
-            #   0 == not set
-            #   1 == /SUBSYSTEM:CONSOLE
-            #   2 == /SUBSYSTEM:WINDOWS
-            'SubSystem': '1',
-          },
-        },
-      },
-    }],  # OS=="win"
-    ['OS=="mac"', {
-      'target_defaults': {
-        'xcode_settings': {
-          'ALWAYS_SEARCH_USER_PATHS': 'NO',
-          'GCC_C_LANGUAGE_STANDARD': 'ansi',        # -ansi
-          'GCC_CW_ASM_SYNTAX': 'NO',                # No -fasm-blocks
-          'GCC_DYNAMIC_NO_PIC': 'NO',               # No -mdynamic-no-pic
-                                                    # (Equivalent to -fPIC)
-          'GCC_ENABLE_CPP_EXCEPTIONS': 'NO',        # -fno-exceptions
-          'GCC_ENABLE_CPP_RTTI': 'NO',              # -fno-rtti
-          'GCC_ENABLE_PASCAL_STRINGS': 'NO',        # No -mpascal-strings
-          # GCC_INLINES_ARE_PRIVATE_EXTERN maps to -fvisibility-inlines-hidden
-          'GCC_INLINES_ARE_PRIVATE_EXTERN': 'YES',
-          'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES',      # -fvisibility=hidden
-          'GCC_THREADSAFE_STATICS': 'NO',           # -fno-threadsafe-statics
-          'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES',    # -Werror
-          'GCC_VERSION': '4.2',
-          'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES',  # -Wnewline-eof
-          'MACOSX_DEPLOYMENT_TARGET': '10.4',       # -mmacosx-version-min=10.4
-          'PREBINDING': 'NO',                       # No -Wl,-prebind
-          'USE_HEADERMAP': 'NO',
-          'OTHER_CFLAGS': [
-            '-fno-strict-aliasing',
-          ],
-          'WARNING_CFLAGS': [
-            '-Wall',
-            '-Wendif-labels',
-            '-W',
-            '-Wno-unused-parameter',
-            '-Wnon-virtual-dtor',
-          ],
-        },
-        'target_conditions': [
-          ['_type!="static_library"', {
-            'xcode_settings': {'OTHER_LDFLAGS': ['-Wl,-search_paths_first']},
-          }],
-        ],  # target_conditions
-      },  # target_defaults
-    }],  # OS=="mac"
-  ],
 }
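
As a sketch of how the ARM-related variables declared above can be fed into a
build driven by the new Makefile (the variable names are the ones defined in
this file; the particular values are only an example):

    # The contents of GYPFLAGS are passed straight to gyp, so these defines
    # become gyp variables for the generated Makefiles:
    GYPFLAGS="-Dv8_use_arm_eabi_hardfloat=true -Dv8_can_use_unaligned_accesses=true" \
        make arm.release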
diff --git a/build/gyp_v8 b/build/gyp_v8
index 61b54c2..dfdbe3f 100755
--- a/build/gyp_v8
+++ b/build/gyp_v8
@@ -92,8 +92,8 @@
     if os.path.realpath(path) not in specified_includes:
       result.append(path)
 
-  # Always include common.gypi & features_override.gypi
-  AddInclude(os.path.join(script_dir, 'common.gypi'))
+  # Always include standalone.gypi
+  AddInclude(os.path.join(script_dir, 'standalone.gypi'))
 
   # Optionally add supplemental .gypi files if present.
   supplements = glob.glob(os.path.join(v8_root, '*', 'supplement.gypi'))
diff --git a/build/standalone.gypi b/build/standalone.gypi
new file mode 100644
index 0000000..81909f1
--- /dev/null
+++ b/build/standalone.gypi
@@ -0,0 +1,200 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Definitions to be used when building stand-alone V8 binaries.
+
+{
+  'variables': {
+    'library%': 'static_library',
+    'component%': 'static_library',
+    'visibility%': 'hidden',
+    'msvs_multi_core_compile%': '1',
+    'variables': {
+      'variables': {
+        'conditions': [
+          [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
+            # This handles the Linux platforms we generally deal with. Anything
+            # else gets passed through, which probably won't work very well; such
+            # hosts should pass an explicit target_arch to gyp.
+            'host_arch%':
+              '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
+          }, {  # OS!="linux" and OS!="freebsd" and OS!="openbsd"
+            'host_arch%': 'ia32',
+          }],
+        ],
+      },
+      'host_arch%': '<(host_arch)',
+      'target_arch%': '<(host_arch)',
+      'v8_target_arch%': '<(target_arch)',
+    },
+    'host_arch%': '<(host_arch)',
+    'target_arch%': '<(target_arch)',
+    'v8_target_arch%': '<(v8_target_arch)',
+    'conditions': [
+      ['(v8_target_arch=="arm" and host_arch!="arm") or \
+        (v8_target_arch=="x64" and host_arch!="x64")', {
+        'want_separate_host_toolset': 1,
+      }, {
+        'want_separate_host_toolset': 0,
+      }],
+    ],
+  },
+  'target_defaults': {
+    'default_configuration': 'Debug',
+    'configurations': {
+      'Debug': {
+        'cflags': [ '-g', '-O0' ],
+      },
+    },
+  },
+  'conditions': [
+    [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
+      'target_defaults': {
+        'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
+                    '-Wnon-virtual-dtor', '-pthread', '-fno-rtti',
+                    '-fno-exceptions', '-pedantic' ],
+        'ldflags': [ '-pthread', ],
+        'conditions': [
+          [ 'target_arch=="ia32"', {
+            'cflags': [ '-m32' ],
+            'ldflags': [ '-m32' ],
+          }],
+          [ 'OS=="linux"', {
+            'cflags': [ '-ansi' ],
+          }],
+          [ 'visibility=="hidden"', {
+            'cflags': [ '-fvisibility=hidden' ],
+          }],
+          [ 'component=="shared_library"', {
+            'cflags': [ '-fPIC', ],
+          }],
+        ],
+      },
+    }],  # 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"'
+    ['OS=="win"', {
+      'target_defaults': {
+        'defines': [
+          'WIN32',
+          '_CRT_SECURE_NO_DEPRECATE',
+          '_CRT_NONSTDC_NO_DEPRECATE',
+        ],
+        'conditions': [
+          ['component=="static_library"', {
+            'defines': [
+              '_HAS_EXCEPTIONS=0',
+            ],
+          }],
+        ],
+        'msvs_cygwin_dirs': ['<(DEPTH)/third_party/cygwin'],
+        'msvs_disabled_warnings': [4355, 4800],
+        'msvs_settings': {
+          'VCCLCompilerTool': {
+            'MinimalRebuild': 'false',
+            'BufferSecurityCheck': 'true',
+            'EnableFunctionLevelLinking': 'true',
+            'RuntimeTypeInfo': 'false',
+            'WarningLevel': '3',
+            'WarnAsError': 'true',
+            'DebugInformationFormat': '3',
+            'Detect64BitPortabilityProblems': 'false',
+            'conditions': [
+              [ 'msvs_multi_core_compile', {
+                'AdditionalOptions': ['/MP'],
+              }],
+              ['component=="shared_library"', {
+                'ExceptionHandling': '1',  # /EHsc
+              }, {
+                'ExceptionHandling': '0',
+              }],
+            ],
+          },
+          'VCLibrarianTool': {
+            'AdditionalOptions': ['/ignore:4221'],
+          },
+          'VCLinkerTool': {
+            'AdditionalDependencies': [
+              'ws2_32.lib',
+            ],
+            'GenerateDebugInformation': 'true',
+            'MapFileName': '$(OutDir)\\$(TargetName).map',
+            'ImportLibrary': '$(OutDir)\\lib\\$(TargetName).lib',
+            'FixedBaseAddress': '1',
+            # LinkIncremental values:
+            #   0 == default
+            #   1 == /INCREMENTAL:NO
+            #   2 == /INCREMENTAL
+            'LinkIncremental': '1',
+            # SubSystem values:
+            #   0 == not set
+            #   1 == /SUBSYSTEM:CONSOLE
+            #   2 == /SUBSYSTEM:WINDOWS
+            'SubSystem': '1',
+          },
+        },
+      },
+    }],  # OS=="win"
+    ['OS=="mac"', {
+      'target_defaults': {
+        'xcode_settings': {
+          'ALWAYS_SEARCH_USER_PATHS': 'NO',
+          'GCC_C_LANGUAGE_STANDARD': 'ansi',        # -ansi
+          'GCC_CW_ASM_SYNTAX': 'NO',                # No -fasm-blocks
+          'GCC_DYNAMIC_NO_PIC': 'NO',               # No -mdynamic-no-pic
+                                                    # (Equivalent to -fPIC)
+          'GCC_ENABLE_CPP_EXCEPTIONS': 'NO',        # -fno-exceptions
+          'GCC_ENABLE_CPP_RTTI': 'NO',              # -fno-rtti
+          'GCC_ENABLE_PASCAL_STRINGS': 'NO',        # No -mpascal-strings
+          # GCC_INLINES_ARE_PRIVATE_EXTERN maps to -fvisibility-inlines-hidden
+          'GCC_INLINES_ARE_PRIVATE_EXTERN': 'YES',
+          'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES',      # -fvisibility=hidden
+          'GCC_THREADSAFE_STATICS': 'NO',           # -fno-threadsafe-statics
+          'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES',    # -Werror
+          'GCC_VERSION': '4.2',
+          'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES',  # -Wnewline-eof
+          'MACOSX_DEPLOYMENT_TARGET': '10.4',       # -mmacosx-version-min=10.4
+          'PREBINDING': 'NO',                       # No -Wl,-prebind
+          'USE_HEADERMAP': 'NO',
+          'OTHER_CFLAGS': [
+            '-fno-strict-aliasing',
+          ],
+          'WARNING_CFLAGS': [
+            '-Wall',
+            '-Wendif-labels',
+            '-W',
+            '-Wno-unused-parameter',
+            '-Wnon-virtual-dtor',
+          ],
+        },
+        'target_conditions': [
+          ['_type!="static_library"', {
+            'xcode_settings': {'OTHER_LDFLAGS': ['-Wl,-search_paths_first']},
+          }],
+        ],  # target_conditions
+      },  # target_defaults
+    }],  # OS=="mac"
+  ],
+}
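
For orientation, this is roughly the gyp command the updated Makefile runs for
the ARM simulator build, with standalone.gypi as the top-level include, the
default OUTDIR of "out", and the GYPFLAGS expansion omitted (the individual
.gyp files, e.g. preparser.gyp and samples.gyp below, now pull in
build/common.gypi themselves):

    build/gyp/gyp --generator-output="out" build/all.gyp \
                  -Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
                  -S-arm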
diff --git a/build/v8-features.gypi b/build/v8-features.gypi
deleted file mode 100644
index 45235db..0000000
--- a/build/v8-features.gypi
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-#       copyright notice, this list of conditions and the following
-#       disclaimer in the documentation and/or other materials provided
-#       with the distribution.
-#     * Neither the name of Google Inc. nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# TODO(sgjesse): This is currently copied from v8.gyp, should probably
-# be refactored.
-{
-  'variables': {
-    'use_system_v8%': 0,
-    'msvs_use_common_release': 0,
-    'gcc_version%': 'unknown',
-    'v8_compress_startup_data%': 'off',
-    'v8_target_arch%': '<(target_arch)',
-
-    # Setting 'v8_can_use_unaligned_accesses' to 'true' will allow the code
-    # generated by V8 to do unaligned memory access, and setting it to 'false'
-    # will ensure that the generated code will always do aligned memory
-    # accesses. The default value of 'default' will try to determine the correct
-    # setting. Note that for Intel architectures (ia32 and x64) unaligned memory
-    # access is allowed for all CPUs.
-    'v8_can_use_unaligned_accesses%': 'default',
-
-    # Setting 'v8_can_use_vfp_instructions' to 'true' will enable use of ARM VFP
-    # instructions in the V8 generated code. VFP instructions will be enabled
-    # both for the snapshot and for the ARM target. Leaving the default value
-    # of 'false' will avoid VFP instructions in the snapshot and use CPU feature
-    # probing when running on the target.
-    'v8_can_use_vfp_instructions%': 'false',
-
-    # Setting v8_use_arm_eabi_hardfloat to true will turn on V8 support for ARM
-    # EABI calling convention where double arguments are passed in VFP
-    # registers. Note that the GCC flag '-mfloat-abi=hard' should be used as
-    # well when compiling for the ARM target.
-    'v8_use_arm_eabi_hardfloat%': 'false',
-
-    'v8_use_snapshot%': 'true',
-    'host_os%': '<(OS)',
-    'v8_use_liveobjectlist%': 'false',
-  },
-  'target_defaults': {
-    'conditions': [
-      ['OS!="mac"', {
-        'conditions': [
-          ['v8_target_arch=="arm"', {
-            'defines': [
-              'V8_TARGET_ARCH_ARM',
-            ],
-            'conditions': [
-              [ 'v8_can_use_unaligned_accesses=="true"', {
-                'defines': [
-                  'CAN_USE_UNALIGNED_ACCESSES=1',
-                ],
-              }],
-              [ 'v8_can_use_unaligned_accesses=="false"', {
-                'defines': [
-                  'CAN_USE_UNALIGNED_ACCESSES=0',
-                ],
-              }],
-              [ 'v8_can_use_vfp_instructions=="true"', {
-                'defines': [
-                  'CAN_USE_VFP_INSTRUCTIONS',
-                ],
-              }],
-              [ 'v8_use_arm_eabi_hardfloat=="true"', {
-                'defines': [
-                  'USE_EABI_HARDFLOAT=1',
-                  'CAN_USE_VFP_INSTRUCTIONS',
-                ],
-                'cflags': [
-                  '-mfloat-abi=hard',
-                ],
-              }, {
-                'defines': [
-                  'USE_EABI_HARDFLOAT=0',
-                ],
-              }],
-            ],
-          }],
-          ['v8_target_arch=="ia32"', {
-            'defines': [
-              'V8_TARGET_ARCH_IA32',
-            ],
-          }],
-          ['v8_target_arch=="mips"', {
-            'defines': [
-              'V8_TARGET_ARCH_MIPS',
-            ],
-          }],
-          ['v8_target_arch=="x64"', {
-            'defines': [
-              'V8_TARGET_ARCH_X64',
-            ],
-          }],
-        ],
-      }],
-    ],
-    'configurations': {
-      'Debug': {
-        'defines': [
-          'DEBUG',
-          '_DEBUG',
-          'ENABLE_DISASSEMBLER',
-          'V8_ENABLE_CHECKS',
-          'OBJECT_PRINT',
-        ],
-      }
-    }
-  }
-}
diff --git a/preparser/preparser.gyp b/preparser/preparser.gyp
index bcccb48..0b03382 100644
--- a/preparser/preparser.gyp
+++ b/preparser/preparser.gyp
@@ -26,6 +26,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 {
+  'includes': ['../build/common.gypi'],
   'targets': [
     {
       'target_name': 'preparser',
diff --git a/samples/samples.gyp b/samples/samples.gyp
index 7023e0a..55b2a98 100644
--- a/samples/samples.gyp
+++ b/samples/samples.gyp
@@ -26,6 +26,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 {
+  'includes': ['../build/common.gypi'],
   'target_defaults': {
     'type': 'executable',
     'dependencies': [
diff --git a/src/accessors.cc b/src/accessors.cc
index 806c679..e7d6aa0 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -680,6 +680,51 @@
 }
 
 
+class FrameFunctionIterator {
+ public:
+  FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise)
+      : frame_iterator_(isolate),
+        functions_(2),
+        index_(0) {
+    GetFunctions();
+  }
+  JSFunction* next() {
+    if (functions_.length() == 0) return NULL;
+    JSFunction* next_function = functions_[index_];
+    index_--;
+    if (index_ < 0) {
+      GetFunctions();
+    }
+    return next_function;
+  }
+
+  // Iterate through functions until the first occurrence of 'function'.
+  // Returns true if 'function' is found, and false if the iterator ends
+  // without finding it.
+  bool Find(JSFunction* function) {
+    JSFunction* next_function;
+    do {
+      next_function = next();
+      if (next_function == function) return true;
+    } while (next_function != NULL);
+    return false;
+  }
+ private:
+  void GetFunctions() {
+    functions_.Rewind(0);
+    if (frame_iterator_.done()) return;
+    JavaScriptFrame* frame = frame_iterator_.frame();
+    frame->GetFunctions(&functions_);
+    ASSERT(functions_.length() > 0);
+    frame_iterator_.Advance();
+    index_ = functions_.length() - 1;
+  }
+  JavaScriptFrameIterator frame_iterator_;
+  List<JSFunction*> functions_;
+  int index_;
+};
+
+
 MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
   Isolate* isolate = Isolate::Current();
   HandleScope scope(isolate);
@@ -689,38 +734,30 @@
   if (!found_it) return isolate->heap()->undefined_value();
   Handle<JSFunction> function(holder, isolate);
 
-  List<JSFunction*> functions(2);
-  for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
-    JavaScriptFrame* frame = it.frame();
-    frame->GetFunctions(&functions);
-    for (int i = functions.length() - 1; i >= 0; i--) {
-      if (functions[i] == *function) {
-        // Once we have found the frame, we need to go to the caller
-        // frame. This may require skipping through a number of top-level
-        // frames, e.g. frames for scripts not functions.
-        if (i > 0) {
-          ASSERT(!functions[i - 1]->shared()->is_toplevel());
-          return CheckNonStrictCallerOrThrow(isolate, functions[i - 1]);
-        } else {
-          for (it.Advance(); !it.done(); it.Advance()) {
-            frame = it.frame();
-            functions.Rewind(0);
-            frame->GetFunctions(&functions);
-            if (!functions.last()->shared()->is_toplevel()) {
-              return CheckNonStrictCallerOrThrow(isolate, functions.last());
-            }
-            ASSERT(functions.length() == 1);
-          }
-          if (it.done()) return isolate->heap()->null_value();
-          break;
-        }
-      }
-    }
-    functions.Rewind(0);
+  FrameFunctionIterator it(isolate, no_alloc);
+
+  // Find the function from the frames.
+  if (!it.Find(*function)) {
+    // No frame corresponding to the given function found. Return null.
+    return isolate->heap()->null_value();
   }
 
-  // No frame corresponding to the given function found. Return null.
-  return isolate->heap()->null_value();
+  // Find previously called non-toplevel function.
+  JSFunction* caller;
+  do {
+    caller = it.next();
+    if (caller == NULL) return isolate->heap()->null_value();
+  } while (caller->shared()->is_toplevel());
+
+  // If caller is a built-in function and caller's caller is also built-in,
+  // use that instead.
+  JSFunction* potential_caller = caller;
+  while (potential_caller != NULL && potential_caller->IsBuiltin()) {
+    caller = potential_caller;
+    potential_caller = it.next();
+  }
+
+  return CheckNonStrictCallerOrThrow(isolate, caller);
 }
 
 
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index ba345e2..ffe32bc 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -4367,6 +4367,8 @@
   __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
   __ b(gt, &runtime);
 
+  // Reset offset for possibly sliced string.
+  __ mov(r9, Operand(0));
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
   // Check the representation and encoding of the subject string.
@@ -4374,33 +4376,45 @@
   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   // First check for flat string.
-  __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
+  __ and_(r1, r0, Operand(kIsNotStringMask | kStringRepresentationMask), SetCC);
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   __ b(eq, &seq_string);
 
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
-  // Check for flat cons string.
+  // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  STATIC_ASSERT(kExternalStringTag !=0);
-  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
-  __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
-  __ b(ne, &runtime);
+  // In the case of a sliced string, its offset has to be taken into account.
+  Label cons_string, check_encoding;
+  STATIC_ASSERT((kConsStringTag < kExternalStringTag));
+  STATIC_ASSERT((kSlicedStringTag > kExternalStringTag));
+  __ cmp(r1, Operand(kExternalStringTag));
+  __ b(lt, &cons_string);
+  __ b(eq, &runtime);
+
+  // String is sliced.
+  __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+  __ mov(r9, Operand(r9, ASR, kSmiTagSize));
+  __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+  // r9: offset of sliced string, smi-tagged.
+  __ jmp(&check_encoding);
+  // String is a cons string, check whether it is flat.
+  __ bind(&cons_string);
   __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
   __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
   __ cmp(r0, r1);
   __ b(ne, &runtime);
   __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+  // Is first part of cons or parent of slice a flat string?
+  __ bind(&check_encoding);
   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
-  // Is first part a flat string?
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(r0, Operand(kStringRepresentationMask));
   __ b(ne, &runtime);
-
   __ bind(&seq_string);
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
@@ -4466,21 +4480,30 @@
 
   // For arguments 4 and 3 get string length, calculate start of string data and
   // calculate the shift of the index (0 for ASCII and 1 for two byte).
-  __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
-  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
   STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   __ eor(r3, r3, Operand(1));
-  // Argument 4 (r3): End of string data
-  // Argument 3 (r2): Start of string data
+  // Load the length from the original subject string from the previous stack
+  // frame. Therefore we have to use fp, which points exactly to two pointer
+  // sizes below the previous sp. (Because creating a new stack frame pushes
+  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
+  __ ldr(r0, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+  // If slice offset is not 0, load the length from the original sliced string.
+  // Argument 4, r3: End of string data
+  // Argument 3, r2: Start of string data
+  // Prepare start and end index of the input.
+  __ add(r9, r8, Operand(r9, LSL, r3));
   __ add(r2, r9, Operand(r1, LSL, r3));
-  __ add(r3, r9, Operand(r0, LSL, r3));
+
+  __ ldr(r8, FieldMemOperand(r0, String::kLengthOffset));
+  __ mov(r8, Operand(r8, ASR, kSmiTagSize));
+  __ add(r3, r9, Operand(r8, LSL, r3));
 
   // Argument 2 (r1): Previous index.
   // Already there
 
   // Argument 1 (r0): Subject string.
-  __ mov(r0, subject);
+  // Already there
 
   // Locate the code entry and call it.
   __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4497,12 +4520,12 @@
   // Check the result.
   Label success;
 
-  __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+  __ cmp(subject, Operand(NativeRegExpMacroAssembler::SUCCESS));
   __ b(eq, &success);
   Label failure;
-  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
+  __ cmp(subject, Operand(NativeRegExpMacroAssembler::FAILURE));
   __ b(eq, &failure);
-  __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+  __ cmp(subject, Operand(NativeRegExpMacroAssembler::EXCEPTION));
   // If not exception it can only be retry. Handle that in the runtime system.
   __ b(ne, &runtime);
   // Result must now be exception. If there is no pending exception already a
@@ -4514,18 +4537,18 @@
   __ mov(r2, Operand(ExternalReference(Isolate::k_pending_exception_address,
                                        isolate)));
   __ ldr(r0, MemOperand(r2, 0));
-  __ cmp(r0, r1);
+  __ cmp(subject, r1);
   __ b(eq, &runtime);
 
   __ str(r1, MemOperand(r2, 0));  // Clear pending exception.
 
   // Check if the exception is a termination. If so, throw as uncatchable.
   __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex);
-  __ cmp(r0, ip);
+  __ cmp(subject, ip);
   Label termination_exception;
   __ b(eq, &termination_exception);
 
-  __ Throw(r0);  // Expects thrown value in r0.
+  __ Throw(subject);  // Expects thrown value in r0.
 
   __ bind(&termination_exception);
   __ ThrowUncatchable(TERMINATION, r0);  // Expects thrown value in r0.
@@ -4803,6 +4826,7 @@
   Label flat_string;
   Label ascii_string;
   Label got_char_code;
+  Label sliced_string;
 
   // If the receiver is a smi trigger the non-string case.
   __ JumpIfSmi(object_, receiver_not_string_);
@@ -4832,7 +4856,11 @@
   __ b(eq, &flat_string);
 
   // Handle non-flat strings.
-  __ tst(result_, Operand(kIsConsStringMask));
+  __ and_(result_, result_, Operand(kStringRepresentationMask));
+  STATIC_ASSERT((kConsStringTag < kExternalStringTag));
+  STATIC_ASSERT((kSlicedStringTag > kExternalStringTag));
+  __ cmp(result_, Operand(kExternalStringTag));
+  __ b(gt, &sliced_string);
   __ b(eq, &call_runtime_);
 
   // ConsString.
@@ -4840,15 +4868,26 @@
   // this is really a flat string in a cons string). If that is not
   // the case we would rather go to the runtime system now to flatten
   // the string.
+  Label assure_seq_string;
   __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
   __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
   __ cmp(result_, Operand(ip));
   __ b(ne, &call_runtime_);
   // Get the first of the two strings and load its instance type.
   __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+  __ jmp(&assure_seq_string);
+
+  // SlicedString, unpack and add offset.
+  __ bind(&sliced_string);
+  __ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
+  __ add(scratch_, scratch_, result_);
+  __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
+
+  // Assure that we are dealing with a sequential string. Go to runtime if not.
+  __ bind(&assure_seq_string);
   __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
+  // Check that parent is not an external string. Go to runtime otherwise.
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(result_, Operand(kStringRepresentationMask));
   __ b(ne, &call_runtime_);
@@ -5428,10 +5467,17 @@
   // Check bounds and smi-ness.
   Register to = r6;
   Register from = r7;
+
+  if (FLAG_string_slices) {
+    __ nop(0);  // Jumping as the first instruction would crash code generation.
+    __ jmp(&runtime);
+  }
+
   __ Ldrd(to, from, MemOperand(sp, kToOffset));
   STATIC_ASSERT(kFromOffset == kToOffset + 4);
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+
   // I.e., arithmetic shift right by one un-smi-tags.
   __ mov(r2, Operand(to, ASR, 1), SetCC);
   __ mov(r3, Operand(from, ASR, 1), SetCC, cc);
@@ -5440,7 +5486,6 @@
   __ b(mi, &runtime);  // From is negative.
 
   // Both to and from are smis.
-
   __ sub(r2, r2, Operand(r3), SetCC);
   __ b(mi, &runtime);  // Fail if from > to.
   // Special handling of sub-strings of length 1 and 2. One character strings
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 97f7fea..b58743d 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -47,7 +47,6 @@
 
 
 static unsigned GetPropertyId(Property* property) {
-  if (property->is_synthetic()) return AstNode::kNoNumber;
   return property->id();
 }
 
@@ -694,104 +693,73 @@
   Comment cmnt(masm_, "[ Declaration");
   ASSERT(variable != NULL);  // Must have been resolved.
   Slot* slot = variable->AsSlot();
-  Property* prop = variable->AsProperty();
-
-  if (slot != NULL) {
-    switch (slot->type()) {
-      case Slot::PARAMETER:
-      case Slot::LOCAL:
-        if (mode == Variable::CONST) {
-          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-          __ str(ip, MemOperand(fp, SlotOffset(slot)));
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
-        }
-        break;
-
-      case Slot::CONTEXT:
-        // We bypass the general EmitSlotSearch because we know more about
-        // this specific context.
-
-        // The variable in the decl always resides in the current function
-        // context.
-        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-        if (FLAG_debug_code) {
-          // Check that we're not inside a with or catch context.
-          __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
-          __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
-          __ Check(ne, "Declaration in with context.");
-          __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
-          __ Check(ne, "Declaration in catch context.");
-        }
-        if (mode == Variable::CONST) {
-          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-          __ str(ip, ContextOperand(cp, slot->index()));
-          // No write barrier since the_hole_value is in old space.
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ str(result_register(), ContextOperand(cp, slot->index()));
-          int offset = Context::SlotOffset(slot->index());
-          // We know that we have written a function, which is not a smi.
-          __ mov(r1, Operand(cp));
-          __ RecordWrite(r1, Operand(offset), r2, result_register());
-        }
-        break;
-
-      case Slot::LOOKUP: {
-        __ mov(r2, Operand(variable->name()));
-        // Declaration nodes are always introduced in one of two modes.
-        ASSERT(mode == Variable::VAR ||
-               mode == Variable::CONST ||
-               mode == Variable::LET);
-        PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
-        __ mov(r1, Operand(Smi::FromInt(attr)));
-        // Push initial value, if any.
-        // Note: For variables we must not push an initial value (such as
-        // 'undefined') because we may have a (legal) redeclaration and we
-        // must not destroy the current value.
-        if (mode == Variable::CONST) {
-          __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
-          __ Push(cp, r2, r1, r0);
-        } else if (function != NULL) {
-          __ Push(cp, r2, r1);
-          // Push initial value for function declaration.
-          VisitForStackValue(function);
-        } else {
-          __ mov(r0, Operand(Smi::FromInt(0)));  // No initial value!
-          __ Push(cp, r2, r1, r0);
-        }
-        __ CallRuntime(Runtime::kDeclareContextSlot, 4);
-        break;
+  ASSERT(slot != NULL);
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+    case Slot::LOCAL:
+      if (mode == Variable::CONST) {
+        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+        __ str(ip, MemOperand(fp, SlotOffset(slot)));
+      } else if (function != NULL) {
+        VisitForAccumulatorValue(function);
+        __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
       }
-    }
+      break;
 
-  } else if (prop != NULL) {
-    // A const declaration aliasing a parameter is an illegal redeclaration.
-    ASSERT(mode != Variable::CONST);
-    if (function != NULL) {
-      // We are declaring a function that rewrites to a property.
-      // Use (keyed) IC to set the initial value.  We cannot visit the
-      // rewrite because it's shared and we risk recording duplicate AST
-      // IDs for bailouts from optimized code.
-      ASSERT(prop->obj()->AsVariableProxy() != NULL);
-      { AccumulatorValueContext for_object(this);
-        EmitVariableLoad(prop->obj()->AsVariableProxy());
+    case Slot::CONTEXT:
+      // We bypass the general EmitSlotSearch because we know more about
+      // this specific context.
+
+      // The variable in the decl always resides in the current function
+      // context.
+      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+      if (FLAG_debug_code) {
+        // Check that we're not inside a with or catch context.
+        __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
+        __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
+        __ Check(ne, "Declaration in with context.");
+        __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
+        __ Check(ne, "Declaration in catch context.");
       }
+      if (mode == Variable::CONST) {
+        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+        __ str(ip, ContextOperand(cp, slot->index()));
+        // No write barrier since the_hole_value is in old space.
+      } else if (function != NULL) {
+        VisitForAccumulatorValue(function);
+        __ str(result_register(), ContextOperand(cp, slot->index()));
+        int offset = Context::SlotOffset(slot->index());
+        // We know that we have written a function, which is not a smi.
+        __ mov(r1, Operand(cp));
+        __ RecordWrite(r1, Operand(offset), r2, result_register());
+      }
+      break;
 
-      __ push(r0);
-      VisitForAccumulatorValue(function);
-      __ pop(r2);
-
-      ASSERT(prop->key()->AsLiteral() != NULL &&
-             prop->key()->AsLiteral()->handle()->IsSmi());
-      __ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
-
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
-      __ Call(ic);
-      // Value in r0 is ignored (declarations are statements).
+    case Slot::LOOKUP: {
+      __ mov(r2, Operand(variable->name()));
+      // Declaration nodes are always introduced in one of two modes.
+      ASSERT(mode == Variable::VAR ||
+             mode == Variable::CONST ||
+             mode == Variable::LET);
+      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      __ mov(r1, Operand(Smi::FromInt(attr)));
+      // Push initial value, if any.
+      // Note: For variables we must not push an initial value (such as
+      // 'undefined') because we may have a (legal) redeclaration and we
+      // must not destroy the current value.
+      if (mode == Variable::CONST) {
+        __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+        __ Push(cp, r2, r1, r0);
+      } else if (function != NULL) {
+        __ Push(cp, r2, r1);
+        // Push initial value for function declaration.
+        VisitForStackValue(function);
+      } else {
+        __ mov(r0, Operand(Smi::FromInt(0)));  // No initial value!
+        __ Push(cp, r2, r1, r0);
+      }
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
     }
   }
 }
@@ -2272,36 +2240,10 @@
       EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
     } else {
       // Call to a keyed property.
-      // For a synthetic property use keyed load IC followed by function call,
-      // for a regular property use EmitKeyedCallWithIC.
-      if (prop->is_synthetic()) {
-        // Do not visit the object and key subexpressions (they are shared
-        // by all occurrences of the same rewritten parameter).
-        ASSERT(prop->obj()->AsVariableProxy() != NULL);
-        ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
-        Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
-        MemOperand operand = EmitSlotSearch(slot, r1);
-        __ ldr(r1, operand);
-
-        ASSERT(prop->key()->AsLiteral() != NULL);
-        ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
-        __ mov(r0, Operand(prop->key()->AsLiteral()->handle()));
-
-        // Record source code position for IC call.
-        SetSourcePosition(prop->position());
-
-        Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-        __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
-        __ ldr(r1, GlobalObjectOperand());
-        __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
-        __ Push(r0, r1);  // Function, receiver.
-        EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
-      } else {
-        { PreservePositionScope scope(masm()->positions_recorder());
-          VisitForStackValue(prop->obj());
-        }
-        EmitKeyedCallWithIC(expr, prop->key());
+      { PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(prop->obj());
       }
+      EmitKeyedCallWithIC(expr, prop->key());
     }
   } else {
     { PreservePositionScope scope(masm()->positions_recorder());
@@ -3580,39 +3522,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-
-  // Load the function into r0.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Prepare for the test.
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  // Test for strict mode function.
-  __ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
-  __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
-                           kSmiTagSize)));
-  __ b(ne, if_true);
-
-  // Test for native function.
-  __ tst(r1, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-  __ b(ne, if_true);
-
-  // Not native or strict-mode function.
-  __ b(if_false);
-
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   Handle<String> name = expr->name();
   if (name->length() > 0 && name->Get(0) == '_') {
@@ -3664,18 +3573,12 @@
       Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
 
       if (prop != NULL) {
-        if (prop->is_synthetic()) {
-          // Result of deleting parameters is false, even when they rewrite
-          // to accesses on the arguments object.
-          context()->Plug(false);
-        } else {
-          VisitForStackValue(prop->obj());
-          VisitForStackValue(prop->key());
-          __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
-          __ push(r1);
-          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-          context()->Plug(r0);
-        }
+        VisitForStackValue(prop->obj());
+        VisitForStackValue(prop->key());
+        __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
+        __ push(r1);
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        context()->Plug(r0);
       } else if (var != NULL) {
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is.
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 38f77cd..30d7a1c 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1999,8 +1999,8 @@
 
 
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
-  LOperand* string = UseRegister(instr->string());
-  LOperand* index = UseRegisterOrConstant(instr->index());
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
   LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 3469bb6..65a6169 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -3455,97 +3455,83 @@
     LStringCharCodeAt* instr_;
   };
 
-  Register scratch = scratch0();
   Register string = ToRegister(instr->string());
-  Register index = no_reg;
-  int const_index = -1;
-  if (instr->index()->IsConstantOperand()) {
-    const_index = ToInteger32(LConstantOperand::cast(instr->index()));
-    STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
-    if (!Smi::IsValid(const_index)) {
-      // Guaranteed to be out of bounds because of the assert above.
-      // So the bounds check that must dominate this instruction must
-      // have deoptimized already.
-      if (FLAG_debug_code) {
-        __ Abort("StringCharCodeAt: out of bounds index.");
-      }
-      // No code needs to be generated.
-      return;
-    }
-  } else {
-    index = ToRegister(instr->index());
-  }
+  Register index = ToRegister(instr->index());
   Register result = ToRegister(instr->result());
 
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  Label flat_string, ascii_string, done;
-
   // Fetch the instance type of the receiver into result register.
   __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
   __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(result, Operand(kStringRepresentationMask));
-  __ b(eq, &flat_string);
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ tst(result, Operand(kIsIndirectStringMask));
+  __ b(eq, &check_sequential);
 
-  // Handle non-flat strings.
-  __ tst(result, Operand(kIsConsStringMask));
-  __ b(eq, deferred->entry());
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  const uint32_t kSlicedNotConsMask = kSlicedStringTag & ~kConsStringTag;
+  ASSERT(IsPowerOf2(kSlicedNotConsMask) && kSlicedNotConsMask != 0);
+  __ tst(result, Operand(kSlicedNotConsMask));
+  __ b(eq, &cons_string);
 
-  // ConsString.
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ add(index, index, Operand(result, ASR, kSmiTagSize));
+  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded);
+
+  // Handle conses.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
   // the case we would rather go to the runtime system now to flatten
   // the string.
-  __ ldr(scratch, FieldMemOperand(string, ConsString::kSecondOffset));
+  __ bind(&cons_string);
+  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
   __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
-  __ cmp(scratch, ip);
+  __ cmp(result, ip);
   __ b(ne, deferred->entry());
   // Get the first of the two strings and load its instance type.
   __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
   __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
   __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
+
+  // Check whether the string is sequential. The only non-sequential
+  // shapes we support have just been unwrapped above.
+  __ bind(&check_sequential);
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(result, Operand(kStringRepresentationMask));
   __ b(ne, deferred->entry());
 
-  // Check for 1-byte or 2-byte string.
-  __ bind(&flat_string);
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii_string;
   STATIC_ASSERT(kAsciiStringTag != 0);
   __ tst(result, Operand(kStringEncodingMask));
   __ b(ne, &ascii_string);
 
-  // 2-byte string.
-  // Load the 2-byte character code into the result register.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  if (instr->index()->IsConstantOperand()) {
-    __ ldrh(result,
-            FieldMemOperand(string,
-                            SeqTwoByteString::kHeaderSize + 2 * const_index));
-  } else {
-    __ add(scratch,
-           string,
-           Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-    __ ldrh(result, MemOperand(scratch, index, LSL, 1));
-  }
+  // Two-byte string.
+  // Load the two-byte character code into the result register.
+  Label done;
+  __ add(result,
+         string,
+         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ ldrh(result, MemOperand(result, index, LSL, 1));
   __ jmp(&done);
 
   // ASCII string.
   // Load the byte into the result register.
   __ bind(&ascii_string);
-  if (instr->index()->IsConstantOperand()) {
-    __ ldrb(result, FieldMemOperand(string,
-                                    SeqAsciiString::kHeaderSize + const_index));
-  } else {
-    __ add(scratch,
-           string,
-           Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-    __ ldrb(result, MemOperand(scratch, index));
-  }
+  __ add(result,
+         string,
+         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ ldrb(result, MemOperand(result, index));
+
   __ bind(&done);
   __ bind(deferred->exit());
 }
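
The rewritten DoStringCharCodeAt above unwraps at most one level of string indirection -- a slice (adjust the index, follow the parent) or a flat cons (follow the first part) -- and then insists on a sequential string, punting to the deferred runtime path otherwise. A hedged, standalone C++ model of that control flow (invented Str struct, not V8's heap layout):

// Standalone model of the char-code-at fast path: unwrap one level of
// slice/cons indirection, then read from a sequential backing string.
#include <cstdint>
#include <optional>
#include <string>

struct Str {
  enum Kind { kSeq, kCons, kSliced } kind;
  std::string seq;              // kSeq: the characters themselves.
  const Str* first = nullptr;   // kCons: left part (right part assumed empty).
  const Str* parent = nullptr;  // kSliced: underlying sequential string.
  int offset = 0;               // kSliced: start offset into the parent.
};

// Returns the character code, or nullopt where the generated code would
// jump to the deferred (runtime) path instead.
std::optional<uint16_t> CharCodeAt(const Str* s, int index) {
  if (s->kind == Str::kSliced) {       // Handle slices: adjust the index
    index += s->offset;                // and continue with the parent.
    s = s->parent;
  } else if (s->kind == Str::kCons) {  // Handle flat conses: continue with
    s = s->first;                      // the first part.
  }
  if (s->kind != Str::kSeq) return std::nullopt;  // Still not sequential.
  return static_cast<uint16_t>(s->seq[index]);
}

int main() {
  Str base{Str::kSeq, "hello world"};
  Str slice{Str::kSliced, "", nullptr, &base, 6};
  return CharCodeAt(&slice, 0).value() == 'w' ? 0 : 1;
}

Reading index 0 of the slice yields the same character the ARM fast path would load from the parent at offset 6.
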
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 983a528..81645c7 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -1034,12 +1034,13 @@
   }
 
   // Prepare for possible GC.
-  HandleScope handles;
+  HandleScope handles(isolate);
   Handle<Code> code_handle(re_code);
 
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+
   // Current string.
-  bool is_ascii = subject->IsAsciiRepresentation();
+  bool is_ascii = subject->IsAsciiRepresentationUnderneath();
 
   ASSERT(re_code->instruction_start() <= *return_address);
   ASSERT(*return_address <=
@@ -1057,8 +1058,20 @@
     return EXCEPTION;
   }
 
+  Handle<String> subject_tmp = subject;
+  int slice_offset = 0;
+
+  // Extract the underlying string and the slice offset.
+  if (StringShape(*subject_tmp).IsCons()) {
+    subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+  } else if (StringShape(*subject_tmp).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(*subject_tmp);
+    subject_tmp = Handle<String>(slice->parent());
+    slice_offset = slice->offset();
+  }
+
   // String might have changed.
-  if (subject->IsAsciiRepresentation() != is_ascii) {
+  if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
     // If we changed between an ASCII and an UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
@@ -1069,8 +1082,8 @@
   // be a sequential or external string with the same content.
   // Update the start and end pointers in the stack frame to the current
   // location (whether it has actually moved or not).
-  ASSERT(StringShape(*subject).IsSequential() ||
-      StringShape(*subject).IsExternal());
+  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+      StringShape(*subject_tmp).IsExternal());
 
   // The original start address of the characters to match.
   const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
@@ -1078,13 +1091,14 @@
   // Find the current start address of the same character at the current string
   // position.
   int start_index = frame_entry<int>(re_frame, kStartIndex);
-  const byte* new_address = StringCharacterPosition(*subject, start_index);
+  const byte* new_address = StringCharacterPosition(*subject_tmp,
+                                                    start_index + slice_offset);
 
   if (start_address != new_address) {
     // If there is a difference, update the object pointer and start and end
     // addresses in the RegExp stack frame to match the new value.
     const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
-    int byte_length = end_address - start_address;
+    int byte_length = static_cast<int>(end_address - start_address);
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
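
The GC-check code above now re-derives the match start address from the underlying sequential string plus the slice offset, instead of from the (possibly indirect) subject itself, and recomputes the end pointer from the preserved byte length. A rough standalone sketch of that fix-up, with plain pointers standing in for V8's handles and frame entries:

// Illustrative only: after a GC may have moved the string, recompute the
// start/end pointers from the underlying flat string and the slice offset.
#include <cstddef>
#include <cstdio>
#include <string>

struct Subject {
  const std::string* underlying;  // Flat backing characters.
  int slice_offset;               // 0 unless the subject was a slice.
};

struct RegExpFrame {
  const char* input_start;
  const char* input_end;
};

void UpdateAfterGC(RegExpFrame* frame, const Subject& subject, int start_index) {
  const char* new_start =
      subject.underlying->data() + start_index + subject.slice_offset;
  if (new_start != frame->input_start) {
    // Keep the (end - start) byte length; only the base address moved.
    std::ptrdiff_t byte_length = frame->input_end - frame->input_start;
    frame->input_start = new_start;
    frame->input_end = new_start + byte_length;
  }
}

int main() {
  std::string backing = "xxabcdef";
  Subject s{&backing, /*slice_offset=*/2};  // Models a slice starting at 2.
  RegExpFrame frame{backing.data() + 2, backing.data() + backing.size()};
  UpdateAfterGC(&frame, s, /*start_index=*/0);
  std::printf("%td bytes to scan\n", frame.input_end - frame.input_start);
  return 0;
}
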
diff --git a/src/array.js b/src/array.js
index e6c13d9..a12fdc8 100644
--- a/src/array.js
+++ b/src/array.js
@@ -742,8 +742,7 @@
       else return x < y ? -1 : 1;
     };
   }
-  var receiver =
-      %_IsNativeOrStrictMode(comparefn) ? void 0 : %GetGlobalReceiver();
+  var receiver = %GetDefaultReceiver(comparefn);
 
   function InsertionSort(a, from, to) {
     for (var i = from + 1; i < to; i++) {
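
The array.js hunk above folds the old native-or-strict test into a single %GetDefaultReceiver call. The rule it encapsulates -- strict-mode and native callees get undefined as their default receiver, ordinary sloppy-mode callees get the global receiver -- can be sketched with plain C++ stand-ins (this is not the runtime function itself):

// Stand-in types only: the default receiver passed to a callback such as a
// sort comparator depends on whether the callee is strict-mode or native.
#include <iostream>

struct Function {
  bool is_strict_mode;
  bool is_native;
};

enum class Receiver { kUndefined, kGlobalReceiver };

Receiver GetDefaultReceiver(const Function& callee) {
  // Strict and native functions must not see the global object as 'this'.
  if (callee.is_strict_mode || callee.is_native) return Receiver::kUndefined;
  return Receiver::kGlobalReceiver;
}

int main() {
  Function sloppy{false, false};
  Function strict{true, false};
  std::cout << (GetDefaultReceiver(sloppy) == Receiver::kGlobalReceiver)
            << (GetDefaultReceiver(strict) == Receiver::kUndefined) << "\n";
  return 0;
}
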
diff --git a/src/ast.h b/src/ast.h
index 399d278..74182d5 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1231,21 +1231,14 @@
 
 class Property: public Expression {
  public:
-  // Synthetic properties are property lookups introduced by the system,
-  // to objects that aren't visible to the user. Function calls to synthetic
-  // properties should use the global object as receiver, not the base object
-  // of the resolved Reference.
-  enum Type { NORMAL, SYNTHETIC };
   Property(Isolate* isolate,
            Expression* obj,
            Expression* key,
-           int pos,
-           Type type = NORMAL)
+           int pos)
       : Expression(isolate),
         obj_(obj),
         key_(key),
         pos_(pos),
-        type_(type),
         is_monomorphic_(false),
         is_array_length_(false),
         is_string_length_(false),
@@ -1260,7 +1253,6 @@
   Expression* obj() const { return obj_; }
   Expression* key() const { return key_; }
   virtual int position() const { return pos_; }
-  bool is_synthetic() const { return type_ == SYNTHETIC; }
 
   bool IsStringLength() const { return is_string_length_; }
   bool IsStringAccess() const { return is_string_access_; }
@@ -1276,7 +1268,6 @@
   Expression* obj_;
   Expression* key_;
   int pos_;
-  Type type_;
 
   SmallMapList receiver_types_;
   bool is_monomorphic_ : 1;
diff --git a/src/d8.gyp b/src/d8.gyp
index 2d9af78..70186cf 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -26,6 +26,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 {
+  'includes': ['../build/common.gypi'],
   'variables': {
     'console%': '',
   },
@@ -36,6 +37,7 @@
       'dependencies': [
         '../tools/gyp/v8.gyp:v8',
       ],
+      # Generated source files need this explicitly:
       'include_dirs+': [
         '../src',
       ],
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 033d924..8641261 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -336,6 +336,10 @@
     return malloc(size + frame_size - kPointerSize);
   }
 
+  void operator delete(void* pointer, uint32_t frame_size) {
+    free(pointer);
+  }
+
   void operator delete(void* description) {
     free(description);
   }
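
The extra operator delete overload above pairs with the class's extended operator new taking a frame size, likely to satisfy compilers that warn when an extended new has no matching deallocation function (MSVC's C4291 is the usual example, which fits the MSVS warning fixes in this change). A minimal, self-contained example of the idiom, using an invented Block class rather than the class touched here:

// Minimal illustration of pairing a custom extended operator new with a
// matching operator delete.
#include <cstddef>
#include <cstdlib>

class Block {
 public:
  explicit Block(unsigned extra) : extra_(extra) {}

  // Allocate the object plus 'extra' trailing bytes in one chunk.
  void* operator new(std::size_t size, unsigned extra) {
    return std::malloc(size + extra);
  }

  // Matching deallocation function: used if the constructor throws, and its
  // absence is what triggers "no matching operator delete" warnings.
  void operator delete(void* p, unsigned /*extra*/) { std::free(p); }

  // Ordinary delete, used by a normal 'delete' expression.
  void operator delete(void* p) { std::free(p); }

 private:
  unsigned extra_;
};

int main() {
  Block* b = new (16u) Block(16u);  // Calls operator new(size, 16u).
  delete b;                         // Calls operator delete(void*).
  return 0;
}
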
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 2d8f6fa..7df2b0b 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -104,6 +104,7 @@
 
 // Flags for experimental implementation features.
 DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
+DEFINE_bool(string_slices, false, "use string slices")
 
 // Flags for Crankshaft.
 #ifdef V8_TARGET_ARCH_MIPS
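
DEFINE_bool(string_slices, false, ...) above adds the flag that gates the new sliced-string paths (heap.cc and the ia32 SubString stub below both test FLAG_string_slices), defaulting to off. Flag lists like this are commonly generated with an X-macro so that a single list yields both storage and iteration; a generic sketch of that technique with made-up macro names, not necessarily how flag-definitions.h itself is wired:

// Generic sketch of the "expand the flag list twice" technique.
#include <cstdio>

#define FLAG_LIST(V)                                           \
  V(bool, string_slices, false, "use string slices")           \
  V(bool, unbox_double_arrays, true, "unbox arrays of doubles")

// First expansion: define storage for every flag.
#define DEFINE_FLAG(type, name, default_value, comment) \
  type FLAG_##name = default_value;
FLAG_LIST(DEFINE_FLAG)
#undef DEFINE_FLAG

int main() {
  // Second expansion: print each flag with its current value.
#define PRINT_FLAG(type, name, default_value, comment) \
  std::printf("%s = %d  (%s)\n", #name, static_cast<int>(FLAG_##name), comment);
  FLAG_LIST(PRINT_FLAG)
#undef PRINT_FLAG
  return 0;
}
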
diff --git a/src/heap-inl.h b/src/heap-inl.h
index b08655c..7b666af 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -323,10 +323,10 @@
   ASSERT(type != JS_GLOBAL_PROPERTY_CELL_TYPE);
 
   if (type < FIRST_NONSTRING_TYPE) {
-    // There are three string representations: sequential strings, cons
-    // strings, and external strings.  Only cons strings contain
-    // non-map-word pointers to heap objects.
-    return ((type & kStringRepresentationMask) == kConsStringTag)
+    // There are four string representations: sequential strings, external
+    // strings, cons strings, and sliced strings.
+    // Only the latter two contain non-map-word pointers to heap objects.
+    return ((type & kIsIndirectStringMask) == kIsIndirectStringTag)
         ? OLD_POINTER_SPACE
         : OLD_DATA_SPACE;
   } else {
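
The heap-inl.h hunk above collapses the cons/sliced distinction into a single "indirect string" test when choosing the promotion space: only indirect strings carry non-map-word pointers and therefore need the old pointer space. A toy version of that predicate with placeholder tag values (the real instance-type encoding lives elsewhere and may differ):

// Toy tag layout for illustration only.
#include <cassert>
#include <cstdint>

enum Space { OLD_POINTER_SPACE, OLD_DATA_SPACE };

const uint32_t kSeqStringTag      = 0x0;
const uint32_t kConsStringTag     = 0x1;
const uint32_t kExternalStringTag = 0x2;
const uint32_t kSlicedStringTag   = 0x3;

// Cons and sliced strings both point at other strings, so a single mask/tag
// pair classifies them as "indirect".
const uint32_t kIsIndirectStringMask = 0x1;
const uint32_t kIsIndirectStringTag  = 0x1;

Space TargetSpaceForString(uint32_t type) {
  return (type & kIsIndirectStringMask) == kIsIndirectStringTag
             ? OLD_POINTER_SPACE  // Holds pointers: needs pointer space.
             : OLD_DATA_SPACE;    // Raw characters / external data only.
}

int main() {
  assert(TargetSpaceForString(kConsStringTag) == OLD_POINTER_SPACE);
  assert(TargetSpaceForString(kSlicedStringTag) == OLD_POINTER_SPACE);
  assert(TargetSpaceForString(kSeqStringTag) == OLD_DATA_SPACE);
  assert(TargetSpaceForString(kExternalStringTag) == OLD_DATA_SPACE);
  return 0;
}
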
diff --git a/src/heap.cc b/src/heap.cc
index e080cde..90d0e11 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1290,6 +1290,10 @@
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                         template VisitSpecialized<ConsString::kSize>);
 
+    table_.Register(kVisitSlicedString,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                        template VisitSpecialized<SlicedString::kSize>);
+
     table_.Register(kVisitSharedFunctionInfo,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                         template VisitSpecialized<SharedFunctionInfo::kSize>);
@@ -2564,6 +2568,8 @@
 
   // If the resulting string is small make a flat string.
   if (length < String::kMinNonFlatLength) {
+    // Note that neither of the two inputs can be a slice because:
+    STATIC_ASSERT(String::kMinNonFlatLength <= SlicedString::kMinLength);
     ASSERT(first->IsFlat());
     ASSERT(second->IsFlat());
     if (is_ascii) {
@@ -2655,24 +2661,69 @@
   // Make an attempt to flatten the buffer to reduce access time.
   buffer = buffer->TryFlattenGetString();
 
-  Object* result;
-  { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
-                   ? AllocateRawAsciiString(length, pretenure )
-                   : AllocateRawTwoByteString(length, pretenure);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  String* string_result = String::cast(result);
-  // Copy the characters into the new object.
-  if (buffer->IsAsciiRepresentation()) {
-    ASSERT(string_result->IsAsciiRepresentation());
-    char* dest = SeqAsciiString::cast(string_result)->GetChars();
-    String::WriteToFlat(buffer, dest, start, end);
-  } else {
-    ASSERT(string_result->IsTwoByteRepresentation());
-    uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
-    String::WriteToFlat(buffer, dest, start, end);
+  // TODO(1626): For now slicing external strings is not supported.  However,
+  // a flat cons string can have an external string as first part in some cases.
+  // Therefore we have to single out this case as well.
+  if (!FLAG_string_slices ||
+      (buffer->IsConsString() &&
+        (!buffer->IsFlat() ||
+         !ConsString::cast(buffer)->first()->IsSeqString())) ||
+      buffer->IsExternalString() ||
+      length < SlicedString::kMinLength ||
+      pretenure == TENURED) {
+    Object* result;
+    { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
+                     ? AllocateRawAsciiString(length, pretenure)
+                     : AllocateRawTwoByteString(length, pretenure);
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+    String* string_result = String::cast(result);
+    // Copy the characters into the new object.
+    if (buffer->IsAsciiRepresentation()) {
+      ASSERT(string_result->IsAsciiRepresentation());
+      char* dest = SeqAsciiString::cast(string_result)->GetChars();
+      String::WriteToFlat(buffer, dest, start, end);
+    } else {
+      ASSERT(string_result->IsTwoByteRepresentation());
+      uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
+      String::WriteToFlat(buffer, dest, start, end);
+    }
+    return result;
   }
 
+  ASSERT(buffer->IsFlat());
+  ASSERT(!buffer->IsExternalString());
+#if DEBUG
+  buffer->StringVerify();
+#endif
+
+  Object* result;
+  { Map* map = buffer->IsAsciiRepresentation()
+                 ? sliced_ascii_string_map()
+                 : sliced_string_map();
+    MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+
+  AssertNoAllocation no_gc;
+  SlicedString* sliced_string = SlicedString::cast(result);
+  sliced_string->set_length(length);
+  sliced_string->set_hash_field(String::kEmptyHashField);
+  if (buffer->IsConsString()) {
+    ConsString* cons = ConsString::cast(buffer);
+    ASSERT(cons->second()->length() == 0);
+    sliced_string->set_parent(cons->first());
+    sliced_string->set_offset(start);
+  } else if (buffer->IsSlicedString()) {
+    // Prevent nesting sliced strings.
+    SlicedString* parent_slice = SlicedString::cast(buffer);
+    sliced_string->set_parent(parent_slice->parent());
+    sliced_string->set_offset(start + parent_slice->offset());
+  } else {
+    sliced_string->set_parent(buffer);
+    sliced_string->set_offset(start);
+  }
+  ASSERT(sliced_string->parent()->IsSeqString());
   return result;
 }
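
The new AllocateSubString logic above copies characters when slicing is disabled, the result is short, the backing store is unsuitable (external or unflattened cons), or the allocation is pretenured; otherwise it builds a SlicedString whose parent is always sequential, folding offsets together so slices never nest. A standalone sketch of that policy with invented types and a placeholder length threshold (the external/cons-backed cases are left out):

// Sketch of the substring policy: copy, or slice a sequential parent.
#include <memory>
#include <string>

static bool g_string_slices_enabled = true;  // Stand-in for FLAG_string_slices.
static const int kMinSliceLength = 13;       // Placeholder threshold.

struct Slice {
  std::shared_ptr<const std::string> parent;  // Always a "sequential" string.
  int offset;
  int length;
};

struct SubStringResult {
  std::string copy;              // Used when we copy.
  std::unique_ptr<Slice> slice;  // Used when we slice.
};

SubStringResult AllocateSubString(const std::shared_ptr<const std::string>& buf,
                                  int start, int end, bool pretenured,
                                  const Slice* buf_as_slice /* or nullptr */) {
  int length = end - start;
  SubStringResult result;
  if (!g_string_slices_enabled || length < kMinSliceLength || pretenured) {
    result.copy = buf->substr(start, length);  // Copy the characters.
    return result;
  }
  // Slice: if the buffer is itself a slice, point at *its* parent and fold
  // the offsets together so slices never nest.
  auto s = std::make_unique<Slice>();
  if (buf_as_slice != nullptr) {
    s->parent = buf_as_slice->parent;
    s->offset = start + buf_as_slice->offset;
  } else {
    s->parent = buf;
    s->offset = start;
  }
  s->length = length;
  result.slice = std::move(s);
  return result;
}

int main() {
  auto base = std::make_shared<const std::string>("the quick brown fox jumps");
  SubStringResult r =
      AllocateSubString(base, 4, 24, /*pretenured=*/false, nullptr);
  return (r.slice && r.slice->offset == 4) ? 0 : 1;
}
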
 
diff --git a/src/heap.h b/src/heap.h
index c4ee4db..0f69fab 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -88,6 +88,8 @@
   V(Map, symbol_map, SymbolMap)                                                \
   V(Map, cons_string_map, ConsStringMap)                                       \
   V(Map, cons_ascii_string_map, ConsAsciiStringMap)                            \
+  V(Map, sliced_string_map, SlicedStringMap)                                   \
+  V(Map, sliced_ascii_string_map, SlicedAsciiStringMap)                        \
   V(Map, ascii_symbol_map, AsciiSymbolMap)                                     \
   V(Map, cons_symbol_map, ConsSymbolMap)                                       \
   V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap)                            \
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index d321aac..dd3a591 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -5074,19 +5074,13 @@
     // The subexpression does not have side effects.
     return ast_context()->ReturnValue(graph()->GetConstantFalse());
   } else if (prop != NULL) {
-    if (prop->is_synthetic()) {
-      // Result of deleting parameters is false, even when they rewrite
-      // to accesses on the arguments object.
-      return ast_context()->ReturnValue(graph()->GetConstantFalse());
-  } else {
-      CHECK_ALIVE(VisitForValue(prop->obj()));
-      CHECK_ALIVE(VisitForValue(prop->key()));
-      HValue* key = Pop();
-      HValue* obj = Pop();
-      HValue* context = environment()->LookupContext();
-      HDeleteProperty* instr = new(zone()) HDeleteProperty(context, obj, key);
-      return ast_context()->ReturnInstruction(instr, expr->id());
-    }
+    CHECK_ALIVE(VisitForValue(prop->obj()));
+    CHECK_ALIVE(VisitForValue(prop->key()));
+    HValue* key = Pop();
+    HValue* obj = Pop();
+    HValue* context = environment()->LookupContext();
+    HDeleteProperty* instr = new(zone()) HDeleteProperty(context, obj, key);
+    return ast_context()->ReturnInstruction(instr, expr->id());
   } else if (var->is_global()) {
     Bailout("delete with global variable");
   } else {
@@ -6222,11 +6216,6 @@
 }
 
 
-void HGraphBuilder::GenerateIsNativeOrStrictMode(CallRuntime* call) {
-  return Bailout("inlined runtime function: IsNativeOrStrictMode");
-}
-
-
 #undef CHECK_BAILOUT
 #undef CHECK_ALIVE
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index e39d114..d76e4bf 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -493,10 +493,10 @@
     __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
     // If we have a match of the int32-but-not-Smi exponent then skip some
     // logic.
-    __ j(equal, &right_exponent);
+    __ j(equal, &right_exponent, Label::kNear);
     // If the exponent is higher than that then go to slow case.  This catches
     // numbers that don't fit in a signed int32, infinities and NaNs.
-    __ j(less, &normal_exponent);
+    __ j(less, &normal_exponent, Label::kNear);
 
     {
       // Handle a big exponent.  The only reason we have this code is that the
@@ -525,9 +525,9 @@
       __ or_(ecx, Operand(scratch2));
       // We have the answer in ecx, but we may need to negate it.
       __ test(scratch, Operand(scratch));
-      __ j(positive, &done);
+      __ j(positive, &done, Label::kNear);
       __ neg(ecx);
-      __ jmp(&done);
+      __ jmp(&done, Label::kNear);
     }
 
     __ bind(&normal_exponent);
@@ -540,7 +540,7 @@
         (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
     __ sub(Operand(scratch2), Immediate(zero_exponent));
     // ecx already has a Smi zero.
-    __ j(less, &done);
+    __ j(less, &done, Label::kNear);
 
     // We have a shifted exponent between 0 and 30 in scratch2.
     __ shr(scratch2, HeapNumber::kExponentShift);
@@ -765,7 +765,7 @@
 
     Label slow_allocate_heapnumber, heapnumber_allocated;
     __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
-    __ jmp(&heapnumber_allocated);
+    __ jmp(&heapnumber_allocated, Label::kNear);
 
     __ bind(&slow_allocate_heapnumber);
     __ EnterInternalFrame();
@@ -1442,14 +1442,14 @@
   Register right = eax;
 
   // Test if left operand is a string.
-  __ JumpIfSmi(left, &call_runtime);
+  __ JumpIfSmi(left, &call_runtime, Label::kNear);
   __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
-  __ j(above_equal, &call_runtime);
+  __ j(above_equal, &call_runtime, Label::kNear);
 
   // Test if right operand is a string.
-  __ JumpIfSmi(right, &call_runtime);
+  __ JumpIfSmi(right, &call_runtime, Label::kNear);
   __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
-  __ j(above_equal, &call_runtime);
+  __ j(above_equal, &call_runtime, Label::kNear);
 
   StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
   GenerateRegisterArgsPush(masm);
@@ -1563,7 +1563,7 @@
       } else {
         // Check if result fits in a smi.
         __ cmp(eax, 0xc0000000);
-        __ j(negative, &non_smi_result);
+        __ j(negative, &non_smi_result, Label::kNear);
       }
       // Tag smi result and return.
       __ SmiTag(eax);
@@ -1777,7 +1777,7 @@
       } else {
         // Check if result fits in a smi.
         __ cmp(eax, 0xc0000000);
-        __ j(negative, &non_smi_result);
+        __ j(negative, &non_smi_result, Label::kNear);
       }
       // Tag smi result and return.
       __ SmiTag(eax);
@@ -1976,7 +1976,7 @@
       } else {
         // Check if result fits in a smi.
         __ cmp(eax, 0xc0000000);
-        __ j(negative, &non_smi_result);
+        __ j(negative, &non_smi_result, Label::kNear);
       }
       // Tag smi result and return.
       __ SmiTag(eax);
@@ -2451,7 +2451,7 @@
   Label load_arg2, done;
 
   // Test if arg1 is a Smi.
-  __ JumpIfNotSmi(edx, &arg1_is_object);
+  __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
 
   __ SmiUntag(edx);
   __ jmp(&load_arg2);
@@ -2477,7 +2477,7 @@
   __ bind(&load_arg2);
 
   // Test if arg2 is a Smi.
-  __ JumpIfNotSmi(eax, &arg2_is_object);
+  __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
 
   __ SmiUntag(eax);
   __ mov(ecx, eax);
@@ -2867,7 +2867,7 @@
 
   // Check that the key is a smi.
   Label slow;
-  __ JumpIfNotSmi(edx, &slow);
+  __ JumpIfNotSmi(edx, &slow, Label::kNear);
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor;
@@ -2880,7 +2880,7 @@
   // through register eax. Use unsigned comparison to get negative
   // check for free.
   __ cmp(edx, Operand(eax));
-  __ j(above_equal, &slow);
+  __ j(above_equal, &slow, Label::kNear);
 
   // Read the argument from the stack and return it.
   STATIC_ASSERT(kSmiTagSize == 1);
@@ -2896,7 +2896,7 @@
   __ bind(&adaptor);
   __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ cmp(edx, Operand(ecx));
-  __ j(above_equal, &slow);
+  __ j(above_equal, &slow, Label::kNear);
 
   // Read the argument from the stack and return it.
   STATIC_ASSERT(kSmiTagSize == 1);
@@ -3175,11 +3175,11 @@
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
   __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &adaptor_frame);
+  __ j(equal, &adaptor_frame, Label::kNear);
 
   // Get the length from the frame.
   __ mov(ecx, Operand(esp, 1 * kPointerSize));
-  __ jmp(&try_allocate);
+  __ jmp(&try_allocate, Label::kNear);
 
   // Patch the arguments.length and the parameters pointer.
   __ bind(&adaptor_frame);
@@ -3225,7 +3225,7 @@
   // If there are no actual arguments, we're done.
   Label done;
   __ test(ecx, Operand(ecx));
-  __ j(zero, &done);
+  __ j(zero, &done, Label::kNear);
 
   // Get the parameters pointer from the stack.
   __ mov(edx, Operand(esp, 2 * kPointerSize));
@@ -3371,6 +3371,8 @@
   __ cmp(edx, Operand(eax));
   __ j(greater, &runtime);
 
+  // Reset offset for possibly sliced string.
+  __ Set(edi, Immediate(0));
   // ecx: RegExp data (FixedArray)
   // Check the representation and encoding of the subject string.
   Label seq_ascii_string, seq_two_byte_string, check_code;
@@ -3381,36 +3383,45 @@
   __ and_(ebx,
           kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
-  __ j(zero, &seq_two_byte_string);
+  __ j(zero, &seq_two_byte_string, Label::kNear);
   // Any other flat string must be a flat ascii string.
-  __ test(Operand(ebx),
+  __ and_(Operand(ebx),
           Immediate(kIsNotStringMask | kStringRepresentationMask));
-  __ j(zero, &seq_ascii_string);
+  __ j(zero, &seq_ascii_string, Label::kNear);
 
-  // Check for flat cons string.
+  // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
-  __ test(Operand(ebx),
-          Immediate(kIsNotStringMask | kExternalStringTag));
-  __ j(not_zero, &runtime);
-  // String is a cons string.
-  __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
-  __ cmp(Operand(edx), factory->empty_string());
+  // In the case of a sliced string its offset has to be taken into account.
+  Label cons_string, check_encoding;
+  STATIC_ASSERT((kConsStringTag < kExternalStringTag));
+  STATIC_ASSERT((kSlicedStringTag > kExternalStringTag));
+  __ cmp(Operand(ebx), Immediate(kExternalStringTag));
+  __ j(less, &cons_string);
+  __ j(equal, &runtime);
+
+  // String is sliced.
+  __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
+  __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
+  // edi: offset of sliced string, smi-tagged.
+  // eax: parent string.
+  __ jmp(&check_encoding, Label::kNear);
+  // String is a cons string, check whether it is flat.
+  __ bind(&cons_string);
+  __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
   __ j(not_equal, &runtime);
   __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
+  __ bind(&check_encoding);
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  // String is a cons string with empty second part.
-  // eax: first part of cons string.
-  // ebx: map of first part of cons string.
-  // Is first part a flat two byte string?
+  // eax: first part of cons string or parent of sliced string.
+  // ebx: map of first part of cons string or map of parent of sliced string.
+  // Is first part of cons or parent of slice a flat two byte string?
   __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
             kStringRepresentationMask | kStringEncodingMask);
   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
-  __ j(zero, &seq_two_byte_string);
+  __ j(zero, &seq_two_byte_string, Label::kNear);
   // Any other flat string must be ascii.
   __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
             kStringRepresentationMask);
@@ -3420,14 +3431,14 @@
   // eax: subject string (flat ascii)
   // ecx: RegExp data (FixedArray)
   __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
-  __ Set(edi, Immediate(1));  // Type is ascii.
-  __ jmp(&check_code);
+  __ Set(ecx, Immediate(1));  // Type is ascii.
+  __ jmp(&check_code, Label::kNear);
 
   __ bind(&seq_two_byte_string);
   // eax: subject string (flat two byte)
   // ecx: RegExp data (FixedArray)
   __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
-  __ Set(edi, Immediate(0));  // Type is two byte.
+  __ Set(ecx, Immediate(0));  // Type is two byte.
 
   __ bind(&check_code);
   // Check that the irregexp code has been generated for the actual string
@@ -3437,7 +3448,7 @@
 
   // eax: subject string
   // edx: code
-  // edi: encoding of subject string (1 if ascii, 0 if two_byte);
+  // ecx: encoding of subject string (1 if ascii, 0 if two_byte);
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
   __ mov(ebx, Operand(esp, kPreviousIndexOffset));
@@ -3446,7 +3457,7 @@
   // eax: subject string
   // ebx: previous index
   // edx: code
-  // edi: encoding of subject string (1 if ascii 0 if two_byte);
+  // ecx: encoding of subject string (1 if ascii 0 if two_byte);
   // All checks done. Now push arguments for native regexp code.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -3463,23 +3474,47 @@
   __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
 
   // Argument 6: Start (high end) of backtracking stack memory area.
-  __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
-  __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
-  __ mov(Operand(esp, 5 * kPointerSize), ecx);
+  __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
+  __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+  __ mov(Operand(esp, 5 * kPointerSize), esi);
 
   // Argument 5: static offsets vector buffer.
   __ mov(Operand(esp, 4 * kPointerSize),
          Immediate(ExternalReference::address_of_static_offsets_vector(
              masm->isolate())));
 
+  // Argument 2: Previous index.
+  __ mov(Operand(esp, 1 * kPointerSize), ebx);
+
+  // Argument 1: Original subject string.
+  // The original subject is in the previous stack frame. Therefore we have to
+  // use ebp, which points exactly to one pointer size below the previous esp.
+  // (Because creating a new stack frame pushes the previous ebp onto the stack
+  // and thereby moves up esp by one kPointerSize.)
+  __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize));
+  __ mov(Operand(esp, 0 * kPointerSize), esi);
+
+  // esi: original subject string
+  // eax: underlying subject string
+  // ebx: previous index
+  // ecx: encoding of subject string (1 if ascii 0 if two_byte);
+  // edx: code
   // Argument 4: End of string data
   // Argument 3: Start of string data
-  Label setup_two_byte, setup_rest;
-  __ test(edi, Operand(edi));
-  __ mov(edi, FieldOperand(eax, String::kLengthOffset));
-  __ j(zero, &setup_two_byte, Label::kNear);
+  // Prepare start and end index of the input.
+  // Load the length from the original sliced string if that is the case.
+  __ mov(esi, FieldOperand(esi, String::kLengthOffset));
+  __ add(esi, Operand(edi));  // Calculate input end wrt offset.
   __ SmiUntag(edi);
-  __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
+  __ add(ebx, Operand(edi));  // Calculate input start wrt offset.
+
+  // ebx: start index of the input string
+  // esi: end index of the input string
+  Label setup_two_byte, setup_rest;
+  __ test(ecx, Operand(ecx));
+  __ j(zero, &setup_two_byte, Label::kNear);
+  __ SmiUntag(esi);
+  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
   __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
   __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
   __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
@@ -3487,20 +3522,14 @@
 
   __ bind(&setup_two_byte);
   STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);  // edi is smi (powered by 2).
-  __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
+  STATIC_ASSERT(kSmiTagSize == 1);  // esi is smi (powered by 2).
+  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqTwoByteString::kHeaderSize));
   __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
   __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
   __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
 
   __ bind(&setup_rest);
 
-  // Argument 2: Previous index.
-  __ mov(Operand(esp, 1 * kPointerSize), ebx);
-
-  // Argument 1: Subject string.
-  __ mov(Operand(esp, 0 * kPointerSize), eax);
-
   // Locate the code entry and call it.
   __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ call(Operand(edx));
@@ -3539,7 +3568,7 @@
   // by javascript code.
   __ cmp(eax, factory->termination_exception());
   Label throw_termination_exception;
-  __ j(equal, &throw_termination_exception);
+  __ j(equal, &throw_termination_exception, Label::kNear);
 
   // Handle normal exception by following handler chain.
   __ Throw(eax);
@@ -3822,16 +3851,16 @@
 void CompareStub::Generate(MacroAssembler* masm) {
   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
 
-  Label check_unequal_objects, done;
+  Label check_unequal_objects;
 
   // Compare two smis if required.
   if (include_smi_compare_) {
     Label non_smi, smi_done;
     __ mov(ecx, Operand(edx));
     __ or_(ecx, Operand(eax));
-    __ JumpIfNotSmi(ecx, &non_smi);
+    __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
     __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
-    __ j(no_overflow, &smi_done);
+    __ j(no_overflow, &smi_done, Label::kNear);
     __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
     __ bind(&smi_done);
     __ mov(eax, edx);
@@ -3953,7 +3982,7 @@
     __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
            Immediate(masm->isolate()->factory()->heap_number_map()));
     // If heap number, handle it in the slow case.
-    __ j(equal, &slow);
+    __ j(equal, &slow, Label::kNear);
     // Return non-equal (ebx is not zero)
     __ mov(eax, ebx);
     __ ret(0);
@@ -4004,7 +4033,7 @@
       __ ucomisd(xmm0, xmm1);
 
       // Don't base result on EFLAGS when a NaN is involved.
-      __ j(parity_even, &unordered);
+      __ j(parity_even, &unordered, Label::kNear);
       // Return a result of -1, 0, or 1, based on EFLAGS.
       __ mov(eax, 0);  // equal
       __ mov(ecx, Immediate(Smi::FromInt(1)));
@@ -4020,12 +4049,12 @@
       __ FCmp();
 
       // Don't base result on EFLAGS when a NaN is involved.
-      __ j(parity_even, &unordered);
+      __ j(parity_even, &unordered, Label::kNear);
 
       Label below_label, above_label;
       // Return a result of -1, 0, or 1, based on EFLAGS.
-      __ j(below, &below_label);
-      __ j(above, &above_label);
+      __ j(below, &below_label, Label::kNear);
+      __ j(above, &above_label, Label::kNear);
 
       __ Set(eax, Immediate(0));
       __ ret(0);
@@ -4340,7 +4369,7 @@
   // If the returned exception is RETRY_AFTER_GC continue at retry label
   STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
   __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
-  __ j(zero, &retry);
+  __ j(zero, &retry, Label::kNear);
 
   // Special handling of out of memory exceptions.
   __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
@@ -4460,11 +4489,11 @@
   ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
                                 masm->isolate());
   __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
-  __ j(not_equal, &not_outermost_js);
+  __ j(not_equal, &not_outermost_js, Label::kNear);
   __ mov(Operand::StaticVariable(js_entry_sp), ebp);
   __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   Label cont;
-  __ jmp(&cont);
+  __ jmp(&cont, Label::kNear);
   __ bind(&not_outermost_js);
   __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
   __ bind(&cont);
@@ -4705,26 +4734,26 @@
   __ bind(&not_js_object);
   // Before null, smi and string value checks, check that the rhs is a function
   // as for a non-function rhs an exception needs to be thrown.
-  __ JumpIfSmi(function, &slow);
+  __ JumpIfSmi(function, &slow, Label::kNear);
   __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
-  __ j(not_equal, &slow);
+  __ j(not_equal, &slow, Label::kNear);
 
   // Null is not instance of anything.
   __ cmp(object, factory->null_value());
-  __ j(not_equal, &object_not_null);
+  __ j(not_equal, &object_not_null, Label::kNear);
   __ Set(eax, Immediate(Smi::FromInt(1)));
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
   __ bind(&object_not_null);
   // Smi values is not instance of anything.
-  __ JumpIfNotSmi(object, &object_not_null_or_smi);
+  __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
   __ Set(eax, Immediate(Smi::FromInt(1)));
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
   __ bind(&object_not_null_or_smi);
   // String values is not instance of anything.
   Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
-  __ j(NegateCondition(is_string), &slow);
+  __ j(NegateCondition(is_string), &slow, Label::kNear);
   __ Set(eax, Immediate(Smi::FromInt(1)));
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
@@ -4811,6 +4840,7 @@
   Label flat_string;
   Label ascii_string;
   Label got_char_code;
+  Label sliced_string;
 
   // If the receiver is a smi trigger the non-string case.
   STATIC_ASSERT(kSmiTag == 0);
@@ -4841,31 +4871,45 @@
   __ j(zero, &flat_string);
 
   // Handle non-flat strings.
-  __ test(result_, Immediate(kIsConsStringMask));
-  __ j(zero, &call_runtime_);
+  __ and_(result_, kStringRepresentationMask);
+  STATIC_ASSERT((kConsStringTag < kExternalStringTag));
+  STATIC_ASSERT((kSlicedStringTag > kExternalStringTag));
+  __ cmp(result_, kExternalStringTag);
+  __ j(greater, &sliced_string, Label::kNear);
+  __ j(equal, &call_runtime_);
 
   // ConsString.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
   // the case we would rather go to the runtime system now to flatten
   // the string.
+  Label assure_seq_string;
   __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
          Immediate(masm->isolate()->factory()->empty_string()));
   __ j(not_equal, &call_runtime_);
   // Get the first of the two strings and load its instance type.
   __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
+  __ jmp(&assure_seq_string, Label::kNear);
+
+  // SlicedString, unpack and add offset.
+  __ bind(&sliced_string);
+  __ add(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
+  __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset));
+
+  // Ensure that we are dealing with a sequential string. Go to runtime if not.

+  __ bind(&assure_seq_string);
   __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
   __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
   STATIC_ASSERT(kSeqStringTag == 0);
   __ test(result_, Immediate(kStringRepresentationMask));
   __ j(not_zero, &call_runtime_);
+  __ jmp(&flat_string, Label::kNear);
 
   // Check for 1-byte or 2-byte string.
   __ bind(&flat_string);
   STATIC_ASSERT(kAsciiStringTag != 0);
   __ test(result_, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string);
+  __ j(not_zero, &ascii_string, Label::kNear);
 
   // 2-byte string.
   // Load the 2-byte character code into the result register.
@@ -4873,7 +4917,7 @@
   __ movzx_w(result_, FieldOperand(object_,
                                    scratch_, times_1,  // Scratch is smi-tagged.
                                    SeqTwoByteString::kHeaderSize));
-  __ jmp(&got_char_code);
+  __ jmp(&got_char_code, Label::kNear);
 
   // ASCII string.
   // Load the byte into the result register.
@@ -5185,6 +5229,8 @@
   __ and_(ecx, kStringRepresentationMask);
   __ cmp(ecx, kExternalStringTag);
   __ j(equal, &string_add_runtime);
+  // We cannot encounter sliced strings here since:
+  STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
   // Now check if both strings are ascii strings.
   // eax: first string
   // ebx: length of resulting flat string as a smi
@@ -5596,6 +5642,9 @@
 void SubStringStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
+  if (FLAG_string_slices) {
+    __ jmp(&runtime);
+  }
   // Stack frame on entry.
   //  esp[0]: return address
   //  esp[4]: to
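
In the RegExpExecStub changes above, a sliced subject's offset is loaded into edi and added to both the previous index and the subject length before the start and end character addresses are taken from the underlying parent string. Ignoring smi tagging and the concrete register assignments, the arithmetic amounts to this (illustrative values only):

// Illustrative arithmetic only: how a slice offset shifts the start/end
// indices handed to the native regexp code.
#include <cassert>
#include <string>

struct RegExpInput {
  const char* start;  // Address of the first character to consider.
  const char* end;    // Address one past the last character.
};

RegExpInput PrepareInput(const std::string& underlying, int slice_offset,
                         int subject_length, int previous_index) {
  // The original (possibly sliced) subject has 'subject_length' characters;
  // both bounds are expressed relative to the underlying string.
  int start_index = previous_index + slice_offset;
  int end_index = subject_length + slice_offset;
  return RegExpInput{underlying.data() + start_index,
                     underlying.data() + end_index};
}

int main() {
  std::string underlying = "xxxabcabc";  // Parent of a slice at offset 3.
  RegExpInput in = PrepareInput(underlying, /*slice_offset=*/3,
                                /*subject_length=*/6, /*previous_index=*/2);
  assert(in.end - in.start == 4);        // 6 - 2 characters remain.
  assert(*in.start == 'c');              // Character at subject[2].
  return 0;
}
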
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 07fd725..799ba73 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -46,7 +46,6 @@
 
 
 static unsigned GetPropertyId(Property* property) {
-  if (property->is_synthetic()) return AstNode::kNoNumber;
   return property->id();
 }
 
@@ -690,105 +689,73 @@
   Comment cmnt(masm_, "[ Declaration");
   ASSERT(variable != NULL);  // Must have been resolved.
   Slot* slot = variable->AsSlot();
-  Property* prop = variable->AsProperty();
-
-  if (slot != NULL) {
-    switch (slot->type()) {
-      case Slot::PARAMETER:
-      case Slot::LOCAL:
-        if (mode == Variable::CONST) {
-          __ mov(Operand(ebp, SlotOffset(slot)),
-                 Immediate(isolate()->factory()->the_hole_value()));
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ mov(Operand(ebp, SlotOffset(slot)), result_register());
-        }
-        break;
-
-      case Slot::CONTEXT:
-        // We bypass the general EmitSlotSearch because we know more about
-        // this specific context.
-
-        // The variable in the decl always resides in the current function
-        // context.
-        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-        if (FLAG_debug_code) {
-          // Check that we're not inside a with or catch context.
-          __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
-          __ cmp(ebx, isolate()->factory()->with_context_map());
-          __ Check(not_equal, "Declaration in with context.");
-          __ cmp(ebx, isolate()->factory()->catch_context_map());
-          __ Check(not_equal, "Declaration in catch context.");
-        }
-        if (mode == Variable::CONST) {
-          __ mov(ContextOperand(esi, slot->index()),
-                 Immediate(isolate()->factory()->the_hole_value()));
-          // No write barrier since the hole value is in old space.
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ mov(ContextOperand(esi, slot->index()), result_register());
-          int offset = Context::SlotOffset(slot->index());
-          __ mov(ebx, esi);
-          __ RecordWrite(ebx, offset, result_register(), ecx);
-        }
-        break;
-
-      case Slot::LOOKUP: {
-        __ push(esi);
-        __ push(Immediate(variable->name()));
-        // Declaration nodes are always introduced in one of two modes.
-        ASSERT(mode == Variable::VAR ||
-               mode == Variable::CONST ||
-               mode == Variable::LET);
-        PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
-        __ push(Immediate(Smi::FromInt(attr)));
-        // Push initial value, if any.
-        // Note: For variables we must not push an initial value (such as
-        // 'undefined') because we may have a (legal) redeclaration and we
-        // must not destroy the current value.
-        increment_stack_height(3);
-        if (mode == Variable::CONST) {
-          __ push(Immediate(isolate()->factory()->the_hole_value()));
-          increment_stack_height();
-        } else if (function != NULL) {
-          VisitForStackValue(function);
-        } else {
-          __ push(Immediate(Smi::FromInt(0)));  // No initial value!
-          increment_stack_height();
-        }
-        __ CallRuntime(Runtime::kDeclareContextSlot, 4);
-        decrement_stack_height(4);
-        break;
+  ASSERT(slot != NULL);
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+    case Slot::LOCAL:
+      if (mode == Variable::CONST) {
+        __ mov(Operand(ebp, SlotOffset(slot)),
+               Immediate(isolate()->factory()->the_hole_value()));
+      } else if (function != NULL) {
+        VisitForAccumulatorValue(function);
+        __ mov(Operand(ebp, SlotOffset(slot)), result_register());
       }
-    }
+      break;
 
-  } else if (prop != NULL) {
-    // A const declaration aliasing a parameter is an illegal redeclaration.
-    ASSERT(mode != Variable::CONST);
-    if (function != NULL) {
-      // We are declaring a function that rewrites to a property.
-      // Use (keyed) IC to set the initial value.  We cannot visit the
-      // rewrite because it's shared and we risk recording duplicate AST
-      // IDs for bailouts from optimized code.
-      ASSERT(prop->obj()->AsVariableProxy() != NULL);
-      { AccumulatorValueContext for_object(this);
-        EmitVariableLoad(prop->obj()->AsVariableProxy());
+    case Slot::CONTEXT:
+      // We bypass the general EmitSlotSearch because we know more about
+      // this specific context.
+
+      // The variable in the decl always resides in the current function
+      // context.
+      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+      if (FLAG_debug_code) {
+        // Check that we're not inside a with or catch context.
+        __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
+        __ cmp(ebx, isolate()->factory()->with_context_map());
+        __ Check(not_equal, "Declaration in with context.");
+        __ cmp(ebx, isolate()->factory()->catch_context_map());
+        __ Check(not_equal, "Declaration in catch context.");
       }
+      if (mode == Variable::CONST) {
+        __ mov(ContextOperand(esi, slot->index()),
+               Immediate(isolate()->factory()->the_hole_value()));
+        // No write barrier since the hole value is in old space.
+      } else if (function != NULL) {
+        VisitForAccumulatorValue(function);
+        __ mov(ContextOperand(esi, slot->index()), result_register());
+        int offset = Context::SlotOffset(slot->index());
+        __ mov(ebx, esi);
+        __ RecordWrite(ebx, offset, result_register(), ecx);
+      }
+      break;
 
-      __ push(eax);
-      increment_stack_height();
-      VisitForAccumulatorValue(function);
-      __ pop(edx);
-      decrement_stack_height();
-
-      ASSERT(prop->key()->AsLiteral() != NULL &&
-             prop->key()->AsLiteral()->handle()->IsSmi());
-      __ SafeSet(ecx, Immediate(prop->key()->AsLiteral()->handle()));
-
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
-      __ call(ic);
+    case Slot::LOOKUP: {
+      __ push(esi);
+      __ push(Immediate(variable->name()));
+      // Declaration nodes are always introduced in one of two modes.
+      ASSERT(mode == Variable::VAR ||
+             mode == Variable::CONST ||
+             mode == Variable::LET);
+      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      __ push(Immediate(Smi::FromInt(attr)));
+      // Push initial value, if any.
+      // Note: For variables we must not push an initial value (such as
+      // 'undefined') because we may have a (legal) redeclaration and we
+      // must not destroy the current value.
+      increment_stack_height(3);
+      if (mode == Variable::CONST) {
+        __ push(Immediate(isolate()->factory()->the_hole_value()));
+        increment_stack_height();
+      } else if (function != NULL) {
+        VisitForStackValue(function);
+      } else {
+        __ push(Immediate(Smi::FromInt(0)));  // No initial value!
+        increment_stack_height();
+      }
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      decrement_stack_height(4);
+      break;
     }
   }
 }
@@ -1824,21 +1791,11 @@
     case KEYED_PROPERTY: {
       __ push(eax);  // Preserve value.
       increment_stack_height();
-      if (prop->is_synthetic()) {
-        ASSERT(prop->obj()->AsVariableProxy() != NULL);
-        ASSERT(prop->key()->AsLiteral() != NULL);
-        { AccumulatorValueContext for_object(this);
-          EmitVariableLoad(prop->obj()->AsVariableProxy());
-        }
-        __ mov(edx, eax);
-        __ SafeSet(ecx, Immediate(prop->key()->AsLiteral()->handle()));
-      } else {
-        VisitForStackValue(prop->obj());
-        VisitForAccumulatorValue(prop->key());
-        __ mov(ecx, eax);
-        __ pop(edx);
-        decrement_stack_height();
-      }
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
+      __ mov(ecx, eax);
+      __ pop(edx);
+      decrement_stack_height();
       __ pop(eax);  // Restore value.
       decrement_stack_height();
       Handle<Code> ic = is_strict_mode()
@@ -2275,40 +2232,10 @@
       EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
     } else {
       // Call to a keyed property.
-      // For a synthetic property use keyed load IC followed by function call,
-      // for a regular property use EmitKeyedCallWithIC.
-      if (prop->is_synthetic()) {
-        // Do not visit the object and key subexpressions (they are shared
-        // by all occurrences of the same rewritten parameter).
-        ASSERT(prop->obj()->AsVariableProxy() != NULL);
-        ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
-        Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
-        MemOperand operand = EmitSlotSearch(slot, edx);
-        __ mov(edx, operand);
-
-        ASSERT(prop->key()->AsLiteral() != NULL);
-        ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
-        __ mov(eax, prop->key()->AsLiteral()->handle());
-
-        // Record source code position for IC call.
-        SetSourcePosition(prop->position());
-
-        Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-        __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
-        // Push result (function).
-        __ push(eax);
-        increment_stack_height();
-        // Push Global receiver.
-        __ mov(ecx, GlobalObjectOperand());
-        __ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
-        increment_stack_height();
-        EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
-      } else {
-        { PreservePositionScope scope(masm()->positions_recorder());
-          VisitForStackValue(prop->obj());
-        }
-        EmitKeyedCallWithIC(expr, prop->key());
+      { PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(prop->obj());
       }
+      EmitKeyedCallWithIC(expr, prop->key());
     }
   } else {
     { PreservePositionScope scope(masm()->positions_recorder());
@@ -3633,39 +3560,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-
-  // Load the function into eax.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Prepare for the test.
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  // Test for strict mode function.
-  __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
-  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
-            1 << SharedFunctionInfo::kStrictModeBitWithinByte);
-  __ j(not_equal, if_true);
-
-  // Test for native function.
-  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
-            1 << SharedFunctionInfo::kNativeBitWithinByte);
-  __ j(not_equal, if_true);
-
-  // Not native or strict-mode function.
-  __ jmp(if_false);
-
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   Handle<String> name = expr->name();
   if (name->length() > 0 && name->Get(0) == '_') {
@@ -3721,18 +3615,12 @@
       Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
 
       if (prop != NULL) {
-        if (prop->is_synthetic()) {
-          // Result of deleting parameters is false, even when they rewrite
-          // to accesses on the arguments object.
-          context()->Plug(false);
-        } else {
-          VisitForStackValue(prop->obj());
-          VisitForStackValue(prop->key());
-          __ push(Immediate(Smi::FromInt(strict_mode_flag())));
-          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-          decrement_stack_height(2);
-          context()->Plug(eax);
-        }
+        VisitForStackValue(prop->obj());
+        VisitForStackValue(prop->key());
+        __ push(Immediate(Smi::FromInt(strict_mode_flag())));
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        decrement_stack_height(2);
+        context()->Plug(eax);
       } else if (var != NULL) {
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is.
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 71fe8d9..5f67038 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -3217,95 +3217,81 @@
   };
 
   Register string = ToRegister(instr->string());
-  Register index = no_reg;
-  int const_index = -1;
-  if (instr->index()->IsConstantOperand()) {
-    const_index = ToInteger32(LConstantOperand::cast(instr->index()));
-    STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
-    if (!Smi::IsValid(const_index)) {
-      // Guaranteed to be out of bounds because of the assert above.
-      // So the bounds check that must dominate this instruction must
-      // have deoptimized already.
-      if (FLAG_debug_code) {
-        __ Abort("StringCharCodeAt: out of bounds index.");
-      }
-      // No code needs to be generated.
-      return;
-    }
-  } else {
-    index = ToRegister(instr->index());
-  }
+  Register index = ToRegister(instr->index());
   Register result = ToRegister(instr->result());
 
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  Label flat_string, ascii_string, done;
-
   // Fetch the instance type of the receiver into result register.
   __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
   __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test(result, Immediate(kStringRepresentationMask));
-  __ j(zero, &flat_string, Label::kNear);
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ test(result, Immediate(kIsIndirectStringMask));
+  __ j(zero, &check_sequential, Label::kNear);
 
-  // Handle non-flat strings.
-  __ test(result, Immediate(kIsConsStringMask));
-  __ j(zero, deferred->entry());
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  const uint32_t kSlicedNotConsMask = kSlicedStringTag & ~kConsStringTag;
+  ASSERT(IsPowerOf2(kSlicedNotConsMask) && kSlicedNotConsMask != 0);
+  __ test(result, Immediate(kSlicedNotConsMask));
+  __ j(zero, &cons_string, Label::kNear);
 
-  // ConsString.
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
+  __ SmiUntag(result);
+  __ add(index, Operand(result));
+  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded, Label::kNear);
+
+  // Handle conses.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
   // the case we would rather go to the runtime system now to flatten
   // the string.
+  __ bind(&cons_string);
   __ cmp(FieldOperand(string, ConsString::kSecondOffset),
          Immediate(factory()->empty_string()));
   __ j(not_equal, deferred->entry());
-  // Get the first of the two strings and load its instance type.
   __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
   __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
   __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
+
+  // Check whether the string is sequential. The only non-sequential
+  // shapes we support have just been unwrapped above.
+  __ bind(&check_sequential);
   STATIC_ASSERT(kSeqStringTag == 0);
   __ test(result, Immediate(kStringRepresentationMask));
   __ j(not_zero, deferred->entry());
 
-  // Check for ASCII or two-byte string.
-  __ bind(&flat_string);
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii_string;
   STATIC_ASSERT(kAsciiStringTag != 0);
   __ test(result, Immediate(kStringEncodingMask));
   __ j(not_zero, &ascii_string, Label::kNear);
 
   // Two-byte string.
   // Load the two-byte character code into the result register.
+  Label done;
   STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  if (instr->index()->IsConstantOperand()) {
-    __ movzx_w(result,
-               FieldOperand(string,
-                            SeqTwoByteString::kHeaderSize +
-                            (kUC16Size * const_index)));
-  } else {
-    __ movzx_w(result, FieldOperand(string,
-                                    index,
-                                    times_2,
-                                    SeqTwoByteString::kHeaderSize));
-  }
+  __ movzx_w(result, FieldOperand(string,
+                                  index,
+                                  times_2,
+                                  SeqTwoByteString::kHeaderSize));
   __ jmp(&done, Label::kNear);
 
   // ASCII string.
   // Load the byte into the result register.
   __ bind(&ascii_string);
-  if (instr->index()->IsConstantOperand()) {
-    __ movzx_b(result, FieldOperand(string,
-                                    SeqAsciiString::kHeaderSize + const_index));
-  } else {
-    __ movzx_b(result, FieldOperand(string,
-                                    index,
-                                    times_1,
-                                    SeqAsciiString::kHeaderSize));
-  }
+  __ movzx_b(result, FieldOperand(string,
+                                  index,
+                                  times_1,
+                                  SeqAsciiString::kHeaderSize));
   __ bind(&done);
   __ bind(deferred->exit());
 }
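
A rough C++ sketch of what the rewritten fast path does before the sequential
check (hypothetical helper built from the accessors added to objects-inl.h
later in this patch; the generated code performs the same tests on
instance-type bits held in registers):

    // Peel at most one level of indirection.  Anything that is still not
    // sequential afterwards is left to the deferred runtime path.
    static String* UnwrapIndirect(String* string, int* index) {
      if (StringShape(string).IsSliced()) {
        SlicedString* slice = SlicedString::cast(string);
        *index += slice->offset();  // Fold the slice offset into the index.
        return slice->parent();
      }
      if (StringShape(string).IsCons()) {
        ConsString* cons = ConsString::cast(string);
        // Only flat cons strings (empty second part) are handled inline.
        if (cons->second()->length() != 0) return NULL;
        return cons->first();
      }
      return string;  // Already a direct string.
    }
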
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index bb92e89..34c5beb 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -2058,8 +2058,8 @@
 
 
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
-  LOperand* string = UseRegister(instr->string());
-  LOperand* index = UseRegisterOrConstant(instr->index());
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
   LOperand* context = UseAny(instr->context());
   LStringCharCodeAt* result = new LStringCharCodeAt(context, string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 8db2e9b..7d7de0e 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1065,12 +1065,13 @@
   }
 
   // Prepare for possible GC.
-  HandleScope handles;
+  HandleScope handles(isolate);
   Handle<Code> code_handle(re_code);
 
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+
   // Current string.
-  bool is_ascii = subject->IsAsciiRepresentation();
+  bool is_ascii = subject->IsAsciiRepresentationUnderneath();
 
   ASSERT(re_code->instruction_start() <= *return_address);
   ASSERT(*return_address <=
@@ -1088,8 +1089,20 @@
     return EXCEPTION;
   }
 
+  Handle<String> subject_tmp = subject;
+  int slice_offset = 0;
+
+  // Extract the underlying string and the slice offset.
+  if (StringShape(*subject_tmp).IsCons()) {
+    subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+  } else if (StringShape(*subject_tmp).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(*subject_tmp);
+    subject_tmp = Handle<String>(slice->parent());
+    slice_offset = slice->offset();
+  }
+
   // String might have changed.
-  if (subject->IsAsciiRepresentation() != is_ascii) {
+  if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
     // If we changed between an ASCII and an UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
@@ -1100,8 +1113,8 @@
   // be a sequential or external string with the same content.
   // Update the start and end pointers in the stack frame to the current
   // location (whether it has actually moved or not).
-  ASSERT(StringShape(*subject).IsSequential() ||
-      StringShape(*subject).IsExternal());
+  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+      StringShape(*subject_tmp).IsExternal());
 
   // The original start address of the characters to match.
   const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
@@ -1109,13 +1122,14 @@
   // Find the current start address of the same character at the current string
   // position.
   int start_index = frame_entry<int>(re_frame, kStartIndex);
-  const byte* new_address = StringCharacterPosition(*subject, start_index);
+  const byte* new_address = StringCharacterPosition(*subject_tmp,
+                                                    start_index + slice_offset);
 
   if (start_address != new_address) {
     // If there is a difference, update the object pointer and start and end
     // addresses in the RegExp stack frame to match the new value.
     const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
-    int byte_length = end_address - start_address;
+    int byte_length = static_cast<int>(end_address - start_address);
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
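
The fix-up above amounts to the sketch below (hypothetical helper written as
if it were part of RegExpMacroAssemblerIA32, so kInputStart, kInputEnd and the
frame_entry<> accessors resolve as in the code above): after a GC the cached
character pointers are recomputed against the possibly-moved underlying
sequential string, with a slice's offset folded into the start index.

    static void UpdateInputPointers(Address re_frame, String* underlying,
                                    int start_index, int slice_offset) {
      const byte* old_start = frame_entry<const byte*>(re_frame, kInputStart);
      const byte* new_start =
          StringCharacterPosition(underlying, start_index + slice_offset);
      if (old_start == new_start) return;  // The string did not move.
      const byte* old_end = frame_entry<const byte*>(re_frame, kInputEnd);
      int byte_length = static_cast<int>(old_end - old_start);
      frame_entry<const byte*>(re_frame, kInputStart) = new_start;
      frame_entry<const byte*>(re_frame, kInputEnd) = new_start + byte_length;
    }
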
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 62d93a7..192fbf0 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -224,9 +224,9 @@
 
   if (!subject->IsFlat()) FlattenString(subject);
   AssertNoAllocation no_heap_allocation;  // ensure vectors stay valid
-  // Extract flattened substrings of cons strings before determining asciiness.
 
   String* needle = String::cast(re->DataAt(JSRegExp::kAtomPatternIndex));
+  ASSERT(StringShape(needle).IsSequential());
   int needle_len = needle->length();
   ASSERT(needle->IsFlat());
 
@@ -347,10 +347,7 @@
   JSRegExp::Flags flags = re->GetFlags();
 
   Handle<String> pattern(re->Pattern());
-  if (!pattern->IsFlat()) {
-    FlattenString(pattern);
-  }
-
+  if (!pattern->IsFlat()) FlattenString(pattern);
   RegExpCompileData compile_data;
   FlatStringReader reader(isolate, pattern);
   if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
@@ -434,22 +431,12 @@
 
 int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
                                 Handle<String> subject) {
-  if (!subject->IsFlat()) {
-    FlattenString(subject);
-  }
+  if (!subject->IsFlat()) FlattenString(subject);
+
   // Check the asciiness of the underlying storage.
-  bool is_ascii;
-  {
-    AssertNoAllocation no_gc;
-    String* sequential_string = *subject;
-    if (subject->IsConsString()) {
-      sequential_string = ConsString::cast(*subject)->first();
-    }
-    is_ascii = sequential_string->IsAsciiRepresentation();
-  }
-  if (!EnsureCompiledIrregexp(regexp, is_ascii)) {
-    return -1;
-  }
+  bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+  if (!EnsureCompiledIrregexp(regexp, is_ascii)) return -1;
+
 #ifdef V8_INTERPRETED_REGEXP
   // Byte-code regexp needs space allocated for all its registers.
   return IrregexpNumberOfRegisters(FixedArray::cast(regexp->data()));
@@ -474,15 +461,11 @@
   ASSERT(index <= subject->length());
   ASSERT(subject->IsFlat());
 
-  // A flat ASCII string might have a two-byte first part.
-  if (subject->IsConsString()) {
-    subject = Handle<String>(ConsString::cast(*subject)->first(), isolate);
-  }
+  bool is_ascii = subject->IsAsciiRepresentationUnderneath();
 
 #ifndef V8_INTERPRETED_REGEXP
   ASSERT(output.length() >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
   do {
-    bool is_ascii = subject->IsAsciiRepresentation();
     EnsureCompiledIrregexp(regexp, is_ascii);
     Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
     NativeRegExpMacroAssembler::Result res =
@@ -510,13 +493,13 @@
     // being internal and external, and even between being ASCII and UC16,
     // but the characters are always the same).
     IrregexpPrepare(regexp, subject);
+    is_ascii = subject->IsAsciiRepresentationUnderneath();
   } while (true);
   UNREACHABLE();
   return RE_EXCEPTION;
 #else  // V8_INTERPRETED_REGEXP
 
   ASSERT(output.length() >= IrregexpNumberOfRegisters(*irregexp));
-  bool is_ascii = subject->IsAsciiRepresentation();
   // We must have done EnsureCompiledIrregexp, so we can get the number of
   // registers.
   int* register_vector = output.start();
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 0961350..3e4a617 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -394,6 +394,10 @@
                                       ConsString::BodyDescriptor,
                                       void>::Visit);
 
+    table_.Register(kVisitSlicedString,
+                    &FixedBodyVisitor<StaticMarkingVisitor,
+                                      SlicedString::BodyDescriptor,
+                                      void>::Visit);
 
     table_.Register(kVisitFixedArray,
                     &FlexibleBodyVisitor<StaticMarkingVisitor,
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index a16cd80..e5077be 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -127,38 +127,38 @@
 
 const Register no_reg = { -1 };
 
-const Register zero_reg = { 0 };
-const Register at = { 1 };
-const Register v0 = { 2 };
-const Register v1 = { 3 };
-const Register a0 = { 4 };
+const Register zero_reg = { 0 };  // Always zero.
+const Register at = { 1 };   // at: Reserved for synthetic instructions.
+const Register v0 = { 2 };   // v0, v1: Used when returning multiple values
+const Register v1 = { 3 };   //   from subroutines.
+const Register a0 = { 4 };   // a0 - a3: Used to pass non-FP parameters.
 const Register a1 = { 5 };
 const Register a2 = { 6 };
 const Register a3 = { 7 };
-const Register t0 = { 8 };
-const Register t1 = { 9 };
-const Register t2 = { 10 };
+const Register t0 = { 8 };   // t0 - t9: Can be used without reservation, act
+const Register t1 = { 9 };   //   as temporary registers and are allowed to
+const Register t2 = { 10 };  //   be destroyed by subroutines.
 const Register t3 = { 11 };
 const Register t4 = { 12 };
 const Register t5 = { 13 };
 const Register t6 = { 14 };
 const Register t7 = { 15 };
-const Register s0 = { 16 };
-const Register s1 = { 17 };
-const Register s2 = { 18 };
-const Register s3 = { 19 };
-const Register s4 = { 20 };
+const Register s0 = { 16 };  // s0 - s7: Subroutine register variables.
+const Register s1 = { 17 };  //   Subroutines that write to these registers
+const Register s2 = { 18 };  //   must restore their values before exiting so
+const Register s3 = { 19 };  //   that the caller can expect the values to be
+const Register s4 = { 20 };  //   preserved.
 const Register s5 = { 21 };
 const Register s6 = { 22 };
 const Register s7 = { 23 };
 const Register t8 = { 24 };
 const Register t9 = { 25 };
-const Register k0 = { 26 };
-const Register k1 = { 27 };
-const Register gp = { 28 };
-const Register sp = { 29 };
-const Register s8_fp = { 30 };
-const Register ra = { 31 };
+const Register k0 = { 26 };  // k0, k1: Reserved for system calls and
+const Register k1 = { 27 };  //   interrupt handlers.
+const Register gp = { 28 };  // gp: Reserved.
+const Register sp = { 29 };  // sp: Stack pointer.
+const Register s8_fp = { 30 };  // fp: Frame pointer.
+const Register ra = { 31 };  // ra: Return address pointer.
 
 
 int ToNumber(Register reg);
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index d89d3e5..2526a6a 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -2506,7 +2506,7 @@
         CpuFeatures::Scope scope(FPU);
         __ mtc1(a2, f0);
         if (op_ == Token::SHR) {
-          __ Cvt_d_uw(f0, f0);
+          __ Cvt_d_uw(f0, f0, f22);
         } else {
           __ cvt_d_w(f0, f0);
         }
@@ -2920,7 +2920,7 @@
         } else {
           // The result must be interpreted as an unsigned 32-bit integer.
           __ mtc1(a2, double_scratch);
-          __ Cvt_d_uw(double_scratch, double_scratch);
+          __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
         }
 
         // Store the result.
@@ -3693,10 +3693,10 @@
   // args
 
   // Save callee saved registers on the stack.
-  __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
+  __ MultiPush(kCalleeSaved | ra.bit());
 
   // Load argv in s0 register.
-  __ lw(s0, MemOperand(sp, kNumCalleeSaved * kPointerSize +
+  __ lw(s0, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize +
                            StandardFrameConstants::kCArgsSlotsSize));
 
   // We build an EntryFrame.
@@ -3830,7 +3830,7 @@
   __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
 
   // Restore callee saved registers from the stack.
-  __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
+  __ MultiPop(kCalleeSaved | ra.bit());
   // Return.
   __ Jump(ra);
 }
@@ -4517,6 +4517,9 @@
   __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
   __ sra(at, a0, kSmiTagSize);  // Untag length for comparison.
   __ Branch(&runtime, gt, a2, Operand(at));
+
+  // Reset offset for possibly sliced string.
+  __ mov(t0, zero_reg);
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
   // Check the representation and encoding of the subject string.
@@ -4531,22 +4534,34 @@
   // subject: Subject string
  // a0: instance type of Subject string
   // regexp_data: RegExp data (FixedArray)
-  // Check for flat cons string.
+  // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
-  __ And(at, a0, Operand(kIsNotStringMask | kExternalStringTag));
-  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  // In the case of a sliced string its offset has to be taken into account.
+  Label cons_string, check_encoding;
+  STATIC_ASSERT((kConsStringTag < kExternalStringTag));
+  STATIC_ASSERT((kSlicedStringTag > kExternalStringTag));
+  __ Branch(&cons_string, lt, at, Operand(kExternalStringTag));
+  __ Branch(&runtime, eq, at, Operand(kExternalStringTag));
+
+  // String is sliced.
+  __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+  __ sra(t0, t0, kSmiTagSize);
+  __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+  // t0: offset of sliced string, untagged.
+  __ jmp(&check_encoding);
+  // String is a cons string, check whether it is flat.
+  __ bind(&cons_string);
   __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
   __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
   __ Branch(&runtime, ne, a0, Operand(a1));
   __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+  // Is first part of cons or parent of slice a flat string?
+  __ bind(&check_encoding);
   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
-  // Is first part a flat string?
   STATIC_ASSERT(kSeqStringTag == 0);
   __ And(at, a0, Operand(kStringRepresentationMask));
   __ Branch(&runtime, ne, at, Operand(zero_reg));
@@ -4562,8 +4577,8 @@
   __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ascii.
   __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
   __ sra(a3, a0, 2);  // a3 is 1 for ascii, 0 for UC16 (used below).
-  __ lw(t0, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
-  __ movz(t9, t0, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
+  __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+  __ movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
 
   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object otherwise it contains
@@ -4630,23 +4645,32 @@
 
   // For arguments 4 and 3 get string length, calculate start of string data
   // and calculate the shift of the index (0 for ASCII and 1 for two byte).
-  __ lw(a0, FieldMemOperand(subject, String::kLengthOffset));
-  __ sra(a0, a0, kSmiTagSize);
   STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ Addu(t0, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ Addu(t2, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
-  // Argument 4 (a3): End of string data
-  // Argument 3 (a2): Start of string data
+  // Load the length from the original subject string from the previous stack
+  // frame. Therefore we have to use fp, which points exactly to two pointer
+  // sizes below the previous sp. (Because creating a new stack frame pushes
+  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
+  __ lw(a0, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+  // If slice offset is not 0, load the length from the original sliced string.
+  // Argument 4, a3: End of string data
+  // Argument 3, a2: Start of string data
+  // Prepare start and end index of the input.
+  __ sllv(t1, t0, a3);
+  __ addu(t0, t2, t1);
   __ sllv(t1, a1, a3);
   __ addu(a2, t0, t1);
-  __ sllv(t1, a0, a3);
-  __ addu(a3, t0, t1);
 
+  __ lw(t2, FieldMemOperand(a0, String::kLengthOffset));
+  __ sra(t2, t2, kSmiTagSize);
+  __ sllv(t1, t2, a3);
+  __ addu(a3, t0, t1);
   // Argument 2 (a1): Previous index.
   // Already there
 
   // Argument 1 (a0): Subject string.
-  __ mov(a0, subject);
+  // Already there
 
   // Locate the code entry and call it.
   __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
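
The argument set-up in the hunk above reduces to the address arithmetic
sketched here (assumed names: seq_data is the untagged character start of the
sequential subject, i.e. the parent if the subject was sliced; char_shift is
0 for ASCII and 1 for two-byte; slice_offset is 0 unless the subject was a
sliced string):

    static const char* CharAddress(const char* seq_data, int slice_offset,
                                   int char_index, int char_shift) {
      return seq_data + ((slice_offset + char_index) << char_shift);
    }
    // Argument 3 (a2), input start: CharAddress(seq_data, slice_offset, previous_index, char_shift)
    // Argument 4 (a3), input end:   CharAddress(seq_data, slice_offset, subject_length, char_shift)
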
@@ -4663,11 +4687,14 @@
   // Check the result.
 
   Label success;
-  __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+  __ Branch(&success, eq,
+            subject, Operand(NativeRegExpMacroAssembler::SUCCESS));
   Label failure;
-  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
+  __ Branch(&failure, eq,
+            subject, Operand(NativeRegExpMacroAssembler::FAILURE));
   // If not exception it can only be retry. Handle that in the runtime system.
-  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+  __ Branch(&runtime, ne,
+            subject, Operand(NativeRegExpMacroAssembler::EXCEPTION));
   // Result must now be exception. If there is no pending exception already a
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // haven't created the exception yet. Handle that in the runtime system.
@@ -4678,16 +4705,16 @@
   __ li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
                                       masm->isolate())));
   __ lw(v0, MemOperand(a2, 0));
-  __ Branch(&runtime, eq, v0, Operand(a1));
+  __ Branch(&runtime, eq, subject, Operand(a1));
 
   __ sw(a1, MemOperand(a2, 0));  // Clear pending exception.
 
   // Check if the exception is a termination. If so, throw as uncatchable.
   __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
   Label termination_exception;
-  __ Branch(&termination_exception, eq, v0, Operand(a0));
+  __ Branch(&termination_exception, eq, subject, Operand(a0));
 
-  __ Throw(a0);  // Expects thrown value in v0.
+  __ Throw(subject);  // Expects thrown value in v0.
 
   __ bind(&termination_exception);
   __ ThrowUncatchable(TERMINATION, v0);  // Expects thrown value in v0.
@@ -4963,6 +4990,7 @@
   Label flat_string;
   Label ascii_string;
   Label got_char_code;
+  Label sliced_string;
 
   ASSERT(!t0.is(scratch_));
   ASSERT(!t0.is(index_));
@@ -4996,23 +5024,37 @@
   __ Branch(&flat_string, eq, t0, Operand(zero_reg));
 
   // Handle non-flat strings.
-  __ And(t0, result_, Operand(kIsConsStringMask));
-  __ Branch(&call_runtime_, eq, t0, Operand(zero_reg));
+  __ And(result_, result_, Operand(kStringRepresentationMask));
+  STATIC_ASSERT((kConsStringTag < kExternalStringTag));
+  STATIC_ASSERT((kSlicedStringTag > kExternalStringTag));
+  __ Branch(&sliced_string, gt, result_, Operand(kExternalStringTag));
+  __ Branch(&call_runtime_, eq, result_, Operand(kExternalStringTag));
 
   // ConsString.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
   // the case we would rather go to the runtime system now to flatten
   // the string.
+  Label assure_seq_string;
   __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
   __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
   __ Branch(&call_runtime_, ne, result_, Operand(t0));
 
   // Get the first of the two strings and load its instance type.
   __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+  __ jmp(&assure_seq_string);
+
+  // SlicedString, unpack and add offset.
+  __ bind(&sliced_string);
+  __ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
+  __ addu(scratch_, scratch_, result_);
+  __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
+
+  // Assure that we are dealing with a sequential string. Go to runtime if not.
+  __ bind(&assure_seq_string);
   __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
+  // Check that parent is not an external string. Go to runtime otherwise.
   STATIC_ASSERT(kSeqStringTag == 0);
 
   __ And(t0, result_, Operand(kStringRepresentationMask));
@@ -5583,6 +5625,11 @@
   Register to = t2;
   Register from = t3;
 
+  if (FLAG_string_slices) {
+    __ nop();  // Jumping as first instruction would crash the code generation.
+    __ jmp(&sub_string_runtime);
+  }
+
   // Check bounds and smi-ness.
   __ lw(to, MemOperand(sp, kToOffset));
   __ lw(from, MemOperand(sp, kFromOffset));
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index 8c605a3..1899843 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -59,10 +59,10 @@
   // Saved temporaries.
   1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 |
   1 << 20 | 1 << 21 | 1 << 22 | 1 << 23 |
-  // gp, sp, fp.
-  1 << 28 | 1 << 29 | 1 << 30;
+  // fp.
+  1 << 30;
 
-static const int kNumCalleeSaved = 11;
+static const int kNumCalleeSaved = 9;
 
 
 // Number of registers for which space is reserved in safepoints. Must be a
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index e1a23b4..cf48ccf 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -55,7 +55,6 @@
 
 
 static unsigned GetPropertyId(Property* property) {
-  if (property->is_synthetic()) return AstNode::kNoNumber;
   return property->id();
 }
 
@@ -697,109 +696,77 @@
   Comment cmnt(masm_, "[ Declaration");
   ASSERT(variable != NULL);  // Must have been resolved.
   Slot* slot = variable->AsSlot();
-  Property* prop = variable->AsProperty();
-
-  if (slot != NULL) {
-    switch (slot->type()) {
-      case Slot::PARAMETER:
-      case Slot::LOCAL:
-        if (mode == Variable::CONST) {
-          __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
-          __ sw(t0, MemOperand(fp, SlotOffset(slot)));
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
-        }
-        break;
-
-      case Slot::CONTEXT:
-        // We bypass the general EmitSlotSearch because we know more about
-        // this specific context.
-
-        // The variable in the decl always resides in the current function
-        // context.
-        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-        if (FLAG_debug_code) {
-          // Check that we're not inside a with or catch context.
-          __ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
-          __ LoadRoot(t0, Heap::kWithContextMapRootIndex);
-          __ Check(ne, "Declaration in with context.",
-                   a1, Operand(t0));
-          __ LoadRoot(t0, Heap::kCatchContextMapRootIndex);
-          __ Check(ne, "Declaration in catch context.",
-                   a1, Operand(t0));
-        }
-        if (mode == Variable::CONST) {
-          __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-          __ sw(at, ContextOperand(cp, slot->index()));
-          // No write barrier since the_hole_value is in old space.
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ sw(result_register(), ContextOperand(cp, slot->index()));
-          int offset = Context::SlotOffset(slot->index());
-          // We know that we have written a function, which is not a smi.
-          __ mov(a1, cp);
-          __ RecordWrite(a1, Operand(offset), a2, result_register());
-        }
-        break;
-
-      case Slot::LOOKUP: {
-        __ li(a2, Operand(variable->name()));
-        // Declaration nodes are always introduced in one of two modes.
-        ASSERT(mode == Variable::VAR ||
-               mode == Variable::CONST ||
-               mode == Variable::LET);
-        PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
-        __ li(a1, Operand(Smi::FromInt(attr)));
-        // Push initial value, if any.
-        // Note: For variables we must not push an initial value (such as
-        // 'undefined') because we may have a (legal) redeclaration and we
-        // must not destroy the current value.
-        if (mode == Variable::CONST) {
-          __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
-          __ Push(cp, a2, a1, a0);
-        } else if (function != NULL) {
-          __ Push(cp, a2, a1);
-          // Push initial value for function declaration.
-          VisitForStackValue(function);
-        } else {
-          ASSERT(Smi::FromInt(0) == 0);
-          // No initial value!
-          __ mov(a0, zero_reg);  // Operand(Smi::FromInt(0)));
-          __ Push(cp, a2, a1, a0);
-        }
-        __ CallRuntime(Runtime::kDeclareContextSlot, 4);
-        break;
+  ASSERT(slot != NULL);
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+    case Slot::LOCAL:
+      if (mode == Variable::CONST) {
+        __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+        __ sw(t0, MemOperand(fp, SlotOffset(slot)));
+      } else if (function != NULL) {
+        VisitForAccumulatorValue(function);
+        __ sw(result_register(), MemOperand(fp, SlotOffset(slot)));
       }
-    }
+      break;
 
-  } else if (prop != NULL) {
-    // A const declaration aliasing a parameter is an illegal redeclaration.
-    ASSERT(mode != Variable::CONST);
-    if (function != NULL) {
-      // We are declaring a function that rewrites to a property.
-      // Use (keyed) IC to set the initial value.  We cannot visit the
-      // rewrite because it's shared and we risk recording duplicate AST
-      // IDs for bailouts from optimized code.
-      ASSERT(prop->obj()->AsVariableProxy() != NULL);
-      { AccumulatorValueContext for_object(this);
-        EmitVariableLoad(prop->obj()->AsVariableProxy());
+    case Slot::CONTEXT:
+      // We bypass the general EmitSlotSearch because we know more about
+      // this specific context.
+
+      // The variable in the decl always resides in the current function
+      // context.
+      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+      if (FLAG_debug_code) {
+        // Check that we're not inside a with or catch context.
+        __ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
+        __ LoadRoot(t0, Heap::kWithContextMapRootIndex);
+        __ Check(ne, "Declaration in with context.",
+                 a1, Operand(t0));
+        __ LoadRoot(t0, Heap::kCatchContextMapRootIndex);
+        __ Check(ne, "Declaration in catch context.",
+                 a1, Operand(t0));
       }
+      if (mode == Variable::CONST) {
+        __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+        __ sw(at, ContextOperand(cp, slot->index()));
+        // No write barrier since the_hole_value is in old space.
+      } else if (function != NULL) {
+        VisitForAccumulatorValue(function);
+        __ sw(result_register(), ContextOperand(cp, slot->index()));
+        int offset = Context::SlotOffset(slot->index());
+        // We know that we have written a function, which is not a smi.
+        __ mov(a1, cp);
+        __ RecordWrite(a1, Operand(offset), a2, result_register());
+      }
+      break;
 
-      __ push(result_register());
-      VisitForAccumulatorValue(function);
-      __ mov(a0, result_register());
-      __ pop(a2);
-
-      ASSERT(prop->key()->AsLiteral() != NULL &&
-             prop->key()->AsLiteral()->handle()->IsSmi());
-      __ li(a1, Operand(prop->key()->AsLiteral()->handle()));
-
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
-      __ Call(ic);
-      // Value in v0 is ignored (declarations are statements).
+    case Slot::LOOKUP: {
+      __ li(a2, Operand(variable->name()));
+      // Declaration nodes are always introduced in one of two modes.
+      ASSERT(mode == Variable::VAR ||
+             mode == Variable::CONST ||
+             mode == Variable::LET);
+      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      __ li(a1, Operand(Smi::FromInt(attr)));
+      // Push initial value, if any.
+      // Note: For variables we must not push an initial value (such as
+      // 'undefined') because we may have a (legal) redeclaration and we
+      // must not destroy the current value.
+      if (mode == Variable::CONST) {
+        __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
+        __ Push(cp, a2, a1, a0);
+      } else if (function != NULL) {
+        __ Push(cp, a2, a1);
+        // Push initial value for function declaration.
+        VisitForStackValue(function);
+      } else {
+        ASSERT(Smi::FromInt(0) == 0);
+        // No initial value!
+        __ mov(a0, zero_reg);  // Operand(Smi::FromInt(0)).
+        __ Push(cp, a2, a1, a0);
+      }
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
     }
   }
 }
@@ -2286,36 +2253,10 @@
       EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
     } else {
       // Call to a keyed property.
-      // For a synthetic property use keyed load IC followed by function call,
-      // for a regular property use EmitKeyedCallWithIC.
-      if (prop->is_synthetic()) {
-        // Do not visit the object and key subexpressions (they are shared
-        // by all occurrences of the same rewritten parameter).
-        ASSERT(prop->obj()->AsVariableProxy() != NULL);
-        ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
-        Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
-        MemOperand operand = EmitSlotSearch(slot, a1);
-        __ lw(a1, operand);
-
-        ASSERT(prop->key()->AsLiteral() != NULL);
-        ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
-        __ li(a0, Operand(prop->key()->AsLiteral()->handle()));
-
-        // Record source code position for IC call.
-        SetSourcePosition(prop->position());
-
-        Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-        __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
-        __ lw(a1, GlobalObjectOperand());
-        __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
-        __ Push(v0, a1);  // Function, receiver.
-        EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
-      } else {
-        { PreservePositionScope scope(masm()->positions_recorder());
-          VisitForStackValue(prop->obj());
-        }
-        EmitKeyedCallWithIC(expr, prop->key());
+      { PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(prop->obj());
       }
+      EmitKeyedCallWithIC(expr, prop->key());
     }
   } else {
     { PreservePositionScope scope(masm()->positions_recorder());
@@ -3602,39 +3543,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-
-  // Load the function into v0.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Prepare for the test.
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  // Test for strict mode function.
-  __ lw(a1, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a1, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
-  __ And(at, a1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
-                               kSmiTagSize)));
-  __ Branch(if_true, ne, at, Operand(zero_reg));
-
-  // Test for native function.
-  __ And(at, a1, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-  __ Branch(if_true, ne, at, Operand(zero_reg));
-
-  // Not native or strict-mode function.
-  __ Branch(if_false);
-
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   Handle<String> name = expr->name();
   if (name->length() > 0 && name->Get(0) == '_') {
@@ -3686,18 +3594,12 @@
       Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
 
       if (prop != NULL) {
-        if (prop->is_synthetic()) {
-          // Result of deleting parameters is false, even when they rewrite
-          // to accesses on the arguments object.
-          context()->Plug(false);
-        } else {
-          VisitForStackValue(prop->obj());
-          VisitForStackValue(prop->key());
-          __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
-          __ push(a1);
-          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-          context()->Plug(v0);
-        }
+        VisitForStackValue(prop->obj());
+        VisitForStackValue(prop->key());
+        __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
+        __ push(a1);
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        context()->Plug(v0);
       } else if (var != NULL) {
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is.
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index da39962..85cb916 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -885,8 +885,8 @@
   MemOperand unmapped_location =
       GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
   __ lw(a2, unmapped_location);
-  __ Branch(&slow, eq, a2, Operand(a3));
   __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+  __ Branch(&slow, eq, a2, Operand(a3));
   __ mov(v0, a2);
   __ Ret();
   __ bind(&slow);
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 76b713f..8e4b8ef 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -757,15 +757,20 @@
                          uint16_t pos,
                          uint16_t size) {
   ASSERT(pos < 32);
-  ASSERT(pos + size < 32);
+  ASSERT(pos + size < 33);
 
   if (mips32r2) {
     ext_(rt, rs, pos, size);
   } else {
     // Move rs to rt and shift it left then right to get the
     // desired bitfield on the right side and zeroes on the left.
-    sll(rt, rs, 32 - (pos + size));
-    srl(rt, rt, 32 - size);
+    int shift_left = 32 - (pos + size);
+    sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.
+
+    int shift_right = 32 - size;
+    if (shift_right > 0) {
+      srl(rt, rt, shift_right);
+    }
   }
 }
 
@@ -807,28 +812,32 @@
 }
 
 
-void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
-  // Move the data from fs to t4.
-  mfc1(t4, fs);
-  return Cvt_d_uw(fd, t4);
+void MacroAssembler::Cvt_d_uw(FPURegister fd,
+                              FPURegister fs,
+                              FPURegister scratch) {
+  // Move the data from fs to t8.
+  mfc1(t8, fs);
+  Cvt_d_uw(fd, t8, scratch);
 }
 
 
-void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+void MacroAssembler::Cvt_d_uw(FPURegister fd,
+                              Register rs,
+                              FPURegister scratch) {
   // Convert rs to a FP value in fd (and fd + 1).
   // We do this by converting rs minus the MSB to avoid sign conversion,
-  // then adding 2^31-1 and 1 to the result.
+  // then adding 2^31 to the result (if needed).
 
-  ASSERT(!fd.is(f20));
+  ASSERT(!fd.is(scratch));
   ASSERT(!rs.is(t9));
-  ASSERT(!rs.is(t8));
+  ASSERT(!rs.is(at));
 
-  // Save rs's MSB to t8.
-  And(t8, rs, 0x80000000);
+  // Save rs's MSB to t9.
+  Ext(t9, rs, 31, 1);
   // Remove rs's MSB.
-  And(t9, rs, 0x7FFFFFFF);
-  // Move t9 to fd.
-  mtc1(t9, fd);
+  Ext(at, rs, 0, 31);
+  // Move the result to fd.
+  mtc1(at, fd);
 
   // Convert fd to a real FP value.
   cvt_d_w(fd, fd);
@@ -837,41 +846,39 @@
 
   // If rs's MSB was 0, it's done.
   // Otherwise we need to add that to the FP register.
-  Branch(&conversion_done, eq, t8, Operand(zero_reg));
+  Branch(&conversion_done, eq, t9, Operand(zero_reg));
 
-  // First load 2^31 - 1 into f20.
-  Or(t9, zero_reg, 0x7FFFFFFF);
-  mtc1(t9, f20);
+  // Load 2^31 into scratch as its float representation.
+  li(at, 0x41E00000);
+  mtc1(at, FPURegister::from_code(scratch.code() + 1));
+  mtc1(zero_reg, scratch);
+  // Add it to fd.
+  add_d(fd, fd, scratch);
 
-  // Convert it to FP and add it to fd.
-  cvt_d_w(f20, f20);
-  add_d(fd, fd, f20);
-  // Now add 1.
-  Or(t9, zero_reg, 1);
-  mtc1(t9, f20);
-
-  cvt_d_w(f20, f20);
-  add_d(fd, fd, f20);
   bind(&conversion_done);
 }
 
 
-void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) {
-  Trunc_uw_d(fs, t4);
-  mtc1(t4, fd);
+void MacroAssembler::Trunc_uw_d(FPURegister fd,
+                                FPURegister fs,
+                                FPURegister scratch) {
+  Trunc_uw_d(fs, t8, scratch);
+  mtc1(t8, fd);
 }
 
 
-void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
-  ASSERT(!fd.is(f22));
-  ASSERT(!rs.is(t8));
+void MacroAssembler::Trunc_uw_d(FPURegister fd,
+                                Register rs,
+                                FPURegister scratch) {
+  ASSERT(!fd.is(scratch));
+  ASSERT(!rs.is(at));
 
-  // Load 2^31 into f22.
-  Or(t8, zero_reg, 0x80000000);
-  Cvt_d_uw(f22, t8);
-
-  // Test if f22 > fd.
-  c(OLT, D, fd, f22);
+  // Load 2^31 into scratch as its float representation.
+  li(at, 0x41E00000);
+  mtc1(at, FPURegister::from_code(scratch.code() + 1));
+  mtc1(zero_reg, scratch);
+  // Test if scratch > fd.
+  c(OLT, D, fd, scratch);
 
   Label simple_convert;
   // If fd < 2^31 we can convert it normally.
@@ -879,18 +886,17 @@
 
   // First we subtract 2^31 from fd, then trunc it to rs
   // and add 2^31 to rs.
-
-  sub_d(f22, fd, f22);
-  trunc_w_d(f22, f22);
-  mfc1(rs, f22);
-  or_(rs, rs, t8);
+  sub_d(scratch, fd, scratch);
+  trunc_w_d(scratch, scratch);
+  mfc1(rs, scratch);
+  Or(rs, rs, 1 << 31);
 
   Label done;
   Branch(&done);
   // Simple conversion.
   bind(&simple_convert);
-  trunc_w_d(f22, fd);
-  mfc1(rs, f22);
+  trunc_w_d(scratch, fd);
+  mfc1(rs, scratch);
 
   bind(&done);
 }
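
The conversion above follows a simple identity, sketched here as host C++
(runnable as-is; 0x41E00000 is the high word of the IEEE-754 double 2^31,
which is what the macro loads via li/mtc1):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // double(u) for an unsigned 32-bit u, using only a signed 31-bit convert
    // plus an optional add of 2^31, the same steps Cvt_d_uw emits.
    static double CvtDUw(uint32_t u) {
      double result = static_cast<double>(static_cast<int32_t>(u & 0x7FFFFFFFu));
      if (u >> 31) result += 2147483648.0;  // Add 2^31 back if the MSB was set.
      return result;
    }

    int main() {
      double two_31 = 2147483648.0;
      uint64_t bits;
      std::memcpy(&bits, &two_31, sizeof(bits));
      assert(static_cast<uint32_t>(bits >> 32) == 0x41E00000u);
      assert(CvtDUw(0u) == 0.0);
      assert(CvtDUw(0x80000000u) == 2147483648.0);
      assert(CvtDUw(0xFFFFFFFFu) == 4294967295.0);
      return 0;
    }
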
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 4994516..0fcf6f1 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -524,12 +524,12 @@
   void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
 
   // Convert unsigned word to double.
-  void Cvt_d_uw(FPURegister fd, FPURegister fs);
-  void Cvt_d_uw(FPURegister fd, Register rs);
+  void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
+  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
 
   // Convert double to unsigned word.
-  void Trunc_uw_d(FPURegister fd, FPURegister fs);
-  void Trunc_uw_d(FPURegister fd, Register rs);
+  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
+  void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
 
   // Convert the HeapNumber pointed to by source to a 32bits signed integer
   // dest. If the HeapNumber does not fit into a 32bits signed integer branch
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index 9935ef9..45d3963 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -1036,12 +1036,12 @@
   }
 
   // Prepare for possible GC.
-  HandleScope handles;
+  HandleScope handles(isolate);
   Handle<Code> code_handle(re_code);
 
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
   // Current string.
-  bool is_ascii = subject->IsAsciiRepresentation();
+  bool is_ascii = subject->IsAsciiRepresentationUnderneath();
 
   ASSERT(re_code->instruction_start() <= *return_address);
   ASSERT(*return_address <=
@@ -1059,8 +1059,20 @@
     return EXCEPTION;
   }
 
+  Handle<String> subject_tmp = subject;
+  int slice_offset = 0;
+
+  // Extract the underlying string and the slice offset.
+  if (StringShape(*subject_tmp).IsCons()) {
+    subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+  } else if (StringShape(*subject_tmp).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(*subject_tmp);
+    subject_tmp = Handle<String>(slice->parent());
+    slice_offset = slice->offset();
+  }
+
   // String might have changed.
-  if (subject->IsAsciiRepresentation() != is_ascii) {
+  if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
     // If we changed between an ASCII and an UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
@@ -1071,8 +1083,8 @@
   // be a sequential or external string with the same content.
   // Update the start and end pointers in the stack frame to the current
   // location (whether it has actually moved or not).
-  ASSERT(StringShape(*subject).IsSequential() ||
-      StringShape(*subject).IsExternal());
+  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+      StringShape(*subject_tmp).IsExternal());
 
   // The original start address of the characters to match.
   const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
@@ -1080,13 +1092,14 @@
   // Find the current start address of the same character at the current string
   // position.
   int start_index = frame_entry<int>(re_frame, kStartIndex);
-  const byte* new_address = StringCharacterPosition(*subject, start_index);
+  const byte* new_address = StringCharacterPosition(*subject_tmp,
+                                                    start_index + slice_offset);
 
   if (start_address != new_address) {
     // If there is a difference, update the object pointer and start and end
     // addresses in the RegExp stack frame to match the new value.
     const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
-    int byte_length = end_address - start_address;
+    int byte_length = static_cast<int>(end_address - start_address);
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index ec63551..c17a658 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -3638,7 +3638,7 @@
       // __ mtc1(zero_reg, f1);  // MS 32-bits are all zero.
       // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit.
 
-      __ Cvt_d_uw(f0, value);
+      __ Cvt_d_uw(f0, value, f22);
 
       __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset - kHeapObjectTag));
 
@@ -4428,7 +4428,8 @@
   __ sw(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
   uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
   __ sw(exponent_reg, FieldMemOperand(scratch, offset));
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, value_reg);  // In delay slot.
 
   __ bind(&maybe_nan);
   // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
@@ -4478,7 +4479,8 @@
     __ sw(mantissa_reg, MemOperand(scratch, 0));
     __ sw(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
   }
-  __ Ret();
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, value_reg);  // In delay slot.
 
   // Handle store cache miss, replacing the ic with the generic stub.
   __ bind(&miss_force_generic);
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 8c0330d..4da360b 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -352,6 +352,31 @@
   if (IsSymbol()) {
     CHECK(!HEAP->InNewSpace(this));
   }
+  if (IsConsString()) {
+    ConsString::cast(this)->ConsStringVerify();
+  } else if (IsSlicedString()) {
+    SlicedString::cast(this)->SlicedStringVerify();
+  }
+}
+
+
+void ConsString::ConsStringVerify() {
+  CHECK(this->first()->IsString());
+  CHECK(this->second() == GetHeap()->empty_string() ||
+        this->second()->IsString());
+  CHECK(this->length() >= String::kMinNonFlatLength);
+  if (this->IsFlat()) {
+    // A flat cons can only be created by String::SlowTryFlatten.
+    // Afterwards, the first part may be externalized.
+    CHECK(this->first()->IsSeqString() || this->first()->IsExternalString());
+  }
+}
+
+
+void SlicedString::SlicedStringVerify() {
+  CHECK(!this->parent()->IsConsString());
+  CHECK(!this->parent()->IsSlicedString());
+  CHECK(this->length() >= SlicedString::kMinLength);
 }
 
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 098bd7a..b4c6a3a 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -178,10 +178,14 @@
 
 
 bool Object::IsConsString() {
-  if (!this->IsHeapObject()) return false;
-  uint32_t type = HeapObject::cast(this)->map()->instance_type();
-  return (type & (kIsNotStringMask | kStringRepresentationMask)) ==
-         (kStringTag | kConsStringTag);
+  if (!IsString()) return false;
+  return StringShape(String::cast(this)).IsCons();
+}
+
+
+bool Object::IsSlicedString() {
+  if (!IsString()) return false;
+  return StringShape(String::cast(this)).IsSliced();
 }
 
 
@@ -269,6 +273,38 @@
 }
 
 
+bool String::IsAsciiRepresentationUnderneath() {
+  uint32_t type = map()->instance_type();
+  STATIC_ASSERT(kIsIndirectStringTag != 0);
+  STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
+  ASSERT(IsFlat());
+  switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
+    case kAsciiStringTag:
+      return true;
+    case kTwoByteStringTag:
+      return false;
+    default:  // Cons or sliced string.  Need to go deeper.
+      return GetUnderlying()->IsAsciiRepresentation();
+  }
+}
+
+
+bool String::IsTwoByteRepresentationUnderneath() {
+  uint32_t type = map()->instance_type();
+  STATIC_ASSERT(kIsIndirectStringTag != 0);
+  STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
+  ASSERT(IsFlat());
+  switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
+    case kAsciiStringTag:
+      return false;
+    case kTwoByteStringTag:
+      return true;
+    default:  // Cons or sliced string.  Need to go deeper.
+      return GetUnderlying()->IsTwoByteRepresentation();
+  }
+}
+
+
 bool String::HasOnlyAsciiChars() {
   uint32_t type = map()->instance_type();
   return (type & kStringEncodingMask) == kAsciiStringTag ||
@@ -281,6 +317,16 @@
 }
 
 
+bool StringShape::IsSliced() {
+  return (type_ & kStringRepresentationMask) == kSlicedStringTag;
+}
+
+
+bool StringShape::IsIndirect() {
+  return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
+}
+
+
 bool StringShape::IsExternal() {
   return (type_ & kStringRepresentationMask) == kExternalStringTag;
 }
@@ -2075,6 +2121,7 @@
 CAST_ACCESSOR(SeqString)
 CAST_ACCESSOR(SeqAsciiString)
 CAST_ACCESSOR(SeqTwoByteString)
+CAST_ACCESSOR(SlicedString)
 CAST_ACCESSOR(ConsString)
 CAST_ACCESSOR(ExternalString)
 CAST_ACCESSOR(ExternalAsciiString)
@@ -2156,7 +2203,7 @@
 MaybeObject* String::TryFlatten(PretenureFlag pretenure) {
   if (!StringShape(this).IsCons()) return this;
   ConsString* cons = ConsString::cast(this);
-  if (cons->second()->length() == 0) return cons->first();
+  if (cons->IsFlat()) return cons->first();
   return SlowTryFlatten(pretenure);
 }
 
@@ -2164,10 +2211,8 @@
 String* String::TryFlattenGetString(PretenureFlag pretenure) {
   MaybeObject* flat = TryFlatten(pretenure);
   Object* successfully_flattened;
-  if (flat->ToObject(&successfully_flattened)) {
-    return String::cast(successfully_flattened);
-  }
-  return this;
+  if (!flat->ToObject(&successfully_flattened)) return this;
+  return String::cast(successfully_flattened);
 }
 
 
@@ -2185,6 +2230,9 @@
       return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
     case kExternalStringTag | kTwoByteStringTag:
       return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
+    case kSlicedStringTag | kAsciiStringTag:
+    case kSlicedStringTag | kTwoByteStringTag:
+      return SlicedString::cast(this)->SlicedStringGet(index);
     default:
       break;
   }
@@ -2205,15 +2253,19 @@
 
 
 bool String::IsFlat() {
-  switch (StringShape(this).representation_tag()) {
-    case kConsStringTag: {
-      String* second = ConsString::cast(this)->second();
-      // Only flattened strings have second part empty.
-      return second->length() == 0;
-    }
-    default:
-      return true;
-  }
+  if (!StringShape(this).IsCons()) return true;
+  return ConsString::cast(this)->second()->length() == 0;
+}
+
+
+String* String::GetUnderlying() {
+  // Giving direct access to underlying string only makes sense if the
+  // wrapping string is already flattened.
+  ASSERT(this->IsFlat());
+  ASSERT(StringShape(this).IsIndirect());
+  STATIC_ASSERT(ConsString::kFirstOffset == SlicedString::kParentOffset);
+  const int kUnderlyingOffset = SlicedString::kParentOffset;
+  return String::cast(READ_FIELD(this, kUnderlyingOffset));
 }
 
 
@@ -2272,6 +2324,20 @@
 }
 
 
+String* SlicedString::parent() {
+  return String::cast(READ_FIELD(this, kParentOffset));
+}
+
+
+void SlicedString::set_parent(String* parent) {
+  ASSERT(parent->IsSeqString());
+  WRITE_FIELD(this, kParentOffset, parent);
+}
+
+
+SMI_ACCESSORS(SlicedString, offset, kOffsetOffset)
+
+
 String* ConsString::first() {
   return String::cast(READ_FIELD(this, kFirstOffset));
 }
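
The inline predicates above rely on the new representation-tag layout: cons
(0x1) and sliced (0x3) strings are exactly the ones with the low tag bit set,
which is what kIsIndirectStringMask tests, and GetUnderlying can read a single
offset for both shapes because kFirstOffset and kParentOffset coincide.  A
standalone sketch of that tag arithmetic, with the constant values taken from
the objects.h hunk further down and everything else (helper names, main)
purely illustrative:

  #include <cassert>
  #include <stdint.h>

  // Tag values as used in src/objects.h by this patch.
  static const uint32_t kStringRepresentationMask = 0x3;  // assumed pre-existing value
  static const uint32_t kSeqStringTag         = 0x0;
  static const uint32_t kConsStringTag        = 0x1;
  static const uint32_t kExternalStringTag    = 0x2;
  static const uint32_t kSlicedStringTag      = 0x3;
  static const uint32_t kIsIndirectStringMask = 0x1;
  static const uint32_t kIsIndirectStringTag  = 0x1;

  // Mirror of StringShape::IsSliced / StringShape::IsIndirect above.
  static bool IsSliced(uint32_t type) {
    return (type & kStringRepresentationMask) == kSlicedStringTag;
  }
  static bool IsIndirect(uint32_t type) {
    return (type & kIsIndirectStringMask) == kIsIndirectStringTag;
  }

  int main() {
    assert(!IsIndirect(kSeqStringTag) && !IsIndirect(kExternalStringTag));
    assert(IsIndirect(kConsStringTag) && IsIndirect(kSlicedStringTag));
    assert(IsSliced(kSlicedStringTag) && !IsSliced(kConsStringTag));
    return 0;
  }
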
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index 84ab57f..bde9e83 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -58,6 +58,9 @@
           return kVisitConsString;
         }
 
+      case kSlicedStringTag:
+        return kVisitSlicedString;
+
       case kExternalStringTag:
         return GetVisitorIdForSize(kVisitDataObject,
                                    kVisitDataObjectGeneric,
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index c96a8ef..4ce1bd0 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -115,6 +115,7 @@
     kVisitStructGeneric,
 
     kVisitConsString,
+    kVisitSlicedString,
     kVisitOddball,
     kVisitCode,
     kVisitMap,
@@ -299,6 +300,11 @@
                                       ConsString::BodyDescriptor,
                                       int>::Visit);
 
+    table_.Register(kVisitSlicedString,
+                    &FixedBodyVisitor<StaticVisitor,
+                                      SlicedString::BodyDescriptor,
+                                      int>::Visit);
+
     table_.Register(kVisitFixedArray,
                     &FlexibleBodyVisitor<StaticVisitor,
                                          FixedArray::BodyDescriptor,
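
The new kVisitSlicedString entry registers a FixedBodyVisitor over
SlicedString::BodyDescriptor, which spans both the parent pointer and the
smi-encoded offset.  A rough, illustrative sketch (not V8's real types) of why
a smi field can safely sit inside a range of slots scanned as tagged values:
slots with the low bit clear are smis and can be skipped, slots with the low
bit set are heap pointers and must be traced.  The 32-bit smi shift below is
an assumption for the example:

  #include <cassert>
  #include <stdint.h>

  const intptr_t kHeapObjectTag = 1;  // low bit set on tagged heap pointers
  const intptr_t kSmiTagSize    = 1;  // 32-bit smi encoding assumed here

  intptr_t EncodeSmi(int value) {
    return static_cast<intptr_t>(value) << kSmiTagSize;
  }
  bool IsHeapPointer(intptr_t slot) { return (slot & kHeapObjectTag) != 0; }

  int main() {
    intptr_t offset_slot = EncodeSmi(13);            // e.g. a slice offset
    intptr_t parent_slot = 0x1000 | kHeapObjectTag;  // fake tagged pointer
    assert(!IsHeapPointer(offset_slot));  // a visitor can skip this slot
    assert(IsHeapPointer(parent_slot));   // this slot must be traced
    return 0;
  }
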
diff --git a/src/objects.cc b/src/objects.cc
index 0bf4c18..0a9598c 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1208,6 +1208,9 @@
       case kConsStringTag:
         ConsString::BodyDescriptor::IterateBody(this, v);
         break;
+      case kSlicedStringTag:
+        SlicedString::BodyDescriptor::IterateBody(this, v);
+        break;
       case kExternalStringTag:
         if ((type & kStringEncodingMask) == kAsciiStringTag) {
           reinterpret_cast<ExternalAsciiString*>(this)->
@@ -5042,6 +5045,7 @@
   int length = this->length();
   StringShape shape(this);
   String* string = this;
+  int offset = 0;
   if (shape.representation_tag() == kConsStringTag) {
     ConsString* cons = ConsString::cast(string);
     if (cons->second()->length() != 0) {
@@ -5050,6 +5054,14 @@
     string = cons->first();
     shape = StringShape(string);
   }
+  if (shape.representation_tag() == kSlicedStringTag) {
+    SlicedString* slice = SlicedString::cast(string);
+    offset = slice->offset();
+    string = slice->parent();
+    shape = StringShape(string);
+    ASSERT(shape.representation_tag() != kConsStringTag &&
+           shape.representation_tag() != kSlicedStringTag);
+  }
   if (shape.encoding_tag() == kAsciiStringTag) {
     const char* start;
     if (shape.representation_tag() == kSeqStringTag) {
@@ -5057,7 +5069,7 @@
     } else {
       start = ExternalAsciiString::cast(string)->resource()->data();
     }
-    return FlatContent(Vector<const char>(start, length));
+    return FlatContent(Vector<const char>(start + offset, length));
   } else {
     ASSERT(shape.encoding_tag() == kTwoByteStringTag);
     const uc16* start;
@@ -5066,7 +5078,7 @@
     } else {
       start = ExternalTwoByteString::cast(string)->resource()->data();
     }
-    return FlatContent(Vector<const uc16>(start, length));
+    return FlatContent(Vector<const uc16>(start + offset, length));
   }
 }
 
@@ -5138,13 +5150,17 @@
 
 
 const uc16* String::GetTwoByteData(unsigned start) {
-  ASSERT(!IsAsciiRepresentation());
+  ASSERT(!IsAsciiRepresentationUnderneath());
   switch (StringShape(this).representation_tag()) {
     case kSeqStringTag:
       return SeqTwoByteString::cast(this)->SeqTwoByteStringGetData(start);
     case kExternalStringTag:
       return ExternalTwoByteString::cast(this)->
         ExternalTwoByteStringGetData(start);
+    case kSlicedStringTag: {
+      SlicedString* slice = SlicedString::cast(this);
+      return slice->parent()->GetTwoByteData(start + slice->offset());
+    }
     case kConsStringTag:
       UNREACHABLE();
       return NULL;
@@ -5435,6 +5451,10 @@
                                                      max_chars);
         return rbb->util_buffer;
       }
+    case kSlicedStringTag:
+      return SlicedString::cast(input)->SlicedStringReadBlock(rbb,
+                                                              offset_ptr,
+                                                              max_chars);
     default:
       break;
   }
@@ -5578,6 +5598,11 @@
                                                      max_chars);
        }
        return;
+    case kSlicedStringTag:
+      SlicedString::cast(input)->SlicedStringReadBlockIntoBuffer(rbb,
+                                                                 offset_ptr,
+                                                                 max_chars);
+      return;
     default:
       break;
   }
@@ -5712,6 +5737,31 @@
 }
 
 
+uint16_t SlicedString::SlicedStringGet(int index) {
+  return parent()->Get(offset() + index);
+}
+
+
+const unibrow::byte* SlicedString::SlicedStringReadBlock(
+    ReadBlockBuffer* buffer, unsigned* offset_ptr, unsigned chars) {
+  unsigned offset = this->offset();
+  *offset_ptr += offset;
+  const unibrow::byte* answer = String::ReadBlock(String::cast(parent()),
+                                                  buffer, offset_ptr, chars);
+  *offset_ptr -= offset;
+  return answer;
+}
+
+
+void SlicedString::SlicedStringReadBlockIntoBuffer(
+    ReadBlockBuffer* buffer, unsigned* offset_ptr, unsigned chars) {
+  unsigned offset = this->offset();
+  *offset_ptr += offset;
+  String::ReadBlockIntoBuffer(String::cast(parent()),
+                              buffer, offset_ptr, chars);
+  *offset_ptr -= offset;
+}
+
 template <typename sinkchar>
 void String::WriteToFlat(String* src,
                          sinkchar* sink,
@@ -5779,6 +5829,13 @@
         }
         break;
       }
+      case kAsciiStringTag | kSlicedStringTag:
+      case kTwoByteStringTag | kSlicedStringTag: {
+        SlicedString* slice = SlicedString::cast(source);
+        unsigned offset = slice->offset();
+        WriteToFlat(slice->parent(), sink, from + offset, to + offset);
+        return;
+      }
     }
   }
 }
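
The GetFlatContent and WriteToFlat hunks above reduce a slice to its parent's
character data plus a constant offset.  A minimal sketch of that unwrapping in
plain C++ (SeqString and SlicedString here are simplified stand-ins, not the
V8 classes):

  #include <cassert>
  #include <cstring>
  #include <string>

  struct SeqString    { std::string chars; };
  struct SlicedString { const SeqString* parent; int offset; int length; };

  // Resolve a slice to a character pointer, mirroring the "start + offset"
  // adjustment made in the FlatContent hunk.
  const char* FlatStart(const SlicedString& s) {
    return s.parent->chars.data() + s.offset;
  }

  int main() {
    SeqString parent{"hello, sliced world"};
    SlicedString slice{&parent, 7, 6};  // the substring "sliced"
    assert(std::strncmp(FlatStart(slice), "sliced", slice.length) == 0);
    return 0;
  }
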
diff --git a/src/objects.h b/src/objects.h
index fc5d2e7..6c8888b 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -89,6 +89,7 @@
 //         - SeqString
 //           - SeqAsciiString
 //           - SeqTwoByteString
+//         - SlicedString
 //         - ConsString
 //         - ExternalString
 //           - ExternalAsciiString
@@ -283,6 +284,7 @@
   V(ASCII_STRING_TYPE)                                                         \
   V(CONS_STRING_TYPE)                                                          \
   V(CONS_ASCII_STRING_TYPE)                                                    \
+  V(SLICED_STRING_TYPE)                                                        \
   V(EXTERNAL_STRING_TYPE)                                                      \
   V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE)                                      \
   V(EXTERNAL_ASCII_STRING_TYPE)                                                \
@@ -401,6 +403,14 @@
     ConsString::kSize,                                                         \
     cons_ascii_string,                                                         \
     ConsAsciiString)                                                           \
+  V(SLICED_STRING_TYPE,                                                        \
+    SlicedString::kSize,                                                       \
+    sliced_string,                                                             \
+    SlicedString)                                                              \
+  V(SLICED_ASCII_STRING_TYPE,                                                  \
+    SlicedString::kSize,                                                       \
+    sliced_ascii_string,                                                       \
+    SlicedAsciiString)                                                         \
   V(EXTERNAL_STRING_TYPE,                                                      \
     ExternalTwoByteString::kSize,                                              \
     external_string,                                                           \
@@ -474,9 +484,17 @@
 enum StringRepresentationTag {
   kSeqStringTag = 0x0,
   kConsStringTag = 0x1,
-  kExternalStringTag = 0x2
+  kExternalStringTag = 0x2,
+  kSlicedStringTag = 0x3
 };
-const uint32_t kIsConsStringMask = 0x1;
+const uint32_t kIsIndirectStringMask = 0x1;
+const uint32_t kIsIndirectStringTag = 0x1;
+STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0);
+STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0);
+STATIC_ASSERT(
+    (kConsStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
+STATIC_ASSERT(
+    (kSlicedStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
 
 // If bit 7 is clear, then bit 3 indicates whether this two-byte
 // string actually contains ascii data.
@@ -511,6 +529,8 @@
   ASCII_STRING_TYPE = kAsciiStringTag | kSeqStringTag,
   CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag,
   CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
+  SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag,
+  SLICED_ASCII_STRING_TYPE = kAsciiStringTag | kSlicedStringTag,
   EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
   EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
       kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
@@ -718,6 +738,7 @@
   V(SeqString)                                 \
   V(ExternalString)                            \
   V(ConsString)                                \
+  V(SlicedString)                              \
   V(ExternalTwoByteString)                     \
   V(ExternalAsciiString)                       \
   V(SeqTwoByteString)                          \
@@ -5783,6 +5804,8 @@
   inline bool IsSequential();
   inline bool IsExternal();
   inline bool IsCons();
+  inline bool IsSliced();
+  inline bool IsIndirect();
   inline bool IsExternalAscii();
   inline bool IsExternalTwoByte();
   inline bool IsSequentialAscii();
@@ -5874,14 +5897,19 @@
   inline uint32_t hash_field();
   inline void set_hash_field(uint32_t value);
 
-  inline bool IsAsciiRepresentation();
-  inline bool IsTwoByteRepresentation();
-
   // Returns whether this string has only ASCII chars, i.e. all of them can
   // be ASCII encoded.  This might be the case even if the string is
   // two-byte.  Such strings may appear when the embedder prefers
   // two-byte external representations even for ASCII data.
-  //
+  inline bool IsAsciiRepresentation();
+  inline bool IsTwoByteRepresentation();
+
+  // Cons and slices have an encoding flag that may not represent the actual
+  // encoding of the underlying string.  This is taken into account here.
+  // Requires: this->IsFlat()
+  inline bool IsAsciiRepresentationUnderneath();
+  inline bool IsTwoByteRepresentationUnderneath();
+
   // NOTE: this should be considered only a hint.  False negatives are
   // possible.
   inline bool HasOnlyAsciiChars();
@@ -5921,6 +5949,10 @@
   // kind.
   FlatContent GetFlatContent();
 
+  // Returns the parent of a sliced string or first part of a flat cons string.
+  // Requires: StringShape(this).IsIndirect() && this->IsFlat()
+  inline String* GetUnderlying();
+
   // Mark the string as an undetectable object. It only applies to
   // ascii and two byte string types.
   bool MarkAsUndetectable();
@@ -6349,11 +6381,69 @@
   typedef FixedBodyDescriptor<kFirstOffset, kSecondOffset + kPointerSize, kSize>
           BodyDescriptor;
 
+#ifdef DEBUG
+  void ConsStringVerify();
+#endif
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
 };
 
 
+// The Sliced String class describes strings that are substrings of another
+// sequential string.  The motivation is to save time and memory when creating
+// a substring.  A Sliced String is described by a pointer to the parent,
+// the offset from the start of the parent string and the length.  Using
+// a Sliced String therefore requires unpacking the parent string and
+// adding the offset to the start address.  Substrings of a Sliced String
+// are not nested, since the double indirection is simplified when creating
+// such a substring.
+// Currently missing features are:
+//  - handling externalized parent strings
+//  - external strings as parents
+//  - truncating sliced strings so an otherwise unneeded parent can be GC'ed.
+class SlicedString: public String {
+ public:
+
+  inline String* parent();
+  inline void set_parent(String* parent);
+  inline int offset();
+  inline void set_offset(int offset);
+
+  // Dispatched behavior.
+  uint16_t SlicedStringGet(int index);
+
+  // Casting.
+  static inline SlicedString* cast(Object* obj);
+
+  // Layout description.
+  static const int kParentOffset = POINTER_SIZE_ALIGN(String::kSize);
+  static const int kOffsetOffset = kParentOffset + kPointerSize;
+  static const int kSize = kOffsetOffset + kPointerSize;
+
+  // Support for StringInputBuffer
+  inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer,
+                                                    unsigned* offset_ptr,
+                                                    unsigned chars);
+  inline void SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+                                              unsigned* offset_ptr,
+                                              unsigned chars);
+  // Minimum length for a sliced string.
+  static const int kMinLength = 13;
+
+  typedef FixedBodyDescriptor<kParentOffset,
+                              kOffsetOffset + kPointerSize, kSize>
+          BodyDescriptor;
+
+#ifdef DEBUG
+  void SlicedStringVerify();
+#endif
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString);
+};
+
+
 // The ExternalString class describes string values that are backed by
 // a string resource that lies outside the V8 heap.  ExternalStrings
 // consist of the length field common to all strings, a pointer to the
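
SlicedString stores a parent pointer and a smi offset on top of the common
string header, and kMinLength = 13 gates when a slice is worth creating at
all: a short substring is cheaper to copy than to keep its whole parent alive.
A hedged sketch of that trade-off; only the threshold value comes from the
hunk above, ChooseSubstringKind and its shape are an assumption for
illustration:

  enum class SubstringKind { kCopy, kSlice };

  static const int kMinLength = 13;  // SlicedString::kMinLength in this patch

  SubstringKind ChooseSubstringKind(int requested_length) {
    return requested_length < kMinLength ? SubstringKind::kCopy
                                         : SubstringKind::kSlice;
  }

  int main() {
    return (ChooseSubstringKind(5)  == SubstringKind::kCopy &&
            ChooseSubstringKind(20) == SubstringKind::kSlice) ? 0 : 1;
  }
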
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 5578243..f91ea93 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -120,27 +120,31 @@
   String* subject_ptr = *subject;
   // Character offsets into string.
   int start_offset = previous_index;
-  int end_offset = subject_ptr->length();
+  int char_length = subject_ptr->length() - start_offset;
+  int slice_offset = 0;
 
-  // The string has been flattened, so it it is a cons string it contains the
+  // The string has been flattened, so if it is a cons string it contains the
   // full string in the first part.
   if (StringShape(subject_ptr).IsCons()) {
     ASSERT_EQ(0, ConsString::cast(subject_ptr)->second()->length());
     subject_ptr = ConsString::cast(subject_ptr)->first();
+  } else if (StringShape(subject_ptr).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(subject_ptr);
+    subject_ptr = slice->parent();
+    slice_offset = slice->offset();
   }
   // Ensure that an underlying string has the same ascii-ness.
   bool is_ascii = subject_ptr->IsAsciiRepresentation();
   ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
   // String is now either Sequential or External
   int char_size_shift = is_ascii ? 0 : 1;
-  int char_length = end_offset - start_offset;
 
   const byte* input_start =
-      StringCharacterPosition(subject_ptr, start_offset);
+      StringCharacterPosition(subject_ptr, start_offset + slice_offset);
   int byte_length = char_length << char_size_shift;
   const byte* input_end = input_start + byte_length;
   Result res = Execute(*regexp_code,
-                       subject_ptr,
+                       *subject,
                        start_offset,
                        input_start,
                        input_end,
@@ -152,7 +156,7 @@
 
 NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
     Code* code,
-    String* input,
+    String* input,  // The original subject string; may be sliced or cons.
     int start_offset,
     const byte* input_start,
     const byte* input_end,
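
The adjusted entry code computes character pointers against the parent string,
shifted by the slice offset, while lengths and start indices still refer to
the sliced subject itself.  A simplified, standalone model of that pointer
arithmetic (Subject, InputStart and InputEnd are illustrative names, not V8
API):

  #include <cassert>
  #include <string>

  struct Subject {
    const std::string* parent;  // underlying sequential string
    int slice_offset;           // 0 for non-sliced subjects
    int length;                 // length of the (sliced) subject itself
  };

  const char* InputStart(const Subject& s, int previous_index) {
    return s.parent->data() + s.slice_offset + previous_index;
  }

  const char* InputEnd(const Subject& s, int previous_index) {
    // char_length = subject length - previous_index, as in the hunk above.
    return InputStart(s, previous_index) + (s.length - previous_index);
  }

  int main() {
    std::string parent = "xxxabcdefxxx";
    Subject subject = {&parent, 3, 6};       // models the slice "abcdef"
    assert(*InputStart(subject, 2) == 'c');  // matching resumes at index 2
    assert(InputEnd(subject, 2) - InputStart(subject, 2) == 4);
    return 0;
  }
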
diff --git a/src/runtime.cc b/src/runtime.cc
index fd866bf..802fd68 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -683,8 +683,18 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  Object* obj = args[0];
+  CONVERT_CHECKED(JSReceiver, input_obj, args[0]);
+  Object* obj = input_obj;
+  // We don't expect access checks to be needed on JSProxy objects.
+  ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
   do {
+    if (obj->IsAccessCheckNeeded() &&
+        !isolate->MayNamedAccess(JSObject::cast(obj),
+                                 isolate->heap()->Proto_symbol(),
+                                 v8::ACCESS_GET)) {
+      isolate->ReportFailedAccessCheck(JSObject::cast(obj), v8::ACCESS_GET);
+      return isolate->heap()->undefined_value();
+    }
     obj = obj->GetPrototype();
   } while (obj->IsJSObject() &&
            JSObject::cast(obj)->map()->is_hidden_prototype());
@@ -1856,10 +1866,19 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetGlobalReceiver) {
-  // Returns a real global receiver, not one of builtins object.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
+  NoHandleAllocation handle_free;
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSFunction, function, args[0]);
+  SharedFunctionInfo* shared = function->shared();
+  // Returns undefined for strict or native functions, or
+  // the associated global receiver for "normal" functions.
+  if (shared->native() || shared->strict_mode()) {
+    return isolate->heap()->undefined_value();
+  }
+
   Context* global_context =
-      isolate->context()->global()->global_context();
+      function->context()->global()->global_context();
   return global_context->global()->global_receiver();
 }
 
@@ -3664,7 +3683,7 @@
   HandleScope handles(isolate);
 
   CONVERT_ARG_CHECKED(String, subject, 1);
-  if (!subject->IsFlat()) { FlattenString(subject); }
+  if (!subject->IsFlat()) FlattenString(subject);
   CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
   CONVERT_ARG_CHECKED(JSArray, last_match_info, 2);
   CONVERT_ARG_CHECKED(JSArray, result_array, 3);
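
Runtime_GetDefaultReceiver reduces to a small rule: strict-mode and native
functions get undefined as their implicit receiver, everything else gets the
global receiver of the function's own context rather than the caller's.  A
stand-alone restatement of that rule under simplified types (FakeFunction and
DefaultReceiver are placeholders, not V8 API):

  #include <cassert>

  struct FakeFunction {
    bool strict_mode;
    bool native;
    const void* own_global_receiver;  // stands in for the function's context
  };

  const void* DefaultReceiver(const FakeFunction& f) {
    if (f.native || f.strict_mode) return nullptr;  // i.e. undefined
    return f.own_global_receiver;
  }

  int main() {
    int global = 0;
    FakeFunction sloppy = {false, false, &global};
    FakeFunction strict = {true, false, &global};
    assert(DefaultReceiver(sloppy) == &global);
    assert(DefaultReceiver(strict) == nullptr);
    return 0;
  }
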
diff --git a/src/runtime.h b/src/runtime.h
index 06d3684..91a19df 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -65,7 +65,7 @@
   F(ToSlowProperties, 1, 1) \
   F(FinishArrayPrototypeSetup, 1, 1) \
   F(SpecialArrayFunctions, 1, 1) \
-  F(GetGlobalReceiver, 0, 1) \
+  F(GetDefaultReceiver, 1, 1) \
   \
   F(GetPrototype, 1, 1) \
   F(IsInPrototypeChain, 2, 1) \
@@ -491,8 +491,7 @@
   F(IsRegExpEquivalent, 2, 1)                                                \
   F(HasCachedArrayIndex, 1, 1)                                               \
   F(GetCachedArrayIndex, 1, 1)                                               \
-  F(FastAsciiArrayJoin, 2, 1)                                                \
-  F(IsNativeOrStrictMode, 1, 1)
+  F(FastAsciiArrayJoin, 2, 1)
 
 
 // ----------------------------------------------------------------------------
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index ca1177f..069d01d 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -294,13 +294,13 @@
   SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
 }
 
-bool Page::IsPageExecutable() {
-  return GetPageFlag(IS_EXECUTABLE);
+Executability Page::PageExecutability() {
+  return GetPageFlag(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
 }
 
 
-void Page::SetIsPageExecutable(bool is_page_executable) {
-  SetPageFlag(IS_EXECUTABLE, is_page_executable);
+void Page::SetPageExecutability(Executability executable) {
+  SetPageFlag(IS_EXECUTABLE, executable == EXECUTABLE);
 }
 
 
@@ -434,23 +434,6 @@
 
 
 // -----------------------------------------------------------------------------
-// LargeObjectChunk
-
-Address LargeObjectChunk::GetStartAddress() {
-  // Round the chunk address up to the nearest page-aligned address
-  // and return the heap object in that page.
-  Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
-  return page->ObjectAreaStart();
-}
-
-
-void LargeObjectChunk::Free(Executability executable) {
-  Isolate* isolate =
-      Page::FromAddress(RoundUp(address(), Page::kPageSize))->heap_->isolate();
-  isolate->memory_allocator()->FreeRawMemory(address(), size(), executable);
-}
-
-// -----------------------------------------------------------------------------
 // NewSpace
 
 MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
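
The Page accessors above switch from a raw bool to the Executability enum
while the underlying storage stays a single page flag.  A small sketch of the
same shape of accessor, with the flag word modelled as a plain bitfield
(FakePage and kIsExecutableBit are illustrative; the enum matches V8's
Executability):

  #include <cassert>
  #include <stdint.h>

  enum Executability { NOT_EXECUTABLE, EXECUTABLE };

  static const uint32_t kIsExecutableBit = 1u << 0;

  struct FakePage {
    uint32_t flags;

    Executability PageExecutability() const {
      return (flags & kIsExecutableBit) ? EXECUTABLE : NOT_EXECUTABLE;
    }
    void SetPageExecutability(Executability executable) {
      if (executable == EXECUTABLE) flags |= kIsExecutableBit;
      else flags &= ~kIsExecutableBit;
    }
  };

  int main() {
    FakePage page = {0};
    page.SetPageExecutability(EXECUTABLE);
    assert(page.PageExecutability() == EXECUTABLE);
    return 0;
  }
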
diff --git a/src/spaces.cc b/src/spaces.cc
index a782ba9..97c6d2a 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -2722,12 +2722,26 @@
 
   LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
   chunk->size_ = size;
-  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
-  page->heap_ = isolate->heap();
+  chunk->GetPage()->heap_ = isolate->heap();
   return chunk;
 }
 
 
+void LargeObjectChunk::Free(Executability executable) {
+  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+  ObjectSpace space =
+      (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
+  // Do not access instance fields after FreeRawMemory!
+  Address my_address = address();
+  size_t my_size = size();
+  Isolate* isolate = GetPage()->heap_->isolate();
+  MemoryAllocator* a = isolate->memory_allocator();
+  a->FreeRawMemory(my_address - guard_size, my_size + guard_size, executable);
+  a->PerformAllocationCallback(space, kAllocationActionFree, my_size);
+  LOG(isolate, DeleteEvent("LargeObjectChunk", my_address));
+}
+
+
 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
   int os_alignment = static_cast<int>(OS::AllocateAlignment());
   if (os_alignment < Page::kPageSize) {
@@ -2760,25 +2774,9 @@
   while (first_chunk_ != NULL) {
     LargeObjectChunk* chunk = first_chunk_;
     first_chunk_ = first_chunk_->next();
-    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
-    Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
-    Executability executable =
-        page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
-    ObjectSpace space = kObjectSpaceLoSpace;
-    if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
-    size_t size = chunk->size();
-    size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
-    heap()->isolate()->memory_allocator()->FreeRawMemory(
-        chunk->address() - guard_size,
-        size + guard_size,
-        executable);
-    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
-        space, kAllocationActionFree, size);
+    chunk->Free(chunk->GetPage()->PageExecutability());
   }
-
-  size_ = 0;
-  page_count_ = 0;
-  objects_size_ = 0;
+  Setup();
 }
 
 
@@ -2806,14 +2804,14 @@
   first_chunk_ = chunk;
 
   // Initialize page header.
-  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
+  Page* page = chunk->GetPage();
   Address object_address = page->ObjectAreaStart();
 
   // Clear the low order bit of the second word in the page to flag it as a
   // large object page.  If the chunk_size happened to be written there, its
   // low order bit should already be clear.
   page->SetIsLargeObjectPage(true);
-  page->SetIsPageExecutable(executable);
+  page->SetPageExecutability(executable);
   page->SetRegionMarks(Page::kAllRegionsCleanMarks);
   return HeapObject::FromAddress(object_address);
 }
@@ -2944,14 +2942,8 @@
       previous = current;
       current = current->next();
     } else {
-      Page* page = Page::FromAddress(RoundUp(current->address(),
-                                     Page::kPageSize));
-      Executability executable =
-          page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
-      Address chunk_address = current->address();
-      size_t chunk_size = current->size();
-
       // Cut the chunk out from the chunk list.
+      LargeObjectChunk* current_chunk = current;
       current = current->next();
       if (previous == NULL) {
         first_chunk_ = current;
@@ -2964,22 +2956,10 @@
           object, heap()->isolate());
       LiveObjectList::ProcessNonLive(object);
 
-      size_ -= static_cast<int>(chunk_size);
+      size_ -= static_cast<int>(current_chunk->size());
       objects_size_ -= object->Size();
       page_count_--;
-      ObjectSpace space = kObjectSpaceLoSpace;
-      size_t guard_size = 0;
-      if (executable == EXECUTABLE) {
-        space = kObjectSpaceCodeSpace;
-        guard_size = Page::kPageSize;
-      }
-      heap()->isolate()->memory_allocator()->FreeRawMemory(
-          chunk_address - guard_size,
-          chunk_size + guard_size,
-          executable);
-      heap()->isolate()->memory_allocator()->PerformAllocationCallback(
-          space, kAllocationActionFree, size_);
-      LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
+      current_chunk->Free(current_chunk->GetPage()->PageExecutability());
     }
   }
 }
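
LargeObjectChunk::Free now owns the guard-page bookkeeping that used to be
repeated at every call site: executable chunks are preceded by one guard page,
so the raw range handed back to the allocator starts one page earlier and is
one page larger.  A simplified model of just that range computation (the page
size below is a placeholder, not Page::kPageSize):

  #include <cassert>
  #include <stddef.h>

  enum Executability { NOT_EXECUTABLE, EXECUTABLE };

  static const size_t kPageSize = 4096;  // placeholder page size

  struct RawRange { size_t start; size_t size; };

  RawRange RangeToFree(size_t chunk_address, size_t chunk_size,
                       Executability executable) {
    size_t guard_size = (executable == EXECUTABLE) ? kPageSize : 0;
    RawRange range = {chunk_address - guard_size, chunk_size + guard_size};
    return range;
  }

  int main() {
    RawRange data = RangeToFree(0x10000, 8 * kPageSize, NOT_EXECUTABLE);
    RawRange code = RangeToFree(0x10000, 8 * kPageSize, EXECUTABLE);
    assert(data.start == 0x10000 && data.size == 8 * kPageSize);
    assert(code.start == 0x10000 - kPageSize && code.size == 9 * kPageSize);
    return 0;
  }
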
diff --git a/src/spaces.h b/src/spaces.h
index a0f4ba1..908cd30 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -200,9 +200,9 @@
 
   inline void SetIsLargeObjectPage(bool is_large_object_page);
 
-  inline bool IsPageExecutable();
+  inline Executability PageExecutability();
 
-  inline void SetIsPageExecutable(bool is_page_executable);
+  inline void SetPageExecutability(Executability executable);
 
   // Returns the offset of a given address to this page.
   INLINE(int Offset(Address a)) {
@@ -2144,7 +2144,7 @@
   static LargeObjectChunk* New(int size_in_bytes, Executability executable);
 
   // Free the memory associated with the chunk.
-  inline void Free(Executability executable);
+  void Free(Executability executable);
 
   // Interpret a raw address as a large object chunk.
   static LargeObjectChunk* FromAddress(Address address) {
@@ -2154,13 +2154,17 @@
   // Returns the address of this chunk.
   Address address() { return reinterpret_cast<Address>(this); }
 
+  Page* GetPage() {
+    return Page::FromAddress(RoundUp(address(), Page::kPageSize));
+  }
+
   // Accessors for the fields of the chunk.
   LargeObjectChunk* next() { return next_; }
   void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
   size_t size() { return size_ & ~Page::kPageFlagMask; }
 
   // Compute the start address in the chunk.
-  inline Address GetStartAddress();
+  Address GetStartAddress() { return GetPage()->ObjectAreaStart(); }
 
   // Returns the object in this chunk.
   HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
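
GetPage() is only a round-up of the chunk address to the next page boundary.
A standalone sketch of that alignment arithmetic, assuming a power-of-two page
size (the 8K value below is an assumption, not quoted from the patch):

  #include <cassert>
  #include <stdint.h>

  uintptr_t RoundUp(uintptr_t value, uintptr_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
  }

  int main() {
    const uintptr_t kPageSize = 1u << 13;            // 8K pages, assumed
    assert(RoundUp(0x20000, kPageSize) == 0x20000);  // already aligned
    assert(RoundUp(0x20001, kPageSize) == 0x22000);  // next page boundary
    return 0;
  }
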
diff --git a/src/string.js b/src/string.js
index 08fccc4..a70eead 100644
--- a/src/string.js
+++ b/src/string.js
@@ -251,8 +251,7 @@
 
   // Compute the string to replace with.
   if (IS_FUNCTION(replace)) {
-    var receiver =
-        %_IsNativeOrStrictMode(replace) ? void 0 : %GetGlobalReceiver();
+    var receiver = %GetDefaultReceiver(replace);
     builder.add(%_CallFunction(receiver,
                                search,
                                start,
@@ -420,8 +419,7 @@
   if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
     var match_start = 0;
     var override = new InternalArray(null, 0, subject);
-    var receiver =
-        %_IsNativeOrStrictMode(replace) ? void 0 : %GetGlobalReceiver();
+    var receiver = %GetDefaultReceiver(replace);
     while (i < len) {
       var elem = res[i];
       if (%_IsSmi(elem)) {
@@ -478,8 +476,7 @@
     // No captures, only the match, which is always valid.
     var s = SubString(subject, index, endOfMatch);
     // Don't call directly to avoid exposing the built-in global object.
-    var receiver =
-        %_IsNativeOrStrictMode(replace) ? void 0 : %GetGlobalReceiver();
+    var receiver = %GetDefaultReceiver(replace);
     replacement =
         %_CallFunction(receiver, s, index, subject, replace);
   } else {
diff --git a/src/v8natives.js b/src/v8natives.js
index c16d73e..035fd2e 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -354,33 +354,50 @@
 // ES5 8.10.4
 function FromPropertyDescriptor(desc) {
   if (IS_UNDEFINED(desc)) return desc;
-  var obj = new $Object();
+
   if (IsDataDescriptor(desc)) {
-    obj.value = desc.getValue();
-    obj.writable = desc.isWritable();
+    return { value: desc.getValue(),
+             writable: desc.isWritable(),
+             enumerable: desc.isEnumerable(),
+             configurable: desc.isConfigurable() };
   }
-  if (IsAccessorDescriptor(desc)) {
-    obj.get = desc.getGet();
-    obj.set = desc.getSet();
-  }
-  obj.enumerable = desc.isEnumerable();
-  obj.configurable = desc.isConfigurable();
-  return obj;
+  // Must be an AccessorDescriptor then. We never return a generic descriptor.
+  return { get: desc.getGet(),
+           set: desc.getSet(),
+           enumerable: desc.isEnumerable(),
+           configurable: desc.isConfigurable() };
 }
 
+
 // Harmony Proxies
 function FromGenericPropertyDescriptor(desc) {
   if (IS_UNDEFINED(desc)) return desc;
   var obj = new $Object();
-  if (desc.hasValue()) obj.value = desc.getValue();
-  if (desc.hasWritable()) obj.writable = desc.isWritable();
-  if (desc.hasGetter()) obj.get = desc.getGet();
-  if (desc.hasSetter()) obj.set = desc.getSet();
-  if (desc.hasEnumerable()) obj.enumerable = desc.isEnumerable();
-  if (desc.hasConfigurable()) obj.configurable = desc.isConfigurable();
+
+  if (desc.hasValue()) {
+    %IgnoreAttributesAndSetProperty(obj, "value", desc.getValue(), NONE);
+  }
+  if (desc.hasWritable()) {
+    %IgnoreAttributesAndSetProperty(obj, "writable", desc.isWritable(), NONE);
+  }
+  if (desc.hasGetter()) {
+    %IgnoreAttributesAndSetProperty(obj, "get", desc.getGet(), NONE);
+  }
+  if (desc.hasSetter()) {
+    %IgnoreAttributesAndSetProperty(obj, "set", desc.getSet(), NONE);
+  }
+  if (desc.hasEnumerable()) {
+    %IgnoreAttributesAndSetProperty(obj, "enumerable",
+                                    desc.isEnumerable(), NONE);
+  }
+  if (desc.hasConfigurable()) {
+    %IgnoreAttributesAndSetProperty(obj, "configurable",
+                                    desc.isConfigurable(), NONE);
+  }
   return obj;
 }
 
+
 // ES5 8.10.5.
 function ToPropertyDescriptor(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
@@ -621,38 +638,6 @@
 }
 
 
-// ES5 section 8.12.2.
-function GetProperty(obj, p) {
-  if (%IsJSProxy(obj)) {
-    var handler = %GetHandler(obj);
-    var descriptor = CallTrap1(obj, "getPropertyDescriptor", void 0, p);
-    if (IS_UNDEFINED(descriptor)) return descriptor;
-    var desc = ToCompletePropertyDescriptor(descriptor);
-    if (!desc.isConfigurable()) {
-      throw MakeTypeError("proxy_prop_not_configurable",
-                          [handler, "getPropertyDescriptor", p, descriptor]);
-    }
-    return desc;
-  }
-  var prop = GetOwnProperty(obj);
-  if (!IS_UNDEFINED(prop)) return prop;
-  var proto = %GetPrototype(obj);
-  if (IS_NULL(proto)) return void 0;
-  return GetProperty(proto, p);
-}
-
-
-// ES5 section 8.12.6
-function HasProperty(obj, p) {
-  if (%IsJSProxy(obj)) {
-    var handler = %GetHandler(obj);
-    return ToBoolean(CallTrap1(handler, "has", DerivedHasTrap, p));
-  }
-  var desc = GetProperty(obj, p);
-  return IS_UNDEFINED(desc) ? false : true;
-}
-
-
 // ES5 section 8.12.1.
 function GetOwnProperty(obj, v) {
   var p = ToString(v);
diff --git a/src/version.cc b/src/version.cc
index fd168e3..bf11f4f 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     5
-#define BUILD_NUMBER      8
+#define BUILD_NUMBER      9
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 94ed0cb..0b785c5 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -2374,7 +2374,6 @@
   __ testq(kScratchRegister, kScratchRegister);
   __ j(zero, &runtime);
 
-
   // Check that the first argument is a JSRegExp object.
   __ movq(rax, Operand(rsp, kJSRegExpOffset));
   __ JumpIfSmi(rax, &runtime);
@@ -2445,10 +2444,14 @@
   __ cmpl(rdx, rdi);
   __ j(greater, &runtime);
 
+  // Reset offset for possibly sliced string.
+  __ Set(r14, 0);
   // rax: RegExp data (FixedArray)
   // Check the representation and encoding of the subject string.
   Label seq_ascii_string, seq_two_byte_string, check_code;
   __ movq(rdi, Operand(rsp, kSubjectOffset));
+  // Make a copy of the original subject string.
+  __ movq(r15, rdi);
   __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
   // First check for flat two byte string.
@@ -2457,28 +2460,40 @@
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
   // Any other flat string must be a flat ascii string.
-  __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
+  __ andb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
   __ j(zero, &seq_ascii_string, Label::kNear);
 
-  // Check for flat cons string.
+  // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  STATIC_ASSERT(kExternalStringTag !=0);
-  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
-  __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
-  __ j(not_zero, &runtime);
-  // String is a cons string.
+  // In the case of a sliced string its offset has to be taken into account.
+  Label cons_string, check_encoding;
+  STATIC_ASSERT((kConsStringTag < kExternalStringTag));
+  STATIC_ASSERT((kSlicedStringTag > kExternalStringTag));
+  __ cmpq(rbx, Immediate(kExternalStringTag));
+  __ j(less, &cons_string, Label::kNear);
+  __ j(equal, &runtime);
+
+  // String is sliced.
+  __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
+  __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
+  // r14: slice offset
+  // r15: original subject string
+  // rdi: parent string
+  __ jmp(&check_encoding, Label::kNear);
+  // String is a cons string, check whether it is flat.
+  __ bind(&cons_string);
   __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
                  Heap::kEmptyStringRootIndex);
   __ j(not_equal, &runtime);
   __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
+  // rdi: first part of cons string or parent of sliced string.
+  // rbx: map of first part of cons string or map of parent of sliced string.
+  // Is first part of cons or parent of slice a flat two byte string?
+  __ bind(&check_encoding);
   __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
-  // String is a cons string with empty second part.
-  // rdi: first part of cons string.
-  // rbx: map of first part of cons string.
-  // Is first part a flat two byte string?
   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
            Immediate(kStringRepresentationMask | kStringEncodingMask));
   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
@@ -2575,33 +2590,40 @@
   // rbx: previous index
   // rcx: encoding of subject string (1 if ascii 0 if two_byte);
   // r11: code
+  // r14: slice offset
+  // r15: original subject string
 
-  // Argument 4: End of string data
-  // Argument 3: Start of string data
-  Label setup_two_byte, setup_rest;
-  __ testb(rcx, rcx);  // Last use of rcx as encoding of subject string.
-  __ j(zero, &setup_two_byte, Label::kNear);
-  __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
-  __ lea(arg4, FieldOperand(rdi, rcx, times_1, SeqAsciiString::kHeaderSize));
-  __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
-  __ jmp(&setup_rest, Label::kNear);
-  __ bind(&setup_two_byte);
-  __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
-  __ lea(arg4, FieldOperand(rdi, rcx, times_2, SeqTwoByteString::kHeaderSize));
-  __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
-
-  __ bind(&setup_rest);
   // Argument 2: Previous index.
   __ movq(arg2, rbx);
 
-  // Argument 1: Subject string.
-#ifdef _WIN64
-  __ movq(arg1, rdi);
-#else
-  // Already there in AMD64 calling convention.
-  ASSERT(arg1.is(rdi));
-  USE(arg1);
-#endif
+  // Argument 4: End of string data
+  // Argument 3: Start of string data
+  Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
+  // Prepare start and end index of the input.
+  // The length is that of the original, possibly sliced, subject string.
+  __ addq(rbx, r14);
+  __ SmiToInteger32(arg3, FieldOperand(r15, String::kLengthOffset));
+  __ addq(r14, arg3);  // Using arg3 as scratch.
+
+  // rbx: start index of the input
+  // r14: end index of the input
+  // r15: original subject string
+  __ testb(rcx, rcx);  // Last use of rcx as encoding of subject string.
+  __ j(zero, &setup_two_byte, Label::kNear);
+  __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqAsciiString::kHeaderSize));
+  __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
+  __ jmp(&setup_rest, Label::kNear);
+  __ bind(&setup_two_byte);
+  __ lea(arg4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
+  __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
+  __ bind(&setup_rest);
+
+  // Argument 1: Original subject string.
+  // The original subject string was saved in r15 above, so it can be passed
+  // on directly.  The regexp code needs the original (possibly sliced or
+  // cons) subject so that the stack guard handler can recompute the input
+  // pointers if the string is moved by the GC.
+  __ movq(arg1, r15);
 
   // Locate the code entry and call it.
   __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -3851,6 +3873,7 @@
   Label flat_string;
   Label ascii_string;
   Label got_char_code;
+  Label sliced_string;
 
   // If the receiver is a smi trigger the non-string case.
   __ JumpIfSmi(object_, receiver_not_string_);
@@ -3879,25 +3902,39 @@
   __ j(zero, &flat_string);
 
   // Handle non-flat strings.
-  __ testb(result_, Immediate(kIsConsStringMask));
-  __ j(zero, &call_runtime_);
+  __ and_(result_, Immediate(kStringRepresentationMask));
+  STATIC_ASSERT((kConsStringTag < kExternalStringTag));
+  STATIC_ASSERT((kSlicedStringTag > kExternalStringTag));
+  __ cmpb(result_, Immediate(kExternalStringTag));
+  __ j(greater, &sliced_string);
+  __ j(equal, &call_runtime_);
 
   // ConsString.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
   // the case we would rather go to the runtime system now to flatten
   // the string.
+  Label assure_seq_string;
   __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
                  Heap::kEmptyStringRootIndex);
   __ j(not_equal, &call_runtime_);
   // Get the first of the two strings and load its instance type.
   __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
+  __ jmp(&assure_seq_string, Label::kNear);
+
+  // SlicedString, unpack and add offset.
+  __ bind(&sliced_string);
+  __ addq(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
+  __ movq(object_, FieldOperand(object_, SlicedString::kParentOffset));
+
+  __ bind(&assure_seq_string);
   __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
   __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
   // If the first cons component is also non-flat, then go to runtime.
   STATIC_ASSERT(kSeqStringTag == 0);
   __ testb(result_, Immediate(kStringRepresentationMask));
   __ j(not_zero, &call_runtime_);
+  __ jmp(&flat_string);
 
   // Check for 1-byte or 2-byte string.
   __ bind(&flat_string);
@@ -4208,6 +4245,8 @@
   __ and_(rcx, Immediate(kStringRepresentationMask));
   __ cmpl(rcx, Immediate(kExternalStringTag));
   __ j(equal, &string_add_runtime);
+  // We cannot encounter sliced strings here since:
+  STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
   // Now check if both strings are ascii strings.
   // rax: first string
   // rbx: length of resulting flat string
@@ -4600,6 +4639,9 @@
 void SubStringStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
+  if (FLAG_string_slices) {
+    __ jmp(&runtime);
+  }
   // Stack frame on entry.
   //  rsp[0]: return address
   //  rsp[8]: to
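
Both stubs above exploit the tag ordering asserted in the hunks
(cons < external < sliced): a single compare against kExternalStringTag splits
a non-sequential string into three branches, which is the cmpq / j(less) /
j(equal) pattern in the generated code.  A host-side C++ restatement of that
classification (Classify and NonSeqKind are illustrative names; the tag values
come from objects.h):

  #include <cassert>
  #include <stdint.h>

  static const uint32_t kConsStringTag     = 0x1;
  static const uint32_t kExternalStringTag = 0x2;
  static const uint32_t kSlicedStringTag   = 0x3;

  enum class NonSeqKind { kCons, kExternal, kSliced };

  NonSeqKind Classify(uint32_t representation_tag) {
    if (representation_tag < kExternalStringTag) return NonSeqKind::kCons;
    if (representation_tag == kExternalStringTag) return NonSeqKind::kExternal;
    return NonSeqKind::kSliced;
  }

  int main() {
    assert(Classify(kConsStringTag) == NonSeqKind::kCons);
    assert(Classify(kExternalStringTag) == NonSeqKind::kExternal);
    assert(Classify(kSlicedStringTag) == NonSeqKind::kSliced);
    return 0;
  }
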
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 5567df2..dd35053 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -45,7 +45,6 @@
 
 
 static unsigned GetPropertyId(Property* property) {
-  if (property->is_synthetic()) return AstNode::kNoNumber;
   return property->id();
 }
 
@@ -665,97 +664,69 @@
   Comment cmnt(masm_, "[ Declaration");
   ASSERT(variable != NULL);  // Must have been resolved.
   Slot* slot = variable->AsSlot();
-  Property* prop = variable->AsProperty();
-
-  if (slot != NULL) {
-    switch (slot->type()) {
-      case Slot::PARAMETER:
-      case Slot::LOCAL:
-        if (mode == Variable::CONST) {
-          __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
-          __ movq(Operand(rbp, SlotOffset(slot)), kScratchRegister);
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ movq(Operand(rbp, SlotOffset(slot)), result_register());
-        }
-        break;
-
-      case Slot::CONTEXT:
-        // We bypass the general EmitSlotSearch because we know more about
-        // this specific context.
-
-        // The variable in the decl always resides in the current function
-        // context.
-        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
-        if (FLAG_debug_code) {
-          // Check that we're not inside a with or catch context.
-          __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
-          __ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
-          __ Check(not_equal, "Declaration in with context.");
-          __ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
-          __ Check(not_equal, "Declaration in catch context.");
-        }
-        if (mode == Variable::CONST) {
-          __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
-          __ movq(ContextOperand(rsi, slot->index()), kScratchRegister);
-          // No write barrier since the hole value is in old space.
-        } else if (function != NULL) {
-          VisitForAccumulatorValue(function);
-          __ movq(ContextOperand(rsi, slot->index()), result_register());
-          int offset = Context::SlotOffset(slot->index());
-          __ movq(rbx, rsi);
-          __ RecordWrite(rbx, offset, result_register(), rcx);
-        }
-        break;
-
-      case Slot::LOOKUP: {
-        __ push(rsi);
-        __ Push(variable->name());
-        // Declaration nodes are always introduced in one of two modes.
-        ASSERT(mode == Variable::VAR ||
-               mode == Variable::CONST ||
-               mode == Variable::LET);
-        PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
-        __ Push(Smi::FromInt(attr));
-        // Push initial value, if any.
-        // Note: For variables we must not push an initial value (such as
-        // 'undefined') because we may have a (legal) redeclaration and we
-        // must not destroy the current value.
-        if (mode == Variable::CONST) {
-          __ PushRoot(Heap::kTheHoleValueRootIndex);
-        } else if (function != NULL) {
-          VisitForStackValue(function);
-        } else {
-          __ Push(Smi::FromInt(0));  // no initial value!
-        }
-        __ CallRuntime(Runtime::kDeclareContextSlot, 4);
-        break;
+  ASSERT(slot != NULL);
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+    case Slot::LOCAL:
+      if (mode == Variable::CONST) {
+        __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+        __ movq(Operand(rbp, SlotOffset(slot)), kScratchRegister);
+      } else if (function != NULL) {
+        VisitForAccumulatorValue(function);
+        __ movq(Operand(rbp, SlotOffset(slot)), result_register());
       }
-    }
+      break;
 
-  } else if (prop != NULL) {
-    // A const declaration aliasing a parameter is an illegal redeclaration.
-    ASSERT(mode != Variable::CONST);
-    if (function != NULL) {
-      // We are declaring a function that rewrites to a property.
-      // Use (keyed) IC to set the initial value.  We cannot visit the
-      // rewrite because it's shared and we risk recording duplicate AST
-      // IDs for bailouts from optimized code.
-      ASSERT(prop->obj()->AsVariableProxy() != NULL);
-      { AccumulatorValueContext for_object(this);
-        EmitVariableLoad(prop->obj()->AsVariableProxy());
+    case Slot::CONTEXT:
+      // We bypass the general EmitSlotSearch because we know more about
+      // this specific context.
+
+      // The variable in the decl always resides in the current function
+      // context.
+      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+      if (FLAG_debug_code) {
+        // Check that we're not inside a with or catch context.
+        __ movq(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
+        __ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
+        __ Check(not_equal, "Declaration in with context.");
+        __ CompareRoot(rbx, Heap::kCatchContextMapRootIndex);
+        __ Check(not_equal, "Declaration in catch context.");
       }
-      __ push(rax);
-      VisitForAccumulatorValue(function);
-      __ pop(rdx);
-      ASSERT(prop->key()->AsLiteral() != NULL &&
-             prop->key()->AsLiteral()->handle()->IsSmi());
-      __ Move(rcx, prop->key()->AsLiteral()->handle());
+      if (mode == Variable::CONST) {
+        __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+        __ movq(ContextOperand(rsi, slot->index()), kScratchRegister);
+        // No write barrier since the hole value is in old space.
+      } else if (function != NULL) {
+        VisitForAccumulatorValue(function);
+        __ movq(ContextOperand(rsi, slot->index()), result_register());
+        int offset = Context::SlotOffset(slot->index());
+        __ movq(rbx, rsi);
+        __ RecordWrite(rbx, offset, result_register(), rcx);
+      }
+      break;
 
-      Handle<Code> ic = is_strict_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-          : isolate()->builtins()->KeyedStoreIC_Initialize();
-      __ call(ic);
+    case Slot::LOOKUP: {
+      __ push(rsi);
+      __ Push(variable->name());
+      // Declaration nodes are always introduced in one of two modes.
+      ASSERT(mode == Variable::VAR ||
+             mode == Variable::CONST ||
+             mode == Variable::LET);
+      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
+      __ Push(Smi::FromInt(attr));
+      // Push initial value, if any.
+      // Note: For variables we must not push an initial value (such as
+      // 'undefined') because we may have a (legal) redeclaration and we
+      // must not destroy the current value.
+      if (mode == Variable::CONST) {
+        __ PushRoot(Heap::kTheHoleValueRootIndex);
+      } else if (function != NULL) {
+        VisitForStackValue(function);
+      } else {
+        __ Push(Smi::FromInt(0));  // no initial value!
+      }
+      __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      break;
     }
   }
 }
@@ -2169,38 +2140,10 @@
       EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
     } else {
       // Call to a keyed property.
-      // For a synthetic property use keyed load IC followed by function call,
-      // for a regular property use EmitKeyedCallWithIC.
-      if (prop->is_synthetic()) {
-        // Do not visit the object and key subexpressions (they are shared
-        // by all occurrences of the same rewritten parameter).
-        ASSERT(prop->obj()->AsVariableProxy() != NULL);
-        ASSERT(prop->obj()->AsVariableProxy()->var()->AsSlot() != NULL);
-        Slot* slot = prop->obj()->AsVariableProxy()->var()->AsSlot();
-        MemOperand operand = EmitSlotSearch(slot, rdx);
-        __ movq(rdx, operand);
-
-        ASSERT(prop->key()->AsLiteral() != NULL);
-        ASSERT(prop->key()->AsLiteral()->handle()->IsSmi());
-        __ Move(rax, prop->key()->AsLiteral()->handle());
-
-        // Record source code position for IC call.
-        SetSourcePosition(prop->position());
-
-        Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-        __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
-        // Push result (function).
-        __ push(rax);
-        // Push Global receiver.
-        __ movq(rcx, GlobalObjectOperand());
-        __ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
-        EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
-      } else {
-        { PreservePositionScope scope(masm()->positions_recorder());
-          VisitForStackValue(prop->obj());
-        }
-        EmitKeyedCallWithIC(expr, prop->key());
+      { PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(prop->obj());
       }
+      EmitKeyedCallWithIC(expr, prop->key());
     }
   } else {
     { PreservePositionScope scope(masm()->positions_recorder());
@@ -3518,39 +3461,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-
-  // Load the function into rax.
-  VisitForAccumulatorValue(args->at(0));
-
-  // Prepare for the test.
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  // Test for strict mode function.
-  __ movq(rdx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
-  __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
-           Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
-  __ j(not_equal, if_true);
-
-  // Test for native function.
-  __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
-           Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
-  __ j(not_equal, if_true);
-
-  // Not native or strict-mode function.
-  __ jmp(if_false);
-
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   Handle<String> name = expr->name();
   if (name->length() > 0 && name->Get(0) == '_') {
@@ -3599,17 +3509,11 @@
       Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
 
       if (prop != NULL) {
-        if (prop->is_synthetic()) {
-          // Result of deleting parameters is false, even when they rewrite
-          // to accesses on the arguments object.
-          context()->Plug(false);
-        } else {
-          VisitForStackValue(prop->obj());
-          VisitForStackValue(prop->key());
-          __ Push(Smi::FromInt(strict_mode_flag()));
-          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
-          context()->Plug(rax);
-        }
+        VisitForStackValue(prop->obj());
+        VisitForStackValue(prop->key());
+        __ Push(Smi::FromInt(strict_mode_flag()));
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        context()->Plug(rax);
       } else if (var != NULL) {
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is.
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index acbac44..76a9453 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -3200,95 +3200,80 @@
   };
 
   Register string = ToRegister(instr->string());
-  Register index = no_reg;
-  int const_index = -1;
-  if (instr->index()->IsConstantOperand()) {
-    const_index = ToInteger32(LConstantOperand::cast(instr->index()));
-    STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
-    if (!Smi::IsValid(const_index)) {
-      // Guaranteed to be out of bounds because of the assert above.
-      // So the bounds check that must dominate this instruction must
-      // have deoptimized already.
-      if (FLAG_debug_code) {
-        __ Abort("StringCharCodeAt: out of bounds index.");
-      }
-      // No code needs to be generated.
-      return;
-    }
-  } else {
-    index = ToRegister(instr->index());
-  }
+  Register index = ToRegister(instr->index());
   Register result = ToRegister(instr->result());
 
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  Label flat_string, ascii_string, done;
-
   // Fetch the instance type of the receiver into result register.
   __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
   __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
 
-  // We need special handling for non-sequential strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(result, Immediate(kStringRepresentationMask));
-  __ j(zero, &flat_string, Label::kNear);
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ testb(result, Immediate(kIsIndirectStringMask));
+  __ j(zero, &check_sequential, Label::kNear);
 
-  // Handle cons strings and go to deferred code for the rest.
-  __ testb(result, Immediate(kIsConsStringMask));
-  __ j(zero, deferred->entry());
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  const uint32_t kSlicedNotConsMask = kSlicedStringTag & ~kConsStringTag;
+  ASSERT(IsPowerOf2(kSlicedNotConsMask) && kSlicedNotConsMask != 0);
+  __ testb(result, Immediate(kSlicedNotConsMask));
+  __ j(zero, &cons_string, Label::kNear);
 
-  // ConsString.
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
+  __ addq(index, result);
+  __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded, Label::kNear);
+
+  // Handle conses.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
   // the case we would rather go to the runtime system now to flatten
   // the string.
+  __ bind(&cons_string);
   __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                  Heap::kEmptyStringRootIndex);
   __ j(not_equal, deferred->entry());
-  // Get the first of the two strings and load its instance type.
   __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
   __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
   __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-  // If the first cons component is also non-flat, then go to runtime.
+
+  // Check whether the string is sequential. The only non-sequential
+  // shapes we support have just been unwrapped above.
+  __ bind(&check_sequential);
   STATIC_ASSERT(kSeqStringTag == 0);
   __ testb(result, Immediate(kStringRepresentationMask));
   __ j(not_zero, deferred->entry());
 
-  // Check for ASCII or two-byte string.
-  __ bind(&flat_string);
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii_string;
   STATIC_ASSERT(kAsciiStringTag != 0);
   __ testb(result, Immediate(kStringEncodingMask));
   __ j(not_zero, &ascii_string, Label::kNear);
 
   // Two-byte string.
   // Load the two-byte character code into the result register.
+  Label done;
   STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  if (instr->index()->IsConstantOperand()) {
-    __ movzxwl(result,
-               FieldOperand(string,
-                            SeqTwoByteString::kHeaderSize +
-                            (kUC16Size * const_index)));
-  } else {
-    __ movzxwl(result, FieldOperand(string,
-                                    index,
-                                    times_2,
-                                    SeqTwoByteString::kHeaderSize));
-  }
+  __ movzxwl(result, FieldOperand(string,
+                                  index,
+                                  times_2,
+                                  SeqTwoByteString::kHeaderSize));
   __ jmp(&done, Label::kNear);
 
   // ASCII string.
   // Load the byte into the result register.
   __ bind(&ascii_string);
-  if (instr->index()->IsConstantOperand()) {
-    __ movzxbl(result, FieldOperand(string,
-                                    SeqAsciiString::kHeaderSize + const_index));
-  } else {
-    __ movzxbl(result, FieldOperand(string,
-                                    index,
-                                    times_1,
-                                    SeqAsciiString::kHeaderSize));
-  }
+  __ movzxbl(result, FieldOperand(string,
+                                  index,
+                                  times_1,
+                                  SeqAsciiString::kHeaderSize));
   __ bind(&done);
   __ bind(deferred->exit());
 }
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 9d08d37..bd31956 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1984,8 +1984,8 @@
 
 
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
-  LOperand* string = UseRegister(instr->string());
-  LOperand* index = UseRegisterOrConstant(instr->index());
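+  // The generated code clobbers both string and index while unwrapping
+  // indirect strings, so they must be allocated as temporaries.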
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
   LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 395466e..7f80447 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -1170,12 +1170,13 @@
   }
 
   // Prepare for possible GC.
-  HandleScope handles;
+  HandleScope handles(isolate);
   Handle<Code> code_handle(re_code);
 
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+
   // Current string.
-  bool is_ascii = subject->IsAsciiRepresentation();
+  bool is_ascii = subject->IsAsciiRepresentationUnderneath();
 
   ASSERT(re_code->instruction_start() <= *return_address);
   ASSERT(*return_address <=
@@ -1193,8 +1194,20 @@
     return EXCEPTION;
   }
 
+  Handle<String> subject_tmp = subject;
+  int slice_offset = 0;
+
+  // Extract the underlying string and the slice offset.
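+  // Any cons string reaching this point should already be flat, so its first
+  // part holds the characters; the ASSERT further down checks the shape.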
+  if (StringShape(*subject_tmp).IsCons()) {
+    subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+  } else if (StringShape(*subject_tmp).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(*subject_tmp);
+    subject_tmp = Handle<String>(slice->parent());
+    slice_offset = slice->offset();
+  }
+
   // String might have changed.
-  if (subject->IsAsciiRepresentation() != is_ascii) {
+  if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
     // If we changed between an ASCII and an UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
@@ -1205,8 +1218,8 @@
   // be a sequential or external string with the same content.
   // Update the start and end pointers in the stack frame to the current
   // location (whether it has actually moved or not).
-  ASSERT(StringShape(*subject).IsSequential() ||
-      StringShape(*subject).IsExternal());
+  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+      StringShape(*subject_tmp).IsExternal());
 
   // The original start address of the characters to match.
   const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
@@ -1214,7 +1227,8 @@
   // Find the current start address of the same character at the current string
   // position.
   int start_index = frame_entry<int>(re_frame, kStartIndex);
-  const byte* new_address = StringCharacterPosition(*subject, start_index);
+  const byte* new_address = StringCharacterPosition(*subject_tmp,
+                                                    start_index + slice_offset);
 
   if (start_address != new_address) {
     // If there is a difference, update the object pointer and start and end
diff --git a/src/zone.h b/src/zone.h
index 4dfd01d..f60ac0d 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -152,6 +152,7 @@
   // ZoneObjects should never be deleted individually; use
   // Zone::DeleteAll() to delete all zone objects in one go.
   void operator delete(void*, size_t) { UNREACHABLE(); }
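+  // Matching placement delete for the zone-based placement new above; it is
+  // only needed to satisfy the compiler and should never actually be called.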
+  void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
 };
 
 
@@ -197,6 +198,9 @@
       : List<T, ZoneListAllocationPolicy>(other.length()) {
     AddAll(other);
   }
+
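+  // ZoneLists are not deleted individually; these overloads trap any attempt.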
+  void operator delete(void* pointer) { UNREACHABLE(); }
+  void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
 };
 
 
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index 5843440..c0b5316 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -26,10 +26,10 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 {
+  'includes': ['../../build/common.gypi'],
   'variables': {
     'generated_file': '<(SHARED_INTERMEDIATE_DIR)/resources.cc',
   },
-  'includes': [ '../../build/v8-features.gypi' ],
   'targets': [
     {
       'target_name': 'cctest',
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index aeb4cbe..f2af81e 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -14452,34 +14452,34 @@
   v8::Handle<v8::RegExp> re = v8::RegExp::New(v8_str("foo"), v8::RegExp::kNone);
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("foo")));
-  CHECK_EQ(re->GetFlags(), v8::RegExp::kNone);
+  CHECK_EQ(v8::RegExp::kNone, re->GetFlags());
 
   re = v8::RegExp::New(v8_str("bar"),
                        static_cast<v8::RegExp::Flags>(v8::RegExp::kIgnoreCase |
                                                       v8::RegExp::kGlobal));
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("bar")));
-  CHECK_EQ(static_cast<int>(re->GetFlags()),
-           v8::RegExp::kIgnoreCase | v8::RegExp::kGlobal);
+  CHECK_EQ(v8::RegExp::kIgnoreCase | v8::RegExp::kGlobal,
+           static_cast<int>(re->GetFlags()));
 
   re = v8::RegExp::New(v8_str("baz"),
                        static_cast<v8::RegExp::Flags>(v8::RegExp::kIgnoreCase |
                                                       v8::RegExp::kMultiline));
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("baz")));
-  CHECK_EQ(static_cast<int>(re->GetFlags()),
-           v8::RegExp::kIgnoreCase | v8::RegExp::kMultiline);
+  CHECK_EQ(v8::RegExp::kIgnoreCase | v8::RegExp::kMultiline,
+           static_cast<int>(re->GetFlags()));
 
   re = CompileRun("/quux/").As<v8::RegExp>();
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("quux")));
-  CHECK_EQ(re->GetFlags(), v8::RegExp::kNone);
+  CHECK_EQ(v8::RegExp::kNone, re->GetFlags());
 
   re = CompileRun("/quux/gm").As<v8::RegExp>();
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("quux")));
-  CHECK_EQ(static_cast<int>(re->GetFlags()),
-           v8::RegExp::kGlobal | v8::RegExp::kMultiline);
+  CHECK_EQ(v8::RegExp::kGlobal | v8::RegExp::kMultiline,
+           static_cast<int>(re->GetFlags()));
 
   // Override the RegExp constructor and check the API constructor
   // still works.
@@ -14488,15 +14488,15 @@
   re = v8::RegExp::New(v8_str("foobar"), v8::RegExp::kNone);
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("foobar")));
-  CHECK_EQ(re->GetFlags(), v8::RegExp::kNone);
+  CHECK_EQ(v8::RegExp::kNone, re->GetFlags());
 
   re = v8::RegExp::New(v8_str("foobarbaz"),
                        static_cast<v8::RegExp::Flags>(v8::RegExp::kIgnoreCase |
                                                       v8::RegExp::kMultiline));
   CHECK(re->IsRegExp());
   CHECK(re->GetSource()->Equals(v8_str("foobarbaz")));
-  CHECK_EQ(static_cast<int>(re->GetFlags()),
-           v8::RegExp::kIgnoreCase | v8::RegExp::kMultiline);
+  CHECK_EQ(v8::RegExp::kIgnoreCase | v8::RegExp::kMultiline,
+           static_cast<int>(re->GetFlags()));
 
   context->Global()->Set(v8_str("re"), re);
   ExpectTrue("re.test('FoobarbaZ')");
@@ -14937,3 +14937,216 @@
     }
   }
 }
+
+
+static bool BlockProtoNamedSecurityTestCallback(Local<v8::Object> global,
+                                                Local<Value> name,
+                                                v8::AccessType type,
+                                                Local<Value> data) {
+  // Only block read access to __proto__.
+  if (type == v8::ACCESS_GET &&
+      name->IsString() &&
+      name->ToString()->Length() == 9 &&
+      name->ToString()->Utf8Length() == 9) {
+    char buffer[10];
+    CHECK_EQ(10, name->ToString()->WriteUtf8(buffer));
+    return strncmp(buffer, "__proto__", 9) != 0;
+  }
+
+  return true;
+}
+
+
+THREADED_TEST(Regress93759) {
+  HandleScope scope;
+
+  // Template for object with security check.
+  Local<ObjectTemplate> no_proto_template = v8::ObjectTemplate::New();
+  // We don't do indexing, so any callback can be used for that.
+  no_proto_template->SetAccessCheckCallbacks(
+      BlockProtoNamedSecurityTestCallback,
+      IndexedSecurityTestCallback);
+
+  // Templates for objects with hidden prototypes and possibly security check.
+  Local<FunctionTemplate> hidden_proto_template = v8::FunctionTemplate::New();
+  hidden_proto_template->SetHiddenPrototype(true);
+
+  Local<FunctionTemplate> protected_hidden_proto_template =
+      v8::FunctionTemplate::New();
+  protected_hidden_proto_template->InstanceTemplate()->SetAccessCheckCallbacks(
+      BlockProtoNamedSecurityTestCallback,
+      IndexedSecurityTestCallback);
+  protected_hidden_proto_template->SetHiddenPrototype(true);
+
+  // Context for "foreign" objects used in test.
+  Persistent<Context> context = v8::Context::New();
+  context->Enter();
+
+  // Plain object, no security check.
+  Local<Object> simple_object = Object::New();
+
+  // Object with explicit security check.
+  Local<Object> protected_object =
+      no_proto_template->NewInstance();
+
+  // JSGlobalProxy object; it always has security checks.
+  Local<Object> proxy_object =
+      context->Global();
+
+  // Global object, the prototype of proxy_object. No security checks.
+  Local<Object> global_object =
+      proxy_object->GetPrototype()->ToObject();
+
+  // Hidden prototype without security check.
+  Local<Object> hidden_prototype =
+      hidden_proto_template->GetFunction()->NewInstance();
+  Local<Object> object_with_hidden =
+    Object::New();
+  object_with_hidden->SetPrototype(hidden_prototype);
+
+  // Hidden prototype with security check on the hidden prototype.
+  Local<Object> protected_hidden_prototype =
+      protected_hidden_proto_template->GetFunction()->NewInstance();
+  Local<Object> object_with_protected_hidden =
+    Object::New();
+  object_with_protected_hidden->SetPrototype(protected_hidden_prototype);
+
+  context->Exit();
+
+  // Template for object for second context. Values to test are put on it as
+  // properties.
+  Local<ObjectTemplate> global_template = ObjectTemplate::New();
+  global_template->Set(v8_str("simple"), simple_object);
+  global_template->Set(v8_str("protected"), protected_object);
+  global_template->Set(v8_str("global"), global_object);
+  global_template->Set(v8_str("proxy"), proxy_object);
+  global_template->Set(v8_str("hidden"), object_with_hidden);
+  global_template->Set(v8_str("phidden"), object_with_protected_hidden);
+
+  LocalContext context2(NULL, global_template);
+
+  Local<Value> result1 = CompileRun("Object.getPrototypeOf(simple)");
+  CHECK(result1->Equals(simple_object->GetPrototype()));
+
+  Local<Value> result2 = CompileRun("Object.getPrototypeOf(protected)");
+  CHECK(result2->Equals(Undefined()));
+
+  Local<Value> result3 = CompileRun("Object.getPrototypeOf(global)");
+  CHECK(result3->Equals(global_object->GetPrototype()));
+
+  Local<Value> result4 = CompileRun("Object.getPrototypeOf(proxy)");
+  CHECK(result4->Equals(Undefined()));
+
+  Local<Value> result5 = CompileRun("Object.getPrototypeOf(hidden)");
+  CHECK(result5->Equals(
+      object_with_hidden->GetPrototype()->ToObject()->GetPrototype()));
+
+  Local<Value> result6 = CompileRun("Object.getPrototypeOf(phidden)");
+  CHECK(result6->Equals(Undefined()));
+
+  context.Dispose();
+}
+
+
+static void TestReceiver(Local<Value> expected_result,
+                         Local<Value> expected_receiver,
+                         const char* code) {
+  Local<Value> result = CompileRun(code);
+  CHECK(result->IsObject());
+  CHECK(expected_receiver->Equals(result->ToObject()->Get(1)));
+  CHECK(expected_result->Equals(result->ToObject()->Get(0)));
+}
+
+
+THREADED_TEST(ForeignFunctionReceiver) {
+  HandleScope scope;
+
+  // Create two contexts with different "id" properties ('i' and 'o').
+  // Call a function both from its own context and from the foreign
+  // context, and see what "this" is bound to (returning both "this"
+  // and "this.id" for comparison).
+
+  Persistent<Context> foreign_context = v8::Context::New();
+  foreign_context->Enter();
+  Local<Value> foreign_function =
+    CompileRun("function func() { return { 0: this.id, "
+               "                           1: this, "
+               "                           toString: function() { "
+               "                               return this[0];"
+               "                           }"
+               "                         };"
+               "}"
+               "var id = 'i';"
+               "func;");
+  CHECK(foreign_function->IsFunction());
+  foreign_context->Exit();
+
+  LocalContext context;
+
+  Local<String> password = v8_str("Password");
+  // Don't get hit by security checks when accessing foreign_context's
+  // global receiver (aka. global proxy).
+  context->SetSecurityToken(password);
+  foreign_context->SetSecurityToken(password);
+
+  Local<String> i = v8_str("i");
+  Local<String> o = v8_str("o");
+  Local<String> id = v8_str("id");
+
+  CompileRun("function ownfunc() { return { 0: this.id, "
+             "                              1: this, "
+             "                              toString: function() { "
+             "                                  return this[0];"
+             "                              }"
+             "                             };"
+             "}"
+             "var id = 'o';"
+             "ownfunc");
+  context->Global()->Set(v8_str("func"), foreign_function);
+
+  // Sanity check the contexts.
+  CHECK(i->Equals(foreign_context->Global()->Get(id)));
+  CHECK(o->Equals(context->Global()->Get(id)));
+
+  // Checking local function's receiver.
+  // Calling function using its call/apply methods.
+  TestReceiver(o, context->Global(), "ownfunc.call()");
+  TestReceiver(o, context->Global(), "ownfunc.apply()");
+  // Making calls through built-in functions.
+  TestReceiver(o, context->Global(), "[1].map(ownfunc)[0]");
+  CHECK(o->Equals(CompileRun("'abcbd'.replace(/b/,ownfunc)[1]")));
+  CHECK(o->Equals(CompileRun("'abcbd'.replace(/b/g,ownfunc)[1]")));
+  CHECK(o->Equals(CompileRun("'abcbd'.replace(/b/g,ownfunc)[3]")));
+  // Calling with environment record as base.
+  TestReceiver(o, context->Global(), "ownfunc()");
+  // Calling with no base.
+  TestReceiver(o, context->Global(), "(1,ownfunc)()");
+
+  // Checking foreign function return value.
+  // Calling function using its call/apply methods.
+  TestReceiver(i, foreign_context->Global(), "func.call()");
+  TestReceiver(i, foreign_context->Global(), "func.apply()");
+  // Calling function using another context's call/apply methods.
+  TestReceiver(i, foreign_context->Global(),
+               "Function.prototype.call.call(func)");
+  TestReceiver(i, foreign_context->Global(),
+               "Function.prototype.call.apply(func)");
+  TestReceiver(i, foreign_context->Global(),
+               "Function.prototype.apply.call(func)");
+  TestReceiver(i, foreign_context->Global(),
+               "Function.prototype.apply.apply(func)");
+  // Making calls through built-in functions.
+  TestReceiver(i, foreign_context->Global(), "[1].map(func)[0]");
+  // ToString(func()) is func()[0], i.e., the returned this.id.
+  CHECK(i->Equals(CompileRun("'abcbd'.replace(/b/,func)[1]")));
+  CHECK(i->Equals(CompileRun("'abcbd'.replace(/b/g,func)[1]")));
+  CHECK(i->Equals(CompileRun("'abcbd'.replace(/b/g,func)[3]")));
+
+  // TODO(1547): Make the following also return "i".
+  // Calling with environment record as base.
+  TestReceiver(o, context->Global(), "func()");
+  // Calling with no base.
+  TestReceiver(o, context->Global(), "(1,func)()");
+
+  foreign_context.Dispose();
+}
diff --git a/test/cctest/test-assembler-mips.cc b/test/cctest/test-assembler-mips.cc
index 8ac89f6..065569d 100644
--- a/test/cctest/test-assembler-mips.cc
+++ b/test/cctest/test-assembler-mips.cc
@@ -1083,17 +1083,17 @@
     CpuFeatures::Scope scope(FPU);
 
     __ sw(t0, MemOperand(a0, OFFSET_OF(T, cvt_small_in)));
-    __ Cvt_d_uw(f10, t0);
+    __ Cvt_d_uw(f10, t0, f22);
     __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, cvt_small_out)));
 
-    __ Trunc_uw_d(f10, f10);
+    __ Trunc_uw_d(f10, f10, f22);
     __ swc1(f10, MemOperand(a0, OFFSET_OF(T, trunc_small_out)));
 
     __ sw(t0, MemOperand(a0, OFFSET_OF(T, cvt_big_in)));
-    __ Cvt_d_uw(f8, t0);
+    __ Cvt_d_uw(f8, t0, f22);
     __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, cvt_big_out)));
 
-    __ Trunc_uw_d(f8, f8);
+    __ Trunc_uw_d(f8, f8, f22);
     __ swc1(f8, MemOperand(a0, OFFSET_OF(T, trunc_big_out)));
 
     __ jr(ra);
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 58d970c..b7962de 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -2174,7 +2174,7 @@
   f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
   g = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("g")));
 
-  // Chesk that a break point was hit when the script was run.
+  // Check that a break point was hit when the script was run.
   CHECK_EQ(1, break_point_hit_count);
   CHECK_EQ(0, StrLength(last_function_hit));
 
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 4d9b264..17020a3 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -430,8 +430,7 @@
     "  return 0;"
     "};"
     "test()";
-  CHECK_EQ(0,
-           v8::Script::Compile(v8::String::New(source))->Run()->Int32Value());
+  CHECK_EQ(0, CompileRun(source)->Int32Value());
 }
 
 
@@ -481,3 +480,52 @@
     }
   }
 }
+
+
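+// Slicing an unflattened cons string should first flatten the parent; the
+// resulting slice then points directly at the underlying sequential string.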
+TEST(SliceFromCons) {
+  FLAG_string_slices = true;
+  InitializeVM();
+  v8::HandleScope scope;
+  Handle<String> string =
+      FACTORY->NewStringFromAscii(CStrVector("parentparentparent"));
+  Handle<String> parent = FACTORY->NewConsString(string, string);
+  CHECK(parent->IsConsString());
+  CHECK(!parent->IsFlat());
+  Handle<String> slice = FACTORY->NewSubString(parent, 1, 25);
+  // After slicing, the original string becomes a flat cons.
+  CHECK(parent->IsFlat());
+  CHECK(slice->IsSlicedString());
+  CHECK_EQ(SlicedString::cast(*slice)->parent(),
+           ConsString::cast(*parent)->first());
+  CHECK(SlicedString::cast(*slice)->parent()->IsSeqString());
+  CHECK(slice->IsFlat());
+}
+
+
+TEST(TrivialSlice) {
+  // This tests whether a slice that contains the entire parent string
+  // actually creates a new string (it should not).
+  FLAG_string_slices = true;
+  InitializeVM();
+  HandleScope scope;
+  v8::Local<v8::Value> result;
+  Handle<String> string;
+  const char* init = "var str = 'abcdefghijklmnopqrstuvwxyz';";
+  const char* check = "str.slice(0,26)";
+  const char* crosscheck = "str.slice(1,25)";
+
+  CompileRun(init);
+
+  result = CompileRun(check);
+  CHECK(result->IsString());
+  string = v8::Utils::OpenHandle(v8::String::Cast(*result));
+  CHECK(!string->IsSlicedString());
+
+  string = FACTORY->NewSubString(string, 0, 26);
+  CHECK(!string->IsSlicedString());
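+  // A proper sub-range of the same string, by contrast, should be a slice.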
+  result = CompileRun(crosscheck);
+  CHECK(result->IsString());
+  string = v8::Utils::OpenHandle(v8::String::Cast(*result));
+  CHECK(string->IsSlicedString());
+  CHECK_EQ("bcdefghijklmnopqrstuvwxy", *(string->ToCString()));
+}
diff --git a/test/mjsunit/assert-opt-and-deopt.js b/test/mjsunit/assert-opt-and-deopt.js
index f589868..c9adb5b 100644
--- a/test/mjsunit/assert-opt-and-deopt.js
+++ b/test/mjsunit/assert-opt-and-deopt.js
@@ -54,7 +54,7 @@
  * that you later want to track de/optimizations for. It is necessary because
  * tests are sometimes executed several times in a row, and you want to
  * disregard counts from previous runs.
- */ 
+ */
 OptTracker.prototype.CheckpointOptCount = function(func) {
   this.opt_counts_[func] = %GetOptimizationCount(func);
 };
@@ -148,7 +148,7 @@
 tracker.AssertDeoptHappened(f, false);
 tracker.AssertDeoptCount(f, 0);
 
-for (var i = 0; i < 2; i++) f(1);
+f(1);
 
 tracker.AssertOptCount(f, 0);
 tracker.AssertIsOptimized(f, false);
diff --git a/test/mjsunit/debug-script.js b/test/mjsunit/debug-script.js
index 643dd8c..9767888 100644
--- a/test/mjsunit/debug-script.js
+++ b/test/mjsunit/debug-script.js
@@ -34,13 +34,19 @@
 
 // Count script types.
 var named_native_count = 0;
+var named_native_names = {};
 var extension_count = 0;
 var normal_count = 0;
 var scripts = Debug.scripts();
 for (i = 0; i < scripts.length; i++) {
   if (scripts[i].type == Debug.ScriptType.Native) {
     if (scripts[i].name) {
-      named_native_count++;
+      // TODO(1641): Remove check for equally named native scripts once the
+      // underlying issue is fixed.
+      if (!named_native_names[scripts[i].name]) {
+        named_native_names[scripts[i].name] = true;
+        named_native_count++;
+      }
     }
   } else if (scripts[i].type == Debug.ScriptType.Extension) {
     extension_count++;
diff --git a/test/mjsunit/function-caller.js b/test/mjsunit/function-caller.js
index ddc7b5d..bc01750 100644
--- a/test/mjsunit/function-caller.js
+++ b/test/mjsunit/function-caller.js
@@ -46,3 +46,10 @@
 // Check called from eval.
 eval('f(null)');
 
+// Check called from builtin functions. Only the initially called (publicly
+// exposed) builtin should be reported as the caller, not its internal
+// helper functions.
+[Array.prototype.sort, Array.prototype.sort].sort(f);
+
+"abel".replace(/b/g, function h() {
+   assertEquals(String.prototype.replace, h.caller);
+});
diff --git a/test/mjsunit/string-slices-regexp.js b/test/mjsunit/string-slices-regexp.js
new file mode 100644
index 0000000..a8cadae
--- /dev/null
+++ b/test/mjsunit/string-slices-regexp.js
@@ -0,0 +1,81 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --string-slices
+
+//assertEquals('345"12345 6"1234567"123',
+//             '12345""12345 6""1234567""1234'.slice(2,-1).replace(/""/g, '"'));
+
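+// Match a regexp against a substring subject (a sliced string when
+// --string-slices is on), repeatedly so the compiled regexp code is used.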
+var foo = "lsdfj sldkfj sdklfj læsdfjl sdkfjlsdk fjsdl fjsdljskdj flsj flsdkj flskd regexp: /foobar/\nldkfj sdlkfj sdkl";
+for(var i = 0; i < 1000; i++) {
+  assertTrue(/^([a-z]+): (.*)/.test(foo.substring(foo.indexOf("regexp:"))));
+  assertEquals("regexp", RegExp.$1, "RegExp.$1");
+}
+
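+// Concatenating two sliced halves yields a cons string; run the regexp twice
+// so both the runtime (flattening) path and the generated code path are hit.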
+var re = /^(((N({)?)|(R)|(U)|(V)|(B)|(H)|(n((n)|(r)|(v)|(h))?)|(r(r)?)|(v)|(b((n)|(b))?)|(h))|((Y)|(A)|(E)|(o(u)?)|(p(u)?)|(q(u)?)|(s)|(t)|(u)|(w)|(x(u)?)|(y)|(z)|(a((T)|(A)|(L))?)|(c)|(e)|(f(u)?)|(g(u)?)|(i)|(j)|(l)|(m(u)?)))+/;
+var r = new RegExp(re);
+var str = "_Avtnennan gunzvmu pubExnY nEvln vaTxh rmuhguhaTxnY_".slice(1,-1);
+str = str + str;
+assertTrue(r.test(str));
+assertTrue(r.test(str));
+var re = /x/;
+assertEquals("a.yb", "_axyb_".slice(1,-1).replace(re, "."));
+re.compile("y");
+assertEquals("ax.b", "_axyb_".slice(1,-1).replace(re, "."));
+re.compile("(x)");
+assertEquals(["x", "x"], re.exec("_axyb_".slice(1,-1)));
+re.compile("(y)");
+assertEquals(["y", "y"], re.exec("_axyb_".slice(1,-1)));
+
+for(var i = 0; i < 100; i++) {
+  var a = "aaaaaaaaaaaaaaaaaaaaaaaabbaacabbabaaaaabbaaaabbac".slice(24,-1);
+  var b = "bbaacabbabaaaaabbaaaabba" + a;
+  // The first time, the cons string will be flattened and handled by the
+  // runtime system.
+  assertEquals(["bbaa", "a", "", "a"], /((\3|b)\2(a)){2,}/.exec(b));
+  // The second time, the cons string is already flattened and will be
+  // handled by generated code.
+  assertEquals(["bbaa", "a", "", "a"], /((\3|b)\2(a)){2,}/.exec(b));
+  assertEquals(["bbaa", "a", "", "a"], /((\3|b)\2(a)){2,}/.exec(a));
+  assertEquals(["bbaa", "a", "", "a"], /((\3|b)\2(a)){2,}/.exec(a));
+}
+
+var c = "ABCDEFGHIJKLMN".slice(2,-2);
+var d = "ABCDEF\u1234GHIJKLMN".slice(2,-2);
+var e = "ABCDEFGHIJKLMN".slice(0,-2);
+assertTrue(/^C.*L$/.test(c));
+assertTrue(/^C.*L$/.test(c));
+assertTrue(/^C.*L$/.test(d));
+assertTrue(/^C.*L$/.test(d));
+assertTrue(/^A\w{10}L$/.test(e));
+assertTrue(/^A\w{10}L$/.test(e));
+
+var e = "qui-opIasd-fghjklzx-cvbn-mqwer-tyuio-pasdf-ghIjkl-zx".slice(6,-6);
+var e_split = e.split("-");
+assertEquals(e_split[0], "Iasd");
+assertEquals(e_split[1], "fghjklzx");
+assertEquals(e_split[6], "ghI");
diff --git a/test/mjsunit/string-slices.js b/test/mjsunit/string-slices.js
new file mode 100755
index 0000000..b0b05ec
--- /dev/null
+++ b/test/mjsunit/string-slices.js
@@ -0,0 +1,198 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --string-slices --expose-externalize-string
+
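+// String.prototype.substr with assorted argument types and edge values.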
+var s = 'abcdefghijklmn';
+assertEquals(s, s.substr());
+assertEquals(s, s.substr(0));
+assertEquals(s, s.substr('0'));
+assertEquals(s, s.substr(void 0));
+assertEquals(s, s.substr(null));
+assertEquals(s, s.substr(false));
+assertEquals(s, s.substr(0.9));
+assertEquals(s, s.substr({ valueOf: function() { return 0; } }));
+assertEquals(s, s.substr({ toString: function() { return '0'; } }));
+
+var s1 = s.substring(1);
+assertEquals(s1, s.substr(1));
+assertEquals(s1, s.substr('1'));
+assertEquals(s1, s.substr(true));
+assertEquals(s1, s.substr(1.1));
+assertEquals(s1, s.substr({ valueOf: function() { return 1; } }));
+assertEquals(s1, s.substr({ toString: function() { return '1'; } }));
+
+
+assertEquals(s.substring(s.length - 1), s.substr(-1));
+assertEquals(s.substring(s.length - 1), s.substr(-1.2));
+assertEquals(s.substring(s.length - 1), s.substr(-1.7));
+assertEquals(s.substring(s.length - 2), s.substr(-2));
+assertEquals(s.substring(s.length - 2), s.substr(-2.3));
+assertEquals(s.substring(s.length - 2, s.length - 1), s.substr(-2, 1));
+assertEquals(s, s.substr(-100));
+assertEquals('abc', s.substr(-100, 3));
+assertEquals(s1, s.substr(-s.length + 1));
+
+// assertEquals('', s.substr(0, void 0)); // smjs and rhino 
+assertEquals('abcdefghijklmn', s.substr(0, void 0));  // kjs and v8
+assertEquals('', s.substr(0, null));
+assertEquals(s, s.substr(0, String(s.length)));
+assertEquals('a', s.substr(0, true));
+
+
+// Test substrings of different lengths and alignments.
+// First ASCII.
+var x = "ASCII";
+for (var i = 0; i < 25; i++) {
+  x += (i >> 4).toString(16) + (i & 0x0f).toString(16);
+}
+/x/.exec(x);  // Try to force a flatten.
+for (var i = 5; i < 25; i++) {
+  for (var j = 12; j < 25; j++) {
+    var z = x.substring(i, i+j);
+    var w = Math.random() * 42;  // Allocate something new in new-space.
+    assertEquals(j, z.length);
+    for (var k = 0; k < j; k++) {
+      assertEquals(x.charAt(i+k), z.charAt(k));
+    }
+  }
+}
+// Then two-byte strings.
+x = "UC16\u2028";  // Non-ascii char forces two-byte string.
+for (var i = 0; i < 25; i++) {
+  x += (i >> 4).toString(16) + (i & 0x0f).toString(16);
+}
+/x/.exec(x);  // Try to force a flatten.
+for (var i = 5; i < 25; i++) {
+  for (var j = 0; j < 25; j++) {
+    var z = x.substring(i, i + j);
+    var w = Math.random() * 42;  // Allocate something new in new-space.
+    assertEquals(j, z.length);
+    for (var k = 0; k < j; k++) {
+      assertEquals(x.charAt(i+k), z.charAt(k));
+    }
+  }
+}
+
+// Keep creating strings to force allocation failure on substring creation.
+var x = "0123456789ABCDEF";
+x += x;  // 2^5
+x += x;
+x += x;
+x += x;
+x += x;
+x += x;  // 2^10
+x += x;
+x += x;
+var xl = x.length;
+var cache = [];
+for (var i = 0; i < 10000; i++) {
+  var z = x.substring(i % xl);
+  assertEquals(xl - (i % xl), z.length);
+  cache.push(z);
+}
+
+
+// Same with two-byte strings
+var x = "\u2028123456789ABCDEF";
+x += x;  // 2^5
+x += x;
+x += x;
+x += x;
+x += x;
+x += x;  // 2^10
+x += x;
+x += x;
+var xl = x.length;
+var cache = [];
+for (var i = 0; i < 10000; i++) {
+  var z = x.substring(i % xl);
+  assertEquals(xl - (i % xl), z.length);
+  cache.push(z);
+}
+
+// Substring of substring.
+var cache = [];
+var last = x;
+var offset = 0;
+for (var i = 0; i < 64; i++) {
+  var z = last.substring(i);
+  last = z;
+  cache.push(z);
+  offset += i;
+}
+for (var i = 63; i >= 0; i--) {
+  var z = cache.pop();
+  assertTrue(/\u2028123456789ABCDEF/.test(z));
+  assertEquals(xl - offset, z.length);
+  offset -= i;
+}
+
+// Test charAt for different strings.
+function f(s1, s2, s3, i) {
+  assertEquals(String.fromCharCode(97+i%11), s1.charAt(i%11));
+  assertEquals(String.fromCharCode(97+i%11), s2.charAt(i%11));
+  assertEquals(String.fromCharCode(98+i%11), s3.charAt(i%11));
+  assertEquals(String.fromCharCode(101), s3.charAt(3));
+}
+
+flat = "abcdefghijkl12345";
+cons = flat + flat.toUpperCase();
+slice = "abcdefghijklmn12345".slice(1, -1);
+for (var i = 0; i < 1000; i++) {
+  f(flat, cons, slice, i);
+}
+flat = "abcdefghijkl1\u20232345";
+cons = flat + flat.toUpperCase();
+slice = "abcdefghijklmn1\u20232345".slice(1, -1);
+for (var i = 0; i < 1000; i++) {
+  f(flat, cons, slice, i);
+}
+
+// Concatenate substrings.
+var ascii = 'abcdefghijklmnop';
+var utf = '\u03B1\u03B2\u03B3\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9\u03BA\u03BB';
+assertEquals("klmno", ascii.substring(10,15) + ascii.substring(16));
+assertEquals("\u03B4\u03B7", utf.substring(3,4) + utf.substring(6,7));
+assertEquals("klp", ascii.substring(10,12) + ascii.substring(15,16));
+assertEquals("\u03B1\u03B4\u03B5", utf.substring(0,1) + utf.substring(5,3));
+assertEquals("", ascii.substring(16) + utf.substring(16));
+assertEquals("bcdef\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9",
+    ascii.substring(1,6) + utf.substring(3,9));
+assertEquals("\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9abcdefghijklmnop",
+    utf.substring(3,9) + ascii);
+assertEquals("\u03B2\u03B3\u03B4\u03B5\u03B4\u03B5\u03B6\u03B7",
+    utf.substring(5,1) + utf.substring(3,7));
+
+/*
+// Externalizing strings.
+var a = "123456789qwertyuiopasdfghjklzxcvbnm";
+var b = a.slice(1,-1);
+assertEquals(a.slice(1,-1), b);
+externalizeString(a);
+assertEquals(a.slice(1,-1), b);
+*/
\ No newline at end of file
diff --git a/test/mjsunit/substr.js b/test/mjsunit/substr.js
index f69a9c0..cffaf94 100755
--- a/test/mjsunit/substr.js
+++ b/test/mjsunit/substr.js
@@ -135,3 +135,20 @@
   assertEquals(xl - (i % xl), z.length);
   cache.push(z);
 }
+
+// Substring of substring.
+var cache = [];
+var last = x;
+var offset = 0;
+for (var i = 0; i < 64; i++) {
+  var z = last.substring(i);
+  last = z;
+  cache.push(z);
+  offset += i;
+}
+for (var i = 63; i >= 0; i--) {
+  var z = cache.pop();
+  assertTrue(/\u2028123456789ABCDEF/.test(z));
+  assertEquals(xl - offset, z.length);
+  offset -= i;
+}
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 97dc392..56ebeed 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -26,231 +26,9 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 {
-  'variables': {
-    'use_system_v8%': 0,
-    'msvs_use_common_release': 0,
-    'gcc_version%': 'unknown',
-    'v8_compress_startup_data%': 'off',
-    'v8_target_arch%': '<(target_arch)',
-
-    # Setting 'v8_can_use_unaligned_accesses' to 'true' will allow the code
-    # generated by V8 to do unaligned memory access, and setting it to 'false'
-    # will ensure that the generated code will always do aligned memory
-    # accesses. The default value of 'default' will try to determine the correct
-    # setting. Note that for Intel architectures (ia32 and x64) unaligned memory
-    # access is allowed for all CPUs.
-    'v8_can_use_unaligned_accesses%': 'default',
-
-    # Setting 'v8_can_use_vfp_instructions' to 'true' will enable use of ARM VFP
-    # instructions in the V8 generated code. VFP instructions will be enabled
-    # both for the snapshot and for the ARM target. Leaving the default value
-    # of 'false' will avoid VFP instructions in the snapshot and use CPU feature
-    # probing when running on the target.
-    'v8_can_use_vfp_instructions%': 'false',
-
-    # Setting v8_use_arm_eabi_hardfloat to true will turn on V8 support for ARM
-    # EABI calling convention where double arguments are passed in VFP
-    # registers. Note that the GCC flag '-mfloat-abi=hard' should be used as
-    # well when compiling for the ARM target.
-    'v8_use_arm_eabi_hardfloat%': 'false',
-
-    'v8_enable_debugger_support%': 1,
-
-    # Chrome needs this definition unconditionally. For standalone V8 builds,
-    # it's handled in common.gypi.
-    'want_separate_host_toolset%': 1,
-
-    'v8_use_snapshot%': 'true',
-    'host_os%': '<(OS)',
-    'v8_use_liveobjectlist%': 'false',
-  },
+  'includes': ['../../build/common.gypi'],
   'conditions': [
     ['use_system_v8==0', {
-      'target_defaults': {
-        'conditions': [
-          ['v8_enable_debugger_support==1', {
-              'defines': ['ENABLE_DEBUGGER_SUPPORT',],
-            },
-          ],
-          ['OS!="mac"', {
-            # TODO(mark): The OS!="mac" conditional is temporary. It can be
-            # removed once the Mac Chromium build stops setting target_arch to
-            # ia32 and instead sets it to mac. Other checks in this file for
-            # OS=="mac" can be removed at that time as well. This can be cleaned
-            # up once http://crbug.com/44205 is fixed.
-            'conditions': [
-              ['v8_target_arch=="arm"', {
-                'defines': [
-                  'V8_TARGET_ARCH_ARM',
-                ],
-                'conditions': [
-                  [ 'v8_can_use_unaligned_accesses=="true"', {
-                    'defines': [
-                      'CAN_USE_UNALIGNED_ACCESSES=1',
-                    ],
-                  }],
-                  [ 'v8_can_use_unaligned_accesses=="false"', {
-                    'defines': [
-                      'CAN_USE_UNALIGNED_ACCESSES=0',
-                    ],
-                  }],
-                  [ 'v8_can_use_vfp_instructions=="true"', {
-                    'defines': [
-                      'CAN_USE_VFP_INSTRUCTIONS',
-                    ],
-                  }],
-                  [ 'v8_use_arm_eabi_hardfloat=="true"', {
-                    'defines': [
-                      'USE_EABI_HARDFLOAT=1',
-                      'CAN_USE_VFP_INSTRUCTIONS',
-                    ],
-                    'cflags': [
-                      '-mfloat-abi=hard',
-                    ],
-                  }, {
-                    'defines': [
-                      'USE_EABI_HARDFLOAT=0',
-                    ],
-                  }],
-                ],
-              }],
-              ['v8_target_arch=="ia32"', {
-                'defines': [
-                  'V8_TARGET_ARCH_IA32',
-                ],
-              }],
-              ['v8_target_arch=="x64"', {
-                'defines': [
-                  'V8_TARGET_ARCH_X64',
-                ],
-              }],
-            ],
-          }],
-          ['v8_use_liveobjectlist=="true"', {
-            'defines': [
-              'ENABLE_DEBUGGER_SUPPORT',
-              'INSPECTOR',
-              'OBJECT_PRINT',
-              'LIVEOBJECTLIST',
-            ],
-          }],
-         ['v8_compress_startup_data=="bz2"', {
-            'defines': [
-              'COMPRESS_STARTUP_DATA_BZ2',
-            ],
-          }],
-        ],
-        'configurations': {
-          'Debug': {
-            'defines': [
-              'DEBUG',
-              'ENABLE_DISASSEMBLER',
-              'V8_ENABLE_CHECKS',
-              'OBJECT_PRINT',
-            ],
-            'msvs_settings': {
-              'VCCLCompilerTool': {
-                'Optimization': '0',
-
-                'conditions': [
-                  ['OS=="win" and component=="shared_library"', {
-                    'RuntimeLibrary': '3',  # /MDd
-                  }, {
-                    'RuntimeLibrary': '1',  # /MTd
-                  }],
-                ],
-              },
-              'VCLinkerTool': {
-                'LinkIncremental': '2',
-                # For future reference, the stack size needs to be increased
-                # when building for Windows 64-bit, otherwise some test cases
-                # can cause stack overflow.
-                # 'StackReserveSize': '297152',
-              },
-            },
-            'conditions': [
-             ['OS=="freebsd" or OS=="openbsd"', {
-               'cflags': [ '-I/usr/local/include' ],
-             }],
-             ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
-               'cflags': [ '-Wall', '-Werror', '-W', '-Wno-unused-parameter',
-                           '-Wnon-virtual-dtor' ],
-             }],
-           ],
-          },
-          'Release': {
-            'conditions': [
-              ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
-                'cflags!': [
-                  '-O2',
-                  '-Os',
-                ],
-                'cflags': [
-                  '-fomit-frame-pointer',
-                  '-O3',
-                ],
-                'conditions': [
-                  [ 'gcc_version==44', {
-                    'cflags': [
-                      # Avoid crashes with gcc 4.4 in the v8 test suite.
-                      '-fno-tree-vrp',
-                    ],
-                  }],
-                ],
-              }],
-             ['OS=="freebsd" or OS=="openbsd"', {
-               'cflags': [ '-I/usr/local/include' ],
-             }],
-              ['OS=="mac"', {
-                'xcode_settings': {
-                  'GCC_OPTIMIZATION_LEVEL': '3',  # -O3
-
-                  # -fstrict-aliasing.  Mainline gcc
-                  # enables this at -O2 and above,
-                  # but Apple gcc does not unless it
-                  # is specified explicitly.
-                  'GCC_STRICT_ALIASING': 'YES',
-                },
-              }],
-              ['OS=="win"', {
-                'msvs_configuration_attributes': {
-                  'OutputDirectory': '$(SolutionDir)$(ConfigurationName)',
-                  'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
-                  'CharacterSet': '1',
-                },
-                'msvs_settings': {
-                  'VCCLCompilerTool': {
-                    'Optimization': '2',
-                    'InlineFunctionExpansion': '2',
-                    'EnableIntrinsicFunctions': 'true',
-                    'FavorSizeOrSpeed': '0',
-                    'OmitFramePointers': 'true',
-                    'StringPooling': 'true',
-
-                    'conditions': [
-                      ['OS=="win" and component=="shared_library"', {
-                        'RuntimeLibrary': '2',  #/MD
-                      }, {
-                        'RuntimeLibrary': '0',  #/MT
-                      }],
-                    ],
-                  },
-                  'VCLinkerTool': {
-                    'LinkIncremental': '1',
-                    'OptimizeReferences': '2',
-                    'OptimizeForWindows98': '1',
-                    'EnableCOMDATFolding': '2',
-                    # For future reference, the stack size needs to be
-                    # increased when building for Windows 64-bit, otherwise
-                    # some test cases can cause stack overflow.
-                    # 'StackReserveSize': '297152',
-                  },
-                },
-              }],
-            ],
-          },
-        },
-      },
       'targets': [
         {
           'target_name': 'v8',
@@ -316,16 +94,6 @@
               'toolsets': ['target'],
               'dependencies': ['mksnapshot', 'js2c'],
             }],
-            # The ARM assembler assumes the host is 32 bits,
-            # so force building 32-bit host tools.
-            ['v8_target_arch=="arm" and host_arch=="x64"', {
-              'target_conditions': [
-                ['_toolset=="host"', {
-                  'cflags': ['-m32'],
-                  'ldflags': ['-m32'],
-                }],
-              ],
-            }],
             ['component=="shared_library"', {
               'conditions': [
                 ['OS=="win"', {
@@ -432,16 +200,6 @@
             '../../src/snapshot-empty.cc',
           ],
           'conditions': [
-            # The ARM assembler assumes the host is 32 bits,
-            # so force building 32-bit host tools.
-            ['v8_target_arch=="arm" and host_arch=="x64"', {
-              'target_conditions': [
-                ['_toolset=="host"', {
-                  'cflags': ['-m32'],
-                  'ldflags': ['-m32'],
-                }],
-              ],
-            }],
             ['want_separate_host_toolset==1', {
               'toolsets': ['host', 'target'],
               'dependencies': ['js2c#host'],
@@ -752,18 +510,6 @@
                 '../../src/arm/simulator-arm.cc',
                 '../../src/arm/stub-cache-arm.cc',
               ],
-              'conditions': [
-                # The ARM assembler assumes the host is 32 bits,
-                # so force building 32-bit host tools.
-                ['host_arch=="x64"', {
-                  'target_conditions': [
-                    ['_toolset=="host"', {
-                      'cflags': ['-m32'],
-                      'ldflags': ['-m32'],
-                    }],
-                  ],
-                }],
-              ],
             }],
             ['v8_target_arch=="ia32" or v8_target_arch=="mac" or OS=="mac"', {
               'sources': [
@@ -1002,21 +748,12 @@
             }, {
               'toolsets': ['target'],
             }],
-            # The ARM assembler assumes the host is 32 bits,
-            # so force building 32-bit host tools.
-            ['v8_target_arch=="arm" and host_arch=="x64"', {
-              'target_conditions': [
-                ['_toolset=="host"', {
-                  'cflags': ['-m32'],
-                  'ldflags': ['-m32'],
-                }],
-              ],
-            }],
             ['v8_compress_startup_data=="bz2"', {
               'libraries': [
                 '-lbz2',
-              ]}],
-          ]
+              ]}
+            ],
+          ],
         },
         {
           'target_name': 'v8_shell',
@@ -1077,16 +814,6 @@
             }, {
               'toolsets': ['target'],
             }],
-            # The ARM assembler assumes the host is 32 bits,
-            # so force building 32-bit host tools.
-            ['v8_target_arch=="arm" and host_arch=="x64"', {
-              'target_conditions': [
-                ['_toolset=="host"', {
-                  'cflags': ['-m32'],
-                  'ldflags': ['-m32'],
-                }],
-              ],
-            }],
           ],
           'link_settings': {
             'libraries': [
diff --git a/tools/test-wrapper-gypbuild.py b/tools/test-wrapper-gypbuild.py
index efd1c1d..9bc6bf6 100755
--- a/tools/test-wrapper-gypbuild.py
+++ b/tools/test-wrapper-gypbuild.py
@@ -213,6 +213,7 @@
       print ">>> running tests for %s.%s" % (arch, mode)
       shell = workspace + '/' + options.outdir + '/' + arch + '.' + mode + "/d8"
       child = subprocess.Popen(' '.join(args_for_children +
+                                        ['--arch=' + arch] +
                                         ['--mode=' + mode] +
                                         ['--shell=' + shell]),
                                shell=True,