Update V8 to r6101 as required by WebKit r74534

Change-Id: I7f84af8dd732f11898fd644b2c2b1538914cb78d
diff --git a/AUTHORS b/AUTHORS
index 3749ceb..ea5b93e 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -11,6 +11,7 @@
 Alexander Botero-Lowry <alexbl@FreeBSD.org>
 Alexandre Vassalotti <avassalotti@gmail.com>
 Andreas Anyuru <andreas.anyuru@gmail.com>
+Bert Belder <bertbelder@gmail.com>
 Burcu Dogan <burcujdogan@gmail.com>
 Craig Schlenter <craig.schlenter@gmail.com>
 Daniel Andersson <kodandersson@gmail.com>
diff --git a/Android.v8common.mk b/Android.v8common.mk
index 0c33d18..d1e98ee 100644
--- a/Android.v8common.mk
+++ b/Android.v8common.mk
@@ -24,6 +24,7 @@
 	src/dateparser.cc \
 	src/debug.cc \
 	src/debug-agent.cc \
+	src/deoptimizer.cc \
 	src/disassembler.cc \
 	src/diy-fp.cc \
 	src/dtoa.cc \
@@ -43,11 +44,14 @@
 	src/hashmap.cc \
 	src/heap.cc \
 	src/heap-profiler.cc \
+	src/hydrogen.cc \
+	src/hydrogen-instructions.cc \
 	src/ic.cc \
 	src/interpreter-irregexp.cc \
 	src/jsregexp.cc \
 	src/jump-target.cc \
 	src/jump-target-light.cc \
+	src/lithium-allocator.cc \
 	src/liveedit.cc \
 	src/log.cc \
 	src/log-utils.cc \
@@ -66,6 +70,8 @@
 	src/register-allocator.cc \
 	src/rewriter.cc \
 	src/runtime.cc \
+	src/runtime-profiler.cc \
+	src/safepoint-table.cc \
 	src/scanner.cc \
 	src/scanner-base.cc \
 	src/scopeinfo.cc \
@@ -79,6 +85,7 @@
 	src/stub-cache.cc \
 	src/token.cc \
 	src/top.cc \
+	src/type-info.cc \
 	src/unicode.cc \
 	src/utils.cc \
 	src/v8-counters.cc \
@@ -99,11 +106,14 @@
 		src/arm/constants-arm.cc \
 		src/arm/cpu-arm.cc \
 		src/arm/debug-arm.cc \
+		src/arm/deoptimizer-arm.cc \
 		src/arm/disasm-arm.cc \
 		src/arm/frames-arm.cc \
 		src/arm/full-codegen-arm.cc \
 		src/arm/ic-arm.cc \
 		src/arm/jump-target-arm.cc \
+		src/arm/lithium-arm.cc \
+		src/arm/lithium-codegen-arm.cc \
 		src/arm/macro-assembler-arm.cc \
 		src/arm/regexp-macro-assembler-arm.cc \
 		src/arm/register-allocator-arm.cc \
diff --git a/ChangeLog b/ChangeLog
index 86e41e1..c81d7a7 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,60 @@
+2010-12-21: Version 3.0.4
+
+        Added Date::ResetCache() to the API so that the cached values in the
+        Date object can be reset to allow live DST / timezone changes.
+
+        Extended existing support for printing (while debugging) the contents
+        of objects.  Added support for printing objects from release builds.
+
+        Fixed V8 issues 989, 1006, and 1007.
+
+
+2010-12-17: Version 3.0.3
+
+        Reapplied all changes for version 3.0.1.
+
+        Improved debugger protocol for remote debugging.
+
+        Added experimental support for using gyp to generate build files
+        for V8.
+
+        Fixed implementation of String::Write in the API (issue 975).
+
+
+2010-12-15: Version 3.0.2
+
+        Reverted version 3.0.1 and patch 3.0.1.1.
+
+
+2010-12-13: Version 3.0.1
+
+        Added support for an experimental internationalization API as an
+        extension.  This extension is disabled by default but can be enabled
+        when building V8.  The ECMAScript internationalization strawman is
+        at http://wiki.ecmascript.org/doku.php?id=strawman:i18n_api.
+
+        Made RegExp character class parsing stricter.  This mirrors a change
+        to RegExp parsing in WebKit.
+
+        Fixed a bug in Object.defineProperty when used to change attributes
+        of an existing property.  It incorrectly set the property value to
+        undefined (issue 965).
+
+        Fixed several different compilation failures on various platforms
+        caused by the 3.0.0 release.
+
+        Optimized Math.pow so it can work on unboxed doubles.
+
+        Sped up quoting of JSON strings by removing one traversal of the
+        string.
+
+
+2010-12-07: Version 3.0.0
+
+        Improved performance by (partially) addressing issue 957 on
+        IA-32. Still needs more work for the other architectures.
+
+
 2010-11-29: Version 2.5.9
 
         Fixed crashes during GC caused by partially initialized heap
@@ -126,7 +183,7 @@
 
         Added USE_SIMULATOR macro that explicitly indicates that we wish to use
         the simulator as the execution engine (by Mark Lam <mark.lam@palm.com>
-	from Hewlett-Packard Development Company, LP).
+        from Hewlett-Packard Development Company, LP).
 
         Fixed compilation error on ARM with gcc 4.4 (issue 894).
 
diff --git a/SConstruct b/SConstruct
index 820c1a1..f6d1385 100644
--- a/SConstruct
+++ b/SConstruct
@@ -108,11 +108,14 @@
       'CPPDEFINES': ['V8_INTERPRETED_REGEXP']
     },
     'mode:debug': {
-      'CPPDEFINES': ['V8_ENABLE_CHECKS']
+      'CPPDEFINES': ['V8_ENABLE_CHECKS', 'OBJECT_PRINT']
     },
     'vmstate:on': {
       'CPPDEFINES':   ['ENABLE_VMSTATE_TRACKING'],
     },
+    'objectprint:on': {
+      'CPPDEFINES':   ['OBJECT_PRINT'],
+    },
     'protectheap:on': {
       'CPPDEFINES':   ['ENABLE_VMSTATE_TRACKING', 'ENABLE_HEAP_PROTECTION'],
     },
@@ -523,7 +526,8 @@
       'CCFLAGS':      ['-O2']
     },
     'mode:debug': {
-      'CCFLAGS':      ['-g', '-O0']
+      'CCFLAGS':      ['-g', '-O0'],
+      'CPPDEFINES':   ['DEBUG']
     },
     'prof:oprofile': {
       'LIBPATH': ['/usr/lib32', '/usr/lib32/oprofile'],
@@ -578,13 +582,14 @@
       'LINKFLAGS': ['/MACHINE:X64', '/STACK:2091752']
     },
     'mode:debug': {
-      'CCFLAGS':   ['/Od'],
-      'LINKFLAGS': ['/DEBUG'],
+      'CCFLAGS':    ['/Od'],
+      'LINKFLAGS':  ['/DEBUG'],
+      'CPPDEFINES': ['DEBUG'],
       'msvcrt:static': {
-        'CCFLAGS': ['/MTd']
+        'CCFLAGS':  ['/MTd']
       },
       'msvcrt:shared': {
-        'CCFLAGS': ['/MDd']
+        'CCFLAGS':  ['/MDd']
       }
     }
   }
@@ -654,9 +659,18 @@
     return None
 
 
+def GuessVisibility(os, toolchain):
+  if os == 'win32' and toolchain == 'gcc':
+    # MinGW does not support hidden symbol visibility.
+    return 'default'
+  else:
+    return 'hidden'
+
+
 OS_GUESS = utils.GuessOS()
 TOOLCHAIN_GUESS = GuessToolchain(OS_GUESS)
 ARCH_GUESS = utils.GuessArchitecture()
+VISIBILITY_GUESS = GuessVisibility(OS_GUESS, TOOLCHAIN_GUESS)
 
 
 SIMPLE_OPTIONS = {
@@ -700,6 +714,11 @@
     'default': 'off',
     'help': 'enable VM state tracking'
   },
+  'objectprint': {
+    'values': ['on', 'off'],
+    'default': 'off',
+    'help': 'enable object printing'
+  },
   'protectheap': {
     'values': ['on', 'off'],
     'default': 'off',
@@ -762,8 +781,8 @@
   },
   'visibility': {
     'values': ['default', 'hidden'],
-    'default': 'hidden',
-    'help': 'shared library symbol visibility'
+    'default': VISIBILITY_GUESS,
+    'help': 'shared library symbol visibility (%s)' % VISIBILITY_GUESS
   },
   'pgo': {
     'values': ['off', 'instrument', 'optimize'],
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index 7d87a5a..afd88f3 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1 +1,5 @@
-http://v8.googlecode.com/svn/branches/2.5@7184
+We use a V8 revision that has been used for a Chromium release.
+
+http://src.chromium.org/svn/releases/10.0.621.0/DEPS
+http://v8.googlecode.com/svn/trunk@6101 (+ 1 cherry-pick from r6346 in src/v8utils.h to fix compilation on Android)
+
diff --git a/build/README.txt b/build/README.txt
new file mode 100644
index 0000000..7cd7e18
--- /dev/null
+++ b/build/README.txt
@@ -0,0 +1,25 @@
+This directory contains the V8 GYP files used to generate actual project files
+for different build systems.
+
+This is currently a work in progress, but it is expected to replace the
+SCons-based build system.
+
+To use this, a checkout of GYP is needed inside this directory. From the root
+of the V8 project, do the following:
+
+$ svn co http://gyp.googlecode.com/svn/trunk build/gyp
+
+To generate Makefiles and build the 32-bit version on Linux:
+
+$ GYP_DEFINES=target_arch=ia32 build/gyp_v8
+$ make
+
+To generate Makefiles and build the 64-bit version on Linux:
+
+$ GYP_DEFINES=target_arch=x64 build/gyp_v8
+$ make
+
+To generate Makefiles and build for the arm simulator on Linux:
+
+$ build/gyp_v8 -I build/arm.gypi
+$ make
diff --git a/build/all.gyp b/build/all.gyp
new file mode 100644
index 0000000..544e2c2
--- /dev/null
+++ b/build/all.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2010 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'targets': [
+    {
+      'target_name': 'All',
+      'type': 'none',
+      'dependencies': [
+        '../samples/samples.gyp:*',
+        '../test/cctest/cctest.gyp:*',
+        '../src/d8.gyp:*',
+      ]
+    }
+  ]
+}
+
diff --git a/build/armu.gypi b/build/armu.gypi
new file mode 100644
index 0000000..72eb4d1
--- /dev/null
+++ b/build/armu.gypi
@@ -0,0 +1,32 @@
+# Copyright 2010 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+  'variables': {
+    'v8_target_arch': 'arm',
+  }
+}
diff --git a/build/common.gypi b/build/common.gypi
new file mode 100644
index 0000000..3b5358e
--- /dev/null
+++ b/build/common.gypi
@@ -0,0 +1,82 @@
+# Copyright 2010 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+  'variables': {
+    'library%': 'static_library',
+    'component%': 'static_library',
+    'visibility%': 'hidden',
+    'variables': {
+      'conditions': [
+        [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
+          # This handles the Linux platforms we generally deal with. Anything
+          # else gets passed through, which probably won't work very well; such
+          # hosts should pass an explicit target_arch to gyp.
+          'host_arch%':
+            '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
+        }, {  # OS!="linux" and OS!="freebsd" and OS!="openbsd"
+          'host_arch%': 'ia32',
+        }],
+      ],
+    },
+    'host_arch%': '<(host_arch)',
+    'target_arch%': '<(host_arch)',
+    'v8_target_arch%': '<(target_arch)',
+  },
+  'target_defaults': {
+    'default_configuration': 'Debug',
+    'configurations': {
+      'Debug': {
+        'cflags': [ '-g', '-O0' ],
+        'defines': [ 'ENABLE_DISASSEMBLER', 'DEBUG' ],
+      },
+      'Release': {
+        'cflags': [ '-O3', '-fomit-frame-pointer', '-fdata-sections', '-ffunction-sections' ],
+      },
+    },
+  },
+  'conditions': [
+    [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
+      'target_defaults': {
+        'cflags': [ '-Wall', '-pthread', '-fno-rtti', '-fno-exceptions' ],
+        'ldflags': [ '-pthread', ],
+        'conditions': [
+          [ 'target_arch=="ia32"', {
+            'cflags': [ '-m32' ],
+            'ldflags': [ '-m32' ],
+          }],
+          [ 'OS=="linux"', {
+            'cflags': [ '-ansi' ],
+          }],
+          [ 'visibility=="hidden"', {
+            'cflags': [ '-fvisibility=hidden' ],
+          }],
+        ],
+      },
+    }],
+  ],
+}
diff --git a/build/gyp_v8 b/build/gyp_v8
new file mode 100755
index 0000000..0c2221e
--- /dev/null
+++ b/build/gyp_v8
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+#
+# Copyright 2010 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script is a wrapper for V8 that adds some support for how GYP
+# is invoked by V8 beyond what can be done in the gclient hooks.
+
+import glob
+import os
+import shlex
+import sys
+
+script_dir = os.path.dirname(__file__)
+v8_root = os.path.normpath(os.path.join(script_dir, os.pardir))
+
+sys.path.insert(0, os.path.join(v8_root, 'build', 'gyp', 'pylib'))
+import gyp
+
+def apply_gyp_environment(file_path=None):
+  """
+  Reads in a *.gyp_env file and applies the valid keys to os.environ.
+  """
+  if not file_path or not os.path.exists(file_path):
+    return
+  file_contents = open(file_path).read()
+  try:
+    file_data = eval(file_contents, {'__builtins__': None}, None)
+  except SyntaxError, e:
+    e.filename = os.path.abspath(file_path)
+    raise
+  supported_vars = ( 'V8_GYP_FILE',
+                     'V8_GYP_SYNTAX_CHECK',
+                     'GYP_DEFINES',
+                     'GYP_GENERATOR_FLAGS',
+                     'GYP_GENERATOR_OUTPUT', )
+  for var in supported_vars:
+    val = file_data.get(var)
+    if val:
+      if var in os.environ:
+        print 'INFO: Environment value for "%s" overrides value in %s.' % (
+            var, os.path.abspath(file_path)
+        )
+      else:
+        os.environ[var] = val
+
+def additional_include_files(args=[]):
+  """
+  Returns a list of additional (.gypi) files to include, without
+  duplicating ones that are already specified on the command line.
+  """
+  # Determine the include files specified on the command line.
+  # This doesn't cover all the different option formats you can use,
+  # but it's mainly intended to avoid duplicating flags on the automatic
+  # makefile regeneration which only uses this format.
+  specified_includes = set()
+  for arg in args:
+    if arg.startswith('-I') and len(arg) > 2:
+      specified_includes.add(os.path.realpath(arg[2:]))
+
+  result = []
+  def AddInclude(path):
+    if os.path.realpath(path) not in specified_includes:
+      result.append(path)
+
+  # Always include common.gypi.
+  AddInclude(os.path.join(script_dir, 'common.gypi'))
+
+  # Optionally add supplemental .gypi files if present.
+  supplements = glob.glob(os.path.join(v8_root, '*', 'supplement.gypi'))
+  for supplement in supplements:
+    AddInclude(supplement)
+
+  return result
+
+if __name__ == '__main__':
+  args = sys.argv[1:]
+
+  if 'SKIP_V8_GYP_ENV' not in os.environ:
+    # Update the environment based on v8.gyp_env
+    gyp_env_path = os.path.join(os.path.dirname(v8_root), 'v8.gyp_env')
+    apply_gyp_environment(gyp_env_path)
+
+  # This could give false positives since it doesn't actually do real option
+  # parsing.  Oh well.
+  gyp_file_specified = False
+  for arg in args:
+    if arg.endswith('.gyp'):
+      gyp_file_specified = True
+      break
+
+  # If we didn't get a file, check an env var, and then fall back to
+  # assuming 'all.gyp' from the same directory as the script.
+  if not gyp_file_specified:
+    gyp_file = os.environ.get('V8_GYP_FILE')
+    if gyp_file:
+      # Note that V8_GYP_FILE values can't have backslashes as
+      # path separators even on Windows due to the use of shlex.split().
+      args.extend(shlex.split(gyp_file))
+    else:
+      args.append(os.path.join(script_dir, 'all.gyp'))
+
+  args.extend(['-I' + i for i in additional_include_files(args)])
+
+  # There shouldn't be a circular dependency relationship between .gyp files
+  args.append('--no-circular-check')
+
+  # Set the GYP DEPTH variable to the root of the V8 project.
+  args.append('--depth=' + v8_root)
+
+  # If V8_GYP_SYNTAX_CHECK is set to 1, it will invoke gyp with --check
+  # to enforce syntax checking.
+  syntax_check = os.environ.get('V8_GYP_SYNTAX_CHECK')
+  if syntax_check and int(syntax_check):
+    args.append('--check')
+
+  print 'Updating projects from gyp files...'
+  sys.stdout.flush()
+
+  # Off we go...
+  sys.exit(gyp.main(args))
diff --git a/copy-new-sources b/copy-new-sources
new file mode 100755
index 0000000..84fc684
--- /dev/null
+++ b/copy-new-sources
@@ -0,0 +1 @@
+cp -r AUTHORS ChangeLog LICENSE SConstruct benchmarks include samples src test tools ../android/master/external/v8/ 
diff --git a/include/v8-preparser.h b/include/v8-preparser.h
new file mode 100644
index 0000000..9425f7d
--- /dev/null
+++ b/include/v8-preparser.h
@@ -0,0 +1,116 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef PREPARSER_H
+#define PREPARSER_H
+
+#include "v8stdint.h"
+
+#ifdef _WIN32
+
+// Setup for Windows DLL export/import. When building the V8 DLL the
+// BUILDING_V8_SHARED needs to be defined. When building a program which uses
+// the V8 DLL USING_V8_SHARED needs to be defined. When either building the V8
+// static library or building a program which uses the V8 static library neither
+// BUILDING_V8_SHARED nor USING_V8_SHARED should be defined.
+#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
+#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
+  build configuration to ensure that at most one of these is set
+#endif
+
+#ifdef BUILDING_V8_SHARED
+#define V8EXPORT __declspec(dllexport)
+#elif USING_V8_SHARED
+#define V8EXPORT __declspec(dllimport)
+#else
+#define V8EXPORT
+#endif  // BUILDING_V8_SHARED
+
+#else  // _WIN32
+
+// Setup for Linux shared library export. There is no need to distinguish
+// between building or using the V8 shared library, but we should not
+// export symbols when we are building a static library.
+#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
+#define V8EXPORT __attribute__ ((visibility("default")))
+#else  // defined(__GNUC__) && (__GNUC__ >= 4)
+#define V8EXPORT
+#endif  // defined(__GNUC__) && (__GNUC__ >= 4)
+
+#endif  // _WIN32
+
+
+namespace v8 {
+
+
+class PreParserData {
+ public:
+  PreParserData(size_t size, const uint8_t* data)
+      : data_(data), size_(size) { }
+
+  // Create a PreParserData value where stack_overflow reports true.
+  static PreParserData StackOverflow() { return PreParserData(0, NULL); }
+
+  // Whether the pre-parser stopped due to a stack overflow.
+  // If this is the case, size() and data() should not be used.
+  bool stack_overflow() { return size_ == 0u; }
+
+  // The size of the data in bytes.
+  size_t size() const { return size_; }
+
+  // Pointer to the data.
+  const uint8_t* data() const { return data_; }
+
+ private:
+  const uint8_t* const data_;
+  const size_t size_;
+};
+
+
+// Interface for a stream of Unicode characters.
+class UnicodeInputStream {
+ public:
+  virtual ~UnicodeInputStream();
+
+  // Returns the next Unicode code-point in the input, or a negative value when
+  // there is no more input in the stream.
+  virtual int32_t Next() = 0;
+};
+
+
+// Preparse a JavaScript program. The source code is provided as a
+// UnicodeInputStream. The max_stack_size limits the amount of stack
+// space that the preparser is allowed to use. If the preparser uses
+// more stack space than the limit provided, the result's stack_overflow()
+// method will return true. Otherwise the result contains preparser
+// data that can be used by the V8 parser to speed up parsing.
+PreParserData V8EXPORT Preparse(UnicodeInputStream* input,
+                                size_t max_stack_size);
+
+}  // namespace v8.
+
+#endif  // PREPARSER_H
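
The header above is the entire public preparsing API: implement
UnicodeInputStream::Next(), hand the stream to v8::Preparse() with a stack
budget, and check stack_overflow() before touching size() or data(). A minimal
caller might look like the sketch below; the StringStream class, the source
text, and the stack budget are illustrative only, and the program must link
against the preparser library that defines the UnicodeInputStream destructor.

  // Sketch only: feed an in-memory ASCII buffer to v8::Preparse().
  #include <stdio.h>
  #include <string.h>
  #include "v8-preparser.h"

  class StringStream : public v8::UnicodeInputStream {
   public:
    explicit StringStream(const char* source)
        : source_(source), pos_(0), length_(strlen(source)) { }
    virtual int32_t Next() {
      if (pos_ >= length_) return -1;  // Negative value signals end of input.
      return static_cast<int32_t>(static_cast<unsigned char>(source_[pos_++]));
    }
   private:
    const char* source_;
    size_t pos_;
    size_t length_;
  };

  int main() {
    StringStream stream("function add(a, b) { return a + b; }");
    v8::PreParserData data = v8::Preparse(&stream, 64 * 1024 * sizeof(void*));
    if (data.stack_overflow()) return 1;  // size()/data() are invalid here.
    printf("preparser data: %u bytes\n", static_cast<unsigned>(data.size()));
    return 0;
  }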
diff --git a/include/v8-profiler.h b/include/v8-profiler.h
index 72195c4..675a229 100644
--- a/include/v8-profiler.h
+++ b/include/v8-profiler.h
@@ -245,7 +245,6 @@
 class V8EXPORT HeapGraphNode {
  public:
   enum Type {
-    kInternal = 0,   // For compatibility, will be removed.
     kHidden = 0,     // Hidden node, may be filtered when shown to user.
     kArray = 1,      // An array of elements.
     kString = 2,     // A string.
@@ -358,6 +357,9 @@
   /** Returns the root node of the heap graph. */
   const HeapGraphNode* GetRoot() const;
 
+  /** Returns a node by its id. */
+  const HeapGraphNode* GetNodeById(uint64_t id) const;
+
   /**
    * Returns a diff between this snapshot and another one. Only snapshots
    * of the same type can be compared.
@@ -410,7 +412,8 @@
    */
   static const HeapSnapshot* TakeSnapshot(
       Handle<String> title,
-      HeapSnapshot::Type type = HeapSnapshot::kFull);
+      HeapSnapshot::Type type = HeapSnapshot::kFull,
+      ActivityControl* control = NULL);
 };
 
 
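The new control argument pairs with the ActivityControl interface added to
v8.h later in this patch; passing NULL keeps the old behavior. A hedged
sketch of a snapshot with progress reporting follows, assuming TakeSnapshot's
enclosing HeapProfiler class from this header; the SnapshotProgress class and
the abort threshold are illustrative only.

  // Sketch only: cancel an over-long heap snapshot via ActivityControl.
  class SnapshotProgress : public v8::ActivityControl {
   public:
    virtual ControlOption ReportProgressValue(int done, int total) {
      // Returning kAbort stops the snapshot; kContinue lets it proceed.
      return (total > 1000000) ? kAbort : kContinue;
    }
  };

  void TakeControlledSnapshot() {
    SnapshotProgress progress;
    const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
        v8::String::New("with-progress"), v8::HeapSnapshot::kFull, &progress);
    if (snapshot != NULL) {
      // GetNodeById() (also new above) can then resolve nodes by their id.
      const v8::HeapGraphNode* root = snapshot->GetRoot();
      (void)root;
    }
  }
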
diff --git a/include/v8-testing.h b/include/v8-testing.h
new file mode 100644
index 0000000..4db30a4
--- /dev/null
+++ b/include/v8-testing.h
@@ -0,0 +1,99 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_V8_TEST_H_
+#define V8_V8_TEST_H_
+
+#include "v8.h"
+
+#ifdef _WIN32
+// Setup for Windows DLL export/import. See v8.h in this directory for
+// information on how to build/use V8 as a DLL.
+#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
+#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
+  build configuration to ensure that at most one of these is set
+#endif
+
+#ifdef BUILDING_V8_SHARED
+#define V8EXPORT __declspec(dllexport)
+#elif USING_V8_SHARED
+#define V8EXPORT __declspec(dllimport)
+#else
+#define V8EXPORT
+#endif
+
+#else  // _WIN32
+
+// Setup for Linux shared library export. See v8.h in this directory for
+// information on how to build/use V8 as shared library.
+#if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
+#define V8EXPORT __attribute__ ((visibility("default")))
+#else  // defined(__GNUC__) && (__GNUC__ >= 4)
+#define V8EXPORT
+#endif  // defined(__GNUC__) && (__GNUC__ >= 4)
+
+#endif  // _WIN32
+
+
+/**
+ * Testing support for the V8 JavaScript engine.
+ */
+namespace v8 {
+
+class V8EXPORT Testing {
+ public:
+  enum StressType {
+    kStressTypeOpt,
+    kStressTypeDeopt
+  };
+
+  /**
+   * Set the type of stressing to do. The default if not set is kStressTypeOpt.
+   */
+  static void SetStressRunType(StressType type);
+
+  /**
+   * Get the number of runs of a given test that is required to get the full
+   * stress coverage.
+   */
+  static int GetStressRuns();
+
+  /**
+   * Indicate the number of the run which is about to start. The value of run
+   * should be between 0 and one less than the result from GetStressRuns()
+   */
+  static void PrepareStressRun(int run);
+};
+
+
+}  // namespace v8
+
+
+#undef V8EXPORT
+
+
+#endif  // V8_V8_TEST_H_
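
The stress API above is a three-call protocol: pick a stress type, ask how
many runs are needed, and announce each run before executing the workload.
The samples/shell.cc change later in this patch adopts exactly this loop; a
condensed sketch, where RunTestOnce() stands in for the embedder's own test
entry point, might read:

  // Sketch only: drive the stress-testing hooks declared above.
  void RunStressed() {
    v8::Testing::SetStressRunType(v8::Testing::kStressTypeDeopt);
    int stress_runs = v8::Testing::GetStressRuns();
    for (int i = 0; i < stress_runs; i++) {
      // Runs are numbered 0 .. GetStressRuns() - 1; the final run forces
      // (de)optimization of the code exercised by the earlier runs.
      v8::Testing::PrepareStressRun(i);
      RunTestOnce();  // Hypothetical embedder test entry point.
    }
  }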
diff --git a/include/v8.h b/include/v8.h
index a202eaa..2c0f350 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -992,18 +992,23 @@
    * the contents of the string and the NULL terminator into the
    * buffer.
    *
+   * WriteUtf8 will not write partial UTF-8 sequences, preferring to stop
+   * before the end of the buffer.
+   *
    * Copies up to length characters into the output buffer.
    * Only null-terminates if there is enough space in the buffer.
    *
    * \param buffer The buffer into which the string will be copied.
    * \param start The starting position within the string at which
    * copying begins.
-   * \param length The number of bytes to copy from the string.
+   * \param length The number of characters to copy from the string.  For
+   *    WriteUtf8 the number of bytes in the buffer.
    * \param nchars_ref The number of characters written, can be NULL.
    * \param hints Various hints that might affect performance of this or
    *    subsequent operations.
-   * \return The number of bytes copied to the buffer
-   * excluding the NULL terminator.
+   * \return The number of characters copied to the buffer excluding the null
+   *    terminator.  For WriteUtf8: The number of bytes copied to the buffer
+   *    including the null terminator.
    */
   enum WriteHints {
     NO_HINTS = 0,
@@ -1350,6 +1355,21 @@
   V8EXPORT double NumberValue() const;
 
   static inline Date* Cast(v8::Value* obj);
+
+  /**
+   * Notification that the embedder has changed the time zone,
+   * daylight saving time, or other date / time configuration
+   * parameters.  V8 keeps a cache of various values used for
+   * date / time computation.  This notification will reset
+   * those cached values for the current context so that date /
+   * time configuration changes are reflected in the Date
+   * object.
+   *
+   * This API should not be called more often than needed as it will
+   * negatively impact the performance of date operations.
+   */
+  V8EXPORT static void DateTimeConfigurationChangeNotification();
+
  private:
   V8EXPORT static void CheckCast(v8::Value* obj);
 };
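
This is the API surface behind the Date::ResetCache() entry in the 3.0.4
ChangeLog above. An embedder reacting to a live time-zone change might use
it as sketched below; how the embedder detects the change, and the
setenv/tzset pair, are host-specific and illustrative only.

  // Sketch only: flush V8's cached date/time configuration after the
  // host time zone changes so Date computations pick up the new zone.
  #include <stdlib.h>
  #include <time.h>
  #include <v8.h>

  void OnTimeZoneChanged(const char* new_tz) {
    setenv("TZ", new_tz, 1);  // Host-specific way of switching zones.
    tzset();
    v8::Date::DateTimeConfigurationChangeNotification();
  }
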
@@ -3281,6 +3301,24 @@
 };
 
 
+/**
+ * An interface for reporting progress and controlling long-running
+ * activities.
+ */
+class V8EXPORT ActivityControl {  // NOLINT
+ public:
+  enum ControlOption {
+    kContinue = 0,
+    kAbort = 1
+  };
+  virtual ~ActivityControl() {}
+  /**
+   * Notify about current progress. The activity can be stopped by
+   * returning kAbort as the callback result.
+   */
+  virtual ControlOption ReportProgressValue(int done, int total) = 0;
+};
+
 
 // --- I m p l e m e n t a t i o n ---
 
@@ -3300,10 +3338,10 @@
 const int kSmiTagSize = 1;
 const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
 
-template <size_t ptr_size> struct SmiTagging;
+template <size_t ptr_size> struct SmiConstants;
 
 // Smi constants for 32-bit systems.
-template <> struct SmiTagging<4> {
+template <> struct SmiConstants<4> {
   static const int kSmiShiftSize = 0;
   static const int kSmiValueSize = 31;
   static inline int SmiToInt(internal::Object* value) {
@@ -3311,15 +3349,10 @@
     // Throw away top 32 bits and shift down (requires >> to be sign extending).
     return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
   }
-
-  // For 32-bit systems any 2 bytes aligned pointer can be encoded as smi
-  // with a plain reinterpret_cast.
-  static const uintptr_t kEncodablePointerMask = 0x1;
-  static const int kPointerToSmiShift = 0;
 };
 
 // Smi constants for 64-bit systems.
-template <> struct SmiTagging<8> {
+template <> struct SmiConstants<8> {
   static const int kSmiShiftSize = 31;
   static const int kSmiValueSize = 32;
   static inline int SmiToInt(internal::Object* value) {
@@ -3327,26 +3360,10 @@
     // Shift down and throw away top 32 bits.
     return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
   }
-
-  // To maximize the range of pointers that can be encoded
-  // in the available 32 bits, we require them to be 8 bytes aligned.
-  // This gives 2 ^ (32 + 3) = 32G address space covered.
-  // It might be not enough to cover stack allocated objects on some platforms.
-  static const int kPointerAlignment = 3;
-
-  static const uintptr_t kEncodablePointerMask =
-      ~(uintptr_t(0xffffffff) << kPointerAlignment);
-
-  static const int kPointerToSmiShift =
-      kSmiTagSize + kSmiShiftSize - kPointerAlignment;
 };
 
-typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
-const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
-const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
-const uintptr_t kEncodablePointerMask =
-    PlatformSmiTagging::kEncodablePointerMask;
-const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
+const int kSmiShiftSize = SmiConstants<kApiPointerSize>::kSmiShiftSize;
+const int kSmiValueSize = SmiConstants<kApiPointerSize>::kSmiValueSize;
 
 template <size_t ptr_size> struct InternalConstants;
 
@@ -3394,7 +3411,7 @@
   }
 
   static inline int SmiValue(internal::Object* value) {
-    return PlatformSmiTagging::SmiToInt(value);
+    return SmiConstants<kApiPointerSize>::SmiToInt(value);
   }
 
   static inline int GetInstanceType(internal::Object* obj) {
@@ -3403,14 +3420,9 @@
     return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
   }
 
-  static inline void* GetExternalPointerFromSmi(internal::Object* value) {
-    const uintptr_t address = reinterpret_cast<uintptr_t>(value);
-    return reinterpret_cast<void*>(address >> kPointerToSmiShift);
-  }
-
   static inline void* GetExternalPointer(internal::Object* obj) {
     if (HasSmiTag(obj)) {
-      return GetExternalPointerFromSmi(obj);
+      return obj;
     } else if (GetInstanceType(obj) == kProxyType) {
       return ReadField<void*>(obj, kProxyProxyOffset);
     } else {
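
The SmiConstants specializations above fix the smi layout: on 32-bit targets
a smi stores value << 1 with tag bit 0 in the low word; on 64-bit targets the
32-bit payload lives in the upper half of the word, so the decode shift is
kSmiTagSize + kSmiShiftSize = 32. A worked decode matching SmiToInt; like the
comments in the header, the assertions rely on >> being sign-extending.

  // Sketch only: the arithmetic behind the two SmiToInt specializations.
  #include <assert.h>
  #include <stdint.h>

  int main() {
    // 32-bit layout: shift_bits = kSmiTagSize + kSmiShiftSize = 1 + 0.
    int32_t smi32 = 42 << 1;                         // stored form: 0x54
    assert((smi32 >> 1) == 42);
    // 64-bit layout: shift_bits = kSmiTagSize + kSmiShiftSize = 1 + 31.
    int64_t smi64 = static_cast<int64_t>(42) << 32;  // 0x0000002A00000000
    assert(static_cast<int>(smi64 >> 32) == 42);
    return 0;
  }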
diff --git a/preparser/preparser-process.cc b/preparser/preparser-process.cc
index 706a225..26dfc42 100644
--- a/preparser/preparser-process.cc
+++ b/preparser/preparser-process.cc
@@ -25,17 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include <stdlib.h>
 #include <stdarg.h>
 #include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
-#include "allocation.h"
-#include "utils.h"
-#include "list.h"
-#include "smart-pointer.h"
-#include "scanner-base.h"
-#include "preparse-data.h"
-#include "preparser.h"
+#include "../include/v8-preparser.h"
+#include "unicode-inl.h"
 
 enum ResultCode { kSuccess = 0, kErrorReading = 1, kErrorWriting = 2 };
 
@@ -45,78 +39,66 @@
 // THIS FILE IS PROOF-OF-CONCEPT ONLY.
 // The final goal is a stand-alone preparser library.
 
-// UTF16Buffer based on an UTF-8 string in memory.
-class UTF8UTF16Buffer : public UTF16Buffer {
+
+class UTF8InputStream : public v8::UnicodeInputStream {
  public:
-  UTF8UTF16Buffer(uint8_t* buffer, size_t length)
-      : UTF16Buffer(),
-        buffer_(buffer),
+  UTF8InputStream(uint8_t* buffer, size_t length)
+      : buffer_(buffer),
         offset_(0),
+        pos_(0),
         end_offset_(static_cast<int>(length)) { }
 
-  virtual void PushBack(uc32 ch) {
+  virtual ~UTF8InputStream() { }
+
+  virtual void PushBack(int32_t ch) {
     // Pushback assumes that the character pushed back is the
     // one that was most recently read, and jumps back in the
     // UTF-8 stream by the length of that character's encoding.
     offset_ -= unibrow::Utf8::Length(ch);
     pos_--;
 #ifdef DEBUG
-    int tmp = 0;
-    ASSERT_EQ(ch, unibrow::Utf8::ValueOf(buffer_ + offset_,
-                                         end_offset_ - offset_,
-                                         &tmp);
+    if (static_cast<unsigned>(ch) <= unibrow::Utf8::kMaxOneByteChar) {
+      if (ch != buffer_[offset_]) {
+        fprintf(stderr, "Invalid pushback: '%c'.", ch);
+        exit(1);
+      }
+    } else {
+      unsigned tmp = 0;
+      if (static_cast<unibrow::uchar>(ch) !=
+          unibrow::Utf8::CalculateValue(buffer_ + offset_,
+                                        end_offset_ - offset_,
+                                        &tmp)) {
+        fprintf(stderr, "Invalid pushback: 0x%x.", ch);
+        exit(1);
+      }
+    }
 #endif
   }
 
-  virtual uc32 Advance() {
+  virtual int32_t Next() {
     if (offset_ == end_offset_) return -1;
     uint8_t first_char = buffer_[offset_];
     if (first_char <= unibrow::Utf8::kMaxOneByteChar) {
       pos_++;
       offset_++;
-      return static_cast<uc32>(first_char);
+      return static_cast<int32_t>(first_char);
     }
     unibrow::uchar codepoint =
         unibrow::Utf8::CalculateValue(buffer_ + offset_,
                                       end_offset_ - offset_,
                                       &offset_);
     pos_++;
-    return static_cast<uc32>(codepoint);
-  }
-
-  virtual void SeekForward(int pos) {
-    while (pos_ < pos) {
-      uint8_t first_byte = buffer_[offset_++];
-      while (first_byte & 0x80u && offset_ < end_offset_) {
-        offset_++;
-        first_byte <<= 1;
-      }
-      pos_++;
-    }
+    return static_cast<int32_t>(codepoint);
   }
 
  private:
   const uint8_t* buffer_;
   unsigned offset_;
+  unsigned pos_;
   unsigned end_offset_;
 };
 
 
-class StandAloneJavaScriptScanner : public JavaScriptScanner {
- public:
-  void Initialize(UTF16Buffer* source) {
-    source_ = source;
-    literal_flags_ = kLiteralString | kLiteralIdentifier;
-    Init();
-    // Skip initial whitespace allowing HTML comment ends just like
-    // after a newline and scan first token.
-    has_line_terminator_before_next_ = true;
-    SkipWhiteSpace();
-    Scan();
-  }
-};
-
-
 // Write a number to dest in network byte order.
 void WriteUInt32(FILE* dest, uint32_t value, bool* ok) {
   for (int i = 3; i >= 0; i--) {
@@ -145,83 +127,80 @@
 
 
 bool ReadBuffer(FILE* source, void* buffer, size_t length) {
-  size_t actually_read = fread(buffer, 1, length, stdin);
+  size_t actually_read = fread(buffer, 1, length, source);
   return (actually_read == length);
 }
 
 
-bool WriteBuffer(FILE* dest, void* buffer, size_t length) {
+bool WriteBuffer(FILE* dest, const void* buffer, size_t length) {
   size_t actually_written = fwrite(buffer, 1, length, dest);
   return (actually_written == length);
 }
 
-// Preparse stdin and output result on stdout.
-int PreParseIO() {
+
+template <typename T>
+class ScopedPointer {
+ public:
+  explicit ScopedPointer(T* pointer) : pointer_(pointer) {}
+  ~ScopedPointer() { delete[] pointer_; }
+  T& operator[](int index) { return pointer_[index]; }
+  T* operator*() { return pointer_; }
+ private:
+  T* pointer_;
+};
+
+
+// Preparse input and output result on stdout.
+int PreParseIO(FILE* input) {
   fprintf(stderr, "LOG: Enter parsing loop\n");
   bool ok = true;
-  uint32_t length = ReadUInt32(stdin, &ok);
+  uint32_t length = ReadUInt32(input, &ok);
+  fprintf(stderr, "LOG: Input length: %u\n", length);
   if (!ok) return kErrorReading;
-  SmartPointer<byte> buffer(NewArray<byte>(length));
-  if (!ReadBuffer(stdin, *buffer, length)) {
+  ScopedPointer<uint8_t> buffer(new uint8_t[length]);
+
+  if (!ReadBuffer(input, *buffer, length)) {
     return kErrorReading;
   }
-  UTF8UTF16Buffer input_buffer(*buffer, static_cast<size_t>(length));
-  StandAloneJavaScriptScanner scanner;
-  scanner.Initialize(&input_buffer);
-  CompleteParserRecorder recorder;
-  preparser::PreParser preparser;
+  UTF8InputStream input_buffer(*buffer, static_cast<size_t>(length));
 
-  if (!preparser.PreParseProgram(&scanner, &recorder, true)) {
-    if (scanner.stack_overflow()) {
-      // Report stack overflow error/no-preparser-data.
-      WriteUInt32(stdout, 0, &ok);
-      if (!ok) return kErrorWriting;
-      return 0;
-    }
+  v8::PreParserData data =
+      v8::Preparse(&input_buffer, 64 * 1024 * sizeof(void*));  // NOLINT
+  if (data.stack_overflow()) {
+    fprintf(stderr, "LOG: Stack overflow\n");
+    fflush(stderr);
+    // Report stack overflow error/no-preparser-data.
+    WriteUInt32(stdout, 0, &ok);
+    if (!ok) return kErrorWriting;
+    return 0;
   }
-  Vector<unsigned> pre_data = recorder.ExtractData();
 
-  uint32_t size = static_cast<uint32_t>(pre_data.length() * sizeof(uint32_t));
+  uint32_t size = data.size();
+  fprintf(stderr, "LOG: Success, data size: %u\n", size);
+  fflush(stderr);
   WriteUInt32(stdout, size, &ok);
   if (!ok) return kErrorWriting;
-  if (!WriteBuffer(stdout,
-                   reinterpret_cast<byte*>(pre_data.start()),
-                   size)) {
+  if (!WriteBuffer(stdout, data.data(), size)) {
     return kErrorWriting;
   }
   return 0;
 }
 
-// Functions declared by allocation.h
-
-void FatalProcessOutOfMemory(const char* location) {
-  V8_Fatal("", 0, location);
-}
-
-bool EnableSlowAsserts() { return true; }
-
 } }  // namespace v8::internal
 
 
 int main(int argc, char* argv[]) {
+  FILE* input = stdin;
+  if (argc > 1) {
+    char* arg = argv[1];
+    input = fopen(arg, "rb");
+    if (input == NULL) return EXIT_FAILURE;
+  }
   int status = 0;
   do {
-    status = v8::internal::PreParseIO();
+    status = v8::internal::PreParseIO(input);
   } while (status == 0);
   fprintf(stderr, "EXIT: Failure %d\n", status);
-  return EXIT_FAILURE;
-}
-
-
-// Fatal error handling declared by checks.h.
-
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
-  fflush(stdout);
   fflush(stderr);
-  va_list arguments;
-  va_start(arguments, format);
-  vfprintf(stderr, format, arguments);
-  va_end(arguments);
-  fputs("\n#\n\n", stderr);
-  exit(EXIT_FAILURE);
+  return EXIT_FAILURE;
 }
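
The driver protocol implied by the hunks above is a 4-byte big-endian
("network byte order") length followed by the preparser data. The body of
WriteUInt32 is elided by the diff context; a plausible reconstruction,
consistent with the MSB-first loop that is visible and with the caller's
convention that ok starts true and is only cleared on failure:

  // Sketch only: write a 32-bit value MSB-first, clearing *ok on error.
  void WriteUInt32(FILE* dest, uint32_t value, bool* ok) {
    for (int i = 3; i >= 0; i--) {
      int byte = static_cast<int>((value >> (i * 8)) & 0xff);
      if (fputc(byte, dest) == EOF) {
        *ok = false;
        return;
      }
    }
  }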
diff --git a/samples/samples.gyp b/samples/samples.gyp
new file mode 100644
index 0000000..f383ee2
--- /dev/null
+++ b/samples/samples.gyp
@@ -0,0 +1,51 @@
+# Copyright 2010 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+  'targets': [
+    {
+      'target_name': 'shell',
+      'type': 'executable',
+      'dependencies': [
+        '../tools/gyp/v8.gyp:v8',
+      ],
+      'sources': [
+        'shell.cc',
+      ],
+    },
+    {
+      'target_name': 'process',
+      'type': 'executable',
+      'dependencies': [
+        '../tools/gyp/v8.gyp:v8',
+      ],
+      'sources': [
+        'process.cc',
+      ],
+    }
+  ],
+}
diff --git a/samples/shell.cc b/samples/shell.cc
index 1a13f5f..6b67df6 100644
--- a/samples/shell.cc
+++ b/samples/shell.cc
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include <v8.h>
+#include <v8-testing.h>
 #include <fcntl.h>
 #include <string.h>
 #include <stdio.h>
@@ -47,7 +48,6 @@
 
 
 int RunMain(int argc, char* argv[]) {
-  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
   v8::HandleScope handle_scope;
   // Create a template for the global object.
   v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
@@ -63,11 +63,11 @@
   global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
   // Create a new execution environment containing the built-in
   // functions
-  v8::Handle<v8::Context> context = v8::Context::New(NULL, global);
-  // Enter the newly created execution environment.
-  v8::Context::Scope context_scope(context);
+  v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
   bool run_shell = (argc == 1);
   for (int i = 1; i < argc; i++) {
+    // Enter the execution environment before evaluating any code.
+    v8::Context::Scope context_scope(context);
     const char* str = argv[i];
     if (strcmp(str, "--shell") == 0) {
       run_shell = true;
@@ -99,12 +99,48 @@
     }
   }
   if (run_shell) RunShell(context);
+  context.Dispose();
   return 0;
 }
 
 
 int main(int argc, char* argv[]) {
-  int result = RunMain(argc, argv);
+  // Figure out if we're requested to stress the optimization
+  // infrastructure by running tests multiple times and forcing
+  // optimization in the last run.
+  bool FLAG_stress_opt = false;
+  bool FLAG_stress_deopt = false;
+  for (int i = 0; i < argc; i++) {
+    if (strcmp(argv[i], "--stress-opt") == 0) {
+      FLAG_stress_opt = true;
+      argv[i] = NULL;
+    } else if (strcmp(argv[i], "--stress-deopt") == 0) {
+      FLAG_stress_deopt = true;
+      argv[i] = NULL;
+    } else if (strcmp(argv[i], "--noalways-opt") == 0) {
+      // No support for stressing if we can't use --always-opt.
+      FLAG_stress_opt = false;
+      FLAG_stress_deopt = false;
+      break;
+    }
+  }
+
+  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+  int result = 0;
+  if (FLAG_stress_opt || FLAG_stress_deopt) {
+    v8::Testing::SetStressRunType(FLAG_stress_opt
+                                  ? v8::Testing::kStressTypeOpt
+                                  : v8::Testing::kStressTypeDeopt);
+    int stress_runs = v8::Testing::GetStressRuns();
+    for (int i = 0; i < stress_runs && result == 0; i++) {
+      printf("============ Stress %d/%d ============\n",
+             i + 1, stress_runs);
+      v8::Testing::PrepareStressRun(i);
+      result = RunMain(argc, argv);
+    }
+  } else {
+    result = RunMain(argc, argv);
+  }
   v8::V8::Dispose();
   return result;
 }
@@ -221,6 +257,8 @@
 void RunShell(v8::Handle<v8::Context> context) {
   printf("V8 version %s\n", v8::V8::GetVersion());
   static const int kBufferSize = 256;
+  // Enter the execution environment before evaluating any code.
+  v8::Context::Scope context_scope(context);
   while (true) {
     char buffer[kBufferSize];
     printf("> ");
diff --git a/src/SConscript b/src/SConscript
index 8953698..dfa099c 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -40,6 +40,7 @@
     api.cc
     assembler.cc
     ast.cc
+    atomicops_internals_x86_gcc.cc
     bignum.cc
     bignum-dtoa.cc
     bootstrapper.cc
@@ -59,6 +60,7 @@
     dateparser.cc
     debug-agent.cc
     debug.cc
+    deoptimizer.cc
     disassembler.cc
     diy-fp.cc
     dtoa.cc
@@ -76,10 +78,13 @@
     hashmap.cc
     heap-profiler.cc
     heap.cc
+    hydrogen.cc
+    hydrogen-instructions.cc
     ic.cc
     interpreter-irregexp.cc
     jsregexp.cc
     jump-target.cc
+    lithium-allocator.cc
     liveedit.cc
     log-utils.cc
     log.cc
@@ -99,6 +104,8 @@
     register-allocator.cc
     rewriter.cc
     runtime.cc
+    runtime-profiler.cc
+    safepoint-table.cc
     scanner-base.cc
     scanner.cc
     scopeinfo.cc
@@ -134,11 +141,14 @@
     arm/constants-arm.cc
     arm/cpu-arm.cc
     arm/debug-arm.cc
+    arm/deoptimizer-arm.cc
     arm/disasm-arm.cc
     arm/frames-arm.cc
     arm/full-codegen-arm.cc
     arm/ic-arm.cc
     arm/jump-target-arm.cc
+    arm/lithium-arm.cc
+    arm/lithium-codegen-arm.cc
     arm/macro-assembler-arm.cc
     arm/regexp-macro-assembler-arm.cc
     arm/register-allocator-arm.cc
@@ -172,11 +182,14 @@
     ia32/codegen-ia32.cc
     ia32/cpu-ia32.cc
     ia32/debug-ia32.cc
+    ia32/deoptimizer-ia32.cc
     ia32/disasm-ia32.cc
     ia32/frames-ia32.cc
     ia32/full-codegen-ia32.cc
     ia32/ic-ia32.cc
     ia32/jump-target-ia32.cc
+    ia32/lithium-codegen-ia32.cc
+    ia32/lithium-ia32.cc
     ia32/macro-assembler-ia32.cc
     ia32/regexp-macro-assembler-ia32.cc
     ia32/register-allocator-ia32.cc
@@ -192,6 +205,7 @@
     x64/codegen-x64.cc
     x64/cpu-x64.cc
     x64/debug-x64.cc
+    x64/deoptimizer-x64.cc
     x64/disasm-x64.cc
     x64/frames-x64.cc
     x64/full-codegen-x64.cc
@@ -216,7 +230,8 @@
   'mode:release': [],
   'mode:debug': [
     'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
-  ]
+  ],
+  'objectprint:on': ['objects-debug.cc']
 }
 
 
diff --git a/src/accessors.cc b/src/accessors.cc
index 08ef41b..43d54fe 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -28,8 +28,11 @@
 #include "v8.h"
 
 #include "accessors.h"
+#include "ast.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "factory.h"
+#include "safepoint-table.h"
 #include "scopeinfo.h"
 #include "top.h"
 
@@ -503,11 +506,9 @@
     // If the function isn't compiled yet, the length is not computed
     // correctly yet. Compile it now and return the right length.
     HandleScope scope;
-    Handle<SharedFunctionInfo> shared(function->shared());
-    if (!CompileLazyShared(shared, KEEP_EXCEPTION)) {
-      return Failure::Exception();
-    }
-    return Smi::FromInt(shared->length());
+    Handle<JSFunction> handle(function);
+    if (!CompileLazy(handle, KEEP_EXCEPTION)) return Failure::Exception();
+    return Smi::FromInt(handle->shared()->length());
   } else {
     return Smi::FromInt(function->shared()->length());
   }
@@ -545,6 +546,208 @@
 // Accessors::FunctionArguments
 //
 
+static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
+  if (slot_index >= 0) {
+    const int offset = JavaScriptFrameConstants::kLocal0Offset;
+    return frame->fp() + offset - (slot_index * kPointerSize);
+  } else {
+    const int offset = JavaScriptFrameConstants::kReceiverOffset;
+    return frame->caller_sp() + offset + (slot_index * kPointerSize);
+  }
+}
+
+
+// We can't intermix stack decoding and allocations because
+// the deoptimization infrastructure is not GC safe.
+// Thus we build a temporary structure in malloced space.
+class SlotRef BASE_EMBEDDED {
+ public:
+  enum SlotRepresentation {
+    UNKNOWN,
+    TAGGED,
+    INT32,
+    DOUBLE,
+    LITERAL
+  };
+
+  SlotRef()
+      : addr_(NULL), representation_(UNKNOWN) { }
+
+  SlotRef(Address addr, SlotRepresentation representation)
+      : addr_(addr), representation_(representation) { }
+
+  explicit SlotRef(Object* literal)
+      : literal_(literal), representation_(LITERAL) { }
+
+  Handle<Object> GetValue() {
+    switch (representation_) {
+      case TAGGED:
+        return Handle<Object>(Memory::Object_at(addr_));
+
+      case INT32: {
+        int value = Memory::int32_at(addr_);
+        if (Smi::IsValid(value)) {
+          return Handle<Object>(Smi::FromInt(value));
+        } else {
+          return Factory::NewNumberFromInt(value);
+        }
+      }
+
+      case DOUBLE: {
+        double value = Memory::double_at(addr_);
+        return Factory::NewNumber(value);
+      }
+
+      case LITERAL:
+        return literal_;
+
+      default:
+        UNREACHABLE();
+        return Handle<Object>::null();
+    }
+  }
+
+ private:
+  Address addr_;
+  Handle<Object> literal_;
+  SlotRepresentation representation_;
+};
+
+
+static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
+                                          DeoptimizationInputData* data,
+                                          JavaScriptFrame* frame) {
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+
+  switch (opcode) {
+    case Translation::BEGIN:
+    case Translation::FRAME:
+      // Peeled off before getting here.
+      break;
+
+    case Translation::ARGUMENTS_OBJECT:
+      // This can be only emitted for local slots not for argument slots.
+      break;
+
+    case Translation::REGISTER:
+    case Translation::INT32_REGISTER:
+    case Translation::DOUBLE_REGISTER:
+    case Translation::DUPLICATE:
+      // We are at a safepoint which corresponds to a call.  All registers
+      // are saved by the caller, so there are no live registers at this
+      // point.  Thus these translation commands should not be used.
+      break;
+
+    case Translation::STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::TAGGED);
+    }
+
+    case Translation::INT32_STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::INT32);
+    }
+
+    case Translation::DOUBLE_STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::DOUBLE);
+    }
+
+    case Translation::LITERAL: {
+      int literal_index = iterator->Next();
+      return SlotRef(data->LiteralArray()->get(literal_index));
+    }
+  }
+
+  UNREACHABLE();
+  return SlotRef();
+}
+
+
+static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
+                                           int inlined_frame_index,
+                                           Vector<SlotRef>* args_slots) {
+  AssertNoAllocation no_gc;
+
+  int deopt_index = AstNode::kNoNumber;
+
+  DeoptimizationInputData* data =
+      static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
+
+  TranslationIterator it(data->TranslationByteArray(),
+                         data->TranslationIndex(deopt_index)->value());
+
+  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+  ASSERT(opcode == Translation::BEGIN);
+  int frame_count = it.Next();
+
+  USE(frame_count);
+  ASSERT(frame_count > inlined_frame_index);
+
+  int frames_to_skip = inlined_frame_index;
+  while (true) {
+    opcode = static_cast<Translation::Opcode>(it.Next());
+
+    // Skip over operands to advance to the next opcode.
+    it.Skip(Translation::NumberOfOperandsFor(opcode));
+
+    if (opcode == Translation::FRAME) {
+      if (frames_to_skip == 0) {
+        // We reached the frame corresponding to the inlined function in question.
+        // Process translation commands for arguments.
+
+        // Skip translation command for receiver.
+        it.Skip(Translation::NumberOfOperandsFor(
+            static_cast<Translation::Opcode>(it.Next())));
+
+        // Compute slots for arguments.
+        for (int i = 0; i < args_slots->length(); ++i) {
+          (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
+        }
+
+        return;
+      }
+
+      frames_to_skip--;
+    }
+  }
+
+  UNREACHABLE();
+}
+
+
+static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
+    JavaScriptFrame* frame,
+    Handle<JSFunction> inlined_function,
+    int inlined_frame_index) {
+
+  int args_count = inlined_function->shared()->formal_parameter_count();
+
+  ScopedVector<SlotRef> args_slots(args_count);
+
+  ComputeSlotMappingForArguments(frame, inlined_frame_index, &args_slots);
+
+  Handle<JSObject> arguments =
+      Factory::NewArgumentsObject(inlined_function, args_count);
+
+  Handle<FixedArray> array = Factory::NewFixedArray(args_count);
+  for (int i = 0; i < args_count; ++i) {
+    Handle<Object> value = args_slots[i].GetValue();
+    array->set(i, *value);
+  }
+  arguments->set_elements(*array);
+
+  // Return the freshly allocated arguments object.
+  return *arguments;
+}
+
 
 MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
   HandleScope scope;
@@ -554,38 +757,50 @@
   Handle<JSFunction> function(holder);
 
   // Find the top invocation of the function by traversing frames.
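+  // With inlining, a single physical frame can contain several functions,
+  // so check every function reported for the frame.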
+  List<JSFunction*> functions(2);
   for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
-    // Skip all frames that aren't invocations of the given function.
     JavaScriptFrame* frame = it.frame();
-    if (frame->function() != *function) continue;
+    frame->GetFunctions(&functions);
+    for (int i = functions.length() - 1; i >= 0; i--) {
+      // Skip all frames that aren't invocations of the given function.
+      if (functions[i] != *function) continue;
 
-    // If there is an arguments variable in the stack, we return that.
-    int index = function->shared()->scope_info()->
-        StackSlotIndex(Heap::arguments_symbol());
-    if (index >= 0) {
-      Handle<Object> arguments = Handle<Object>(frame->GetExpression(index));
-      if (!arguments->IsTheHole()) return *arguments;
+      if (i > 0) {
+        // Function in question was inlined.
+        return ConstructArgumentsObjectForInlinedFunction(frame, function, i);
+      } else {
+        // If there is an arguments variable in the stack, we return that.
+        int index = function->shared()->scope_info()->
+            StackSlotIndex(Heap::arguments_symbol());
+        if (index >= 0) {
+          Handle<Object> arguments =
+              Handle<Object>(frame->GetExpression(index));
+          if (!arguments->IsTheHole()) return *arguments;
+        }
+
+        // If there isn't an arguments variable in the stack, we need to
+        // find the frame that holds the actual arguments passed to the
+        // function on the stack.
+        it.AdvanceToArgumentsFrame();
+        frame = it.frame();
+
+        // Get the number of arguments and construct an arguments object
+        // mirror for the right frame.
+        const int length = frame->GetProvidedParametersCount();
+        Handle<JSObject> arguments = Factory::NewArgumentsObject(function,
+                                                                 length);
+        Handle<FixedArray> array = Factory::NewFixedArray(length);
+
+        // Copy the parameters to the arguments object.
+        ASSERT(array->length() == length);
+        for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
+        arguments->set_elements(*array);
+
+        // Return the freshly allocated arguments object.
+        return *arguments;
+      }
     }
-
-    // If there isn't an arguments variable in the stack, we need to
-    // find the frame that holds the actual arguments passed to the
-    // function on the stack.
-    it.AdvanceToArgumentsFrame();
-    frame = it.frame();
-
-    // Get the number of arguments and construct an arguments object
-    // mirror for the right frame.
-    const int length = frame->GetProvidedParametersCount();
-    Handle<JSObject> arguments = Factory::NewArgumentsObject(function, length);
-    Handle<FixedArray> array = Factory::NewFixedArray(length);
-
-    // Copy the parameters to the arguments object.
-    ASSERT(array->length() == length);
-    for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
-    arguments->set_elements(*array);
-
-    // Return the freshly allocated arguments object.
-    return *arguments;
+    functions.Rewind(0);
   }
 
   // No frame corresponding to the given function found. Return null.
@@ -613,19 +828,34 @@
   if (!found_it) return Heap::undefined_value();
   Handle<JSFunction> function(holder);
 
-  // Find the top invocation of the function by traversing frames.
+  List<JSFunction*> functions(2);
   for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
-    // Skip all frames that aren't invocations of the given function.
-    if (it.frame()->function() != *function) continue;
-    // Once we have found the frame, we need to go to the caller
-    // frame. This may require skipping through a number of top-level
-    // frames, e.g. frames for scripts not functions.
-    while (true) {
-      it.Advance();
-      if (it.done()) return Heap::null_value();
-      JSFunction* caller = JSFunction::cast(it.frame()->function());
-      if (!caller->shared()->is_toplevel()) return caller;
+    JavaScriptFrame* frame = it.frame();
+    frame->GetFunctions(&functions);
+    for (int i = functions.length() - 1; i >= 0; i--) {
+      if (functions[i] == *function) {
+        // Once we have found the frame, we need to go to the caller
+        // frame. This may require skipping through a number of top-level
+        // frames, e.g. frames for scripts not functions.
+        if (i > 0) {
+          ASSERT(!functions[i - 1]->shared()->is_toplevel());
+          return functions[i - 1];
+        } else {
+          for (it.Advance(); !it.done(); it.Advance()) {
+            frame = it.frame();
+            functions.Rewind(0);
+            frame->GetFunctions(&functions);
+            if (!functions.last()->shared()->is_toplevel()) {
+              return functions.last();
+            }
+            ASSERT(functions.length() == 1);
+          }
+          if (it.done()) return Heap::null_value();
+          break;
+        }
+      }
     }
+    functions.Rewind(0);
   }
 
   // No frame corresponding to the given function found. Return null.
diff --git a/src/accessors.h b/src/accessors.h
index 96d742e..14ccc8f 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -78,13 +78,14 @@
   MUST_USE_RESULT static MaybeObject* FunctionGetPrototype(Object* object,
                                                            void*);
   MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object,
-                                                           Object* value,
-                                                           void*);
+                                                      Object* value,
+                                                      void*);
+  static MaybeObject* FunctionGetArguments(Object* object, void*);
+
  private:
   // Accessor functions only used through the descriptor.
   static MaybeObject* FunctionGetLength(Object* object, void*);
   static MaybeObject* FunctionGetName(Object* object, void*);
-  static MaybeObject* FunctionGetArguments(Object* object, void*);
   static MaybeObject* FunctionGetCaller(Object* object, void*);
   MUST_USE_RESULT static MaybeObject* ArraySetLength(JSObject* object,
                                                      Object* value, void*);
diff --git a/src/allocation.h b/src/allocation.h
index 6f4bd2f..394366e 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -28,6 +28,9 @@
 #ifndef V8_ALLOCATION_H_
 #define V8_ALLOCATION_H_
 
+#include "checks.h"
+#include "globals.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/api.cc b/src/api.cc
index 089c797..110468e 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -33,6 +33,7 @@
 #include "bootstrapper.h"
 #include "compiler.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
 #include "heap-profiler.h"
@@ -40,18 +41,21 @@
 #include "parser.h"
 #include "platform.h"
 #include "profile-generator-inl.h"
+#include "runtime-profiler.h"
 #include "serialize.h"
 #include "snapshot.h"
 #include "top.h"
 #include "v8threads.h"
 #include "version.h"
+#include "vm-state-inl.h"
 
 #include "../include/v8-profiler.h"
+#include "../include/v8-testing.h"
 
 #define LOG_API(expr) LOG(ApiEntryCall(expr))
 
 #ifdef ENABLE_VMSTATE_TRACKING
-#define ENTER_V8 i::VMState __state__(i::OTHER)
+#define ENTER_V8 ASSERT(i::V8::IsRunning()); i::VMState __state__(i::OTHER)
 #define LEAVE_V8 i::VMState __state__(i::EXTERNAL)
 #else
 #define ENTER_V8 ((void) 0)
@@ -97,6 +101,7 @@
     }                                                                          \
   } while (false)
 
+
 // --- D a t a   t h a t   i s   s p e c i f i c   t o   a   t h r e a d ---
 
 
@@ -1160,14 +1165,22 @@
 
 
 ScriptData* ScriptData::PreCompile(const char* input, int length) {
-  unibrow::Utf8InputBuffer<> buf(input, length);
-  return i::ParserApi::PreParse(i::Handle<i::String>(), &buf, NULL);
+  i::Utf8ToUC16CharacterStream stream(
+      reinterpret_cast<const unsigned char*>(input), length);
+  return i::ParserApi::PreParse(&stream, NULL);
 }
 
 
 ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
   i::Handle<i::String> str = Utils::OpenHandle(*source);
-  return i::ParserApi::PreParse(str, NULL, NULL);
+  if (str->IsExternalTwoByteString()) {
+    i::ExternalTwoByteStringUC16CharacterStream stream(
+      i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
+    return i::ParserApi::PreParse(&stream, NULL);
+  } else {
+    i::GenericStringUC16CharacterStream stream(str, 0, str->length());
+    return i::ParserApi::PreParse(&stream, NULL);
+  }
 }
 
 
@@ -2312,6 +2325,11 @@
   HandleScope scope;
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+
+  // When turning on access checks for a global object deoptimize all functions
+  // as optimized code does not always handle access checks.
+  i::Deoptimizer::DeoptimizeGlobalObject(*self);
+
   EXCEPTION_PREAMBLE();
   i::Handle<i::Object> obj = i::ForceDeleteProperty(self, key_obj);
   has_pending_exception = obj.is_null();
@@ -2598,6 +2616,10 @@
   HandleScope scope;
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
 
+  // When turning on access checks for a global object deoptimize all functions
+  // as optimized code does not always handle access checks.
+  i::Deoptimizer::DeoptimizeGlobalObject(*obj);
+
   i::Handle<i::Map> new_map =
     i::Factory::CopyMapDropTransitions(i::Handle<i::Map>(obj->map()));
   new_map->set_is_access_check_needed(true);
@@ -3105,14 +3127,15 @@
     // using StringInputBuffer or Get(i) to access the characters.
     str->TryFlatten();
   }
-  int end = length;
-  if ( (length == -1) || (length > str->length() - start) )
-    end = str->length() - start;
+  int end = start + length;
+  if ((length == -1) || (length > str->length() - start))
+    end = str->length();
   if (end < 0) return 0;
   i::String::WriteToFlat(*str, buffer, start, end);
-  if (length == -1 || end < length)
-    buffer[end] = '\0';
-  return end;
+  if (length == -1 || end - start < length) {
+    buffer[end - start] = '\0';
+  }
+  return end - start;
 }
 
 
@@ -3243,35 +3266,18 @@
 }
 
 
-static bool CanBeEncodedAsSmi(void* ptr) {
-  const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
-  return ((address & i::kEncodablePointerMask) == 0);
-}
-
-
-static i::Smi* EncodeAsSmi(void* ptr) {
-  ASSERT(CanBeEncodedAsSmi(ptr));
-  const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
-  i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
-  ASSERT(i::Internals::HasSmiTag(result));
-  ASSERT_EQ(result, i::Smi::FromInt(result->value()));
-  ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result));
-  return result;
-}
-
-
 void v8::Object::SetPointerInInternalField(int index, void* value) {
   ENTER_V8;
-  if (CanBeEncodedAsSmi(value)) {
-    Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
-  } else {
-    HandleScope scope;
-    i::Handle<i::Proxy> proxy =
-        i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
-    if (!proxy.is_null())
-        Utils::OpenHandle(this)->SetInternalField(index, *proxy);
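+  // An aligned pointer has its low tag bit clear, so it already looks like
+  // a smi and can be stored in the internal field directly.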
+  i::Object* as_object = reinterpret_cast<i::Object*>(value);
+  if (as_object->IsSmi()) {
+    Utils::OpenHandle(this)->SetInternalField(index, as_object);
+    return;
   }
-  ASSERT_EQ(value, GetPointerFromInternalField(index));
+  HandleScope scope;
+  i::Handle<i::Proxy> proxy =
+      i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
+  if (!proxy.is_null())
+      Utils::OpenHandle(this)->SetInternalField(index, *proxy);
 }
 
 
@@ -3279,7 +3285,6 @@
 
 bool v8::V8::Initialize() {
   if (i::V8::IsRunning()) return true;
-  ENTER_V8;
   HandleScope scope;
   if (i::Snapshot::Initialize()) return true;
   return i::V8::Initialize(NULL);
@@ -3403,6 +3408,7 @@
       global_constructor->set_needs_access_check(
           proxy_constructor->needs_access_check());
     }
+    i::RuntimeProfiler::Reset();
   }
   // Leave V8.
 
@@ -3554,13 +3560,11 @@
   LOG_API("External::Wrap");
   EnsureInitialized("v8::External::Wrap()");
   ENTER_V8;
-
-  v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
-      ? Utils::ToLocal(i::Handle<i::Object>(EncodeAsSmi(data)))
-      : v8::Local<v8::Value>(ExternalNewImpl(data));
-
-  ASSERT_EQ(data, Unwrap(result));
-  return result;
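+  // As above: an aligned pointer passes the smi test, so no proxy needs to
+  // be allocated to hold it.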
+  i::Object* as_object = reinterpret_cast<i::Object*>(data);
+  if (as_object->IsSmi()) {
+    return Utils::ToLocal(i::Handle<i::Object>(as_object));
+  }
+  return ExternalNewImpl(data);
 }
 
 
@@ -3568,7 +3572,7 @@
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
   i::Object* value = obj->GetInternalField(index);
   if (value->IsSmi()) {
-    return i::Internals::GetExternalPointerFromSmi(value);
+    return value;
   } else if (value->IsProxy()) {
     return reinterpret_cast<void*>(i::Proxy::cast(value)->proxy());
   } else {
@@ -3582,7 +3586,8 @@
   i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
   void* result;
   if (obj->IsSmi()) {
-    result = i::Internals::GetExternalPointerFromSmi(*obj);
+    // The external value was an aligned pointer.
+    result = *obj;
   } else if (obj->IsProxy()) {
     result = ExternalValueImpl(obj);
   } else {
@@ -3797,6 +3802,35 @@
 }
 
 
+void v8::Date::DateTimeConfigurationChangeNotification() {
+  ON_BAILOUT("v8::Date::DateTimeConfigurationChangeNotification()", return);
+  LOG_API("Date::DateTimeConfigurationChangeNotification");
+  ENTER_V8;
+
+  HandleScope scope;
+
+  // Get the function ResetDateCache (defined in date.js).
+  i::Handle<i::String> func_name_str =
+      i::Factory::LookupAsciiSymbol("ResetDateCache");
+  i::MaybeObject* result = i::Top::builtins()->GetProperty(*func_name_str);
+  i::Object* object_func;
+  if (!result->ToObject(&object_func)) {
+    return;
+  }
+
+  if (object_func->IsJSFunction()) {
+    i::Handle<i::JSFunction> func =
+        i::Handle<i::JSFunction>(i::JSFunction::cast(object_func));
+
+    // Call ResetDateCache() but expect no exceptions.
+    bool caught_exception = false;
+    i::Handle<i::Object> result =
+        i::Execution::TryCall(func, i::Top::builtins(), 0, NULL,
+        &caught_exception);
+  }
+}
+
+
 static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
   char flags_buf[3];
   int num_flags = 0;
@@ -4890,6 +4924,13 @@
 }
 
 
+const HeapGraphNode* HeapSnapshot::GetNodeById(uint64_t id) const {
+  IsDeadCheck("v8::HeapSnapshot::GetNodeById");
+  return reinterpret_cast<const HeapGraphNode*>(
+      ToInternal(this)->GetEntryById(id));
+}
+
+
 const HeapSnapshotsDiff* HeapSnapshot::CompareWith(
     const HeapSnapshot* snapshot) const {
   IsDeadCheck("v8::HeapSnapshot::CompareWith");
@@ -4936,7 +4977,8 @@
 
 
 const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
-                                               HeapSnapshot::Type type) {
+                                               HeapSnapshot::Type type,
+                                               ActivityControl* control) {
   IsDeadCheck("v8::HeapProfiler::TakeSnapshot");
   i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
   switch (type) {
@@ -4950,12 +4992,74 @@
       UNREACHABLE();
   }
   return reinterpret_cast<const HeapSnapshot*>(
-      i::HeapProfiler::TakeSnapshot(*Utils::OpenHandle(*title), internal_type));
+      i::HeapProfiler::TakeSnapshot(
+          *Utils::OpenHandle(*title), internal_type, control));
 }
 
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 
+v8::Testing::StressType internal::Testing::stress_type_ =
+    v8::Testing::kStressTypeOpt;
+
+
+void Testing::SetStressRunType(Testing::StressType type) {
+  internal::Testing::set_stress_type(type);
+}
+
+
+int Testing::GetStressRuns() {
+  if (internal::FLAG_stress_runs != 0) return internal::FLAG_stress_runs;
+#ifdef DEBUG
+  // In debug mode the code runs much slower, so stressing will only make
+  // two runs.
+  return 2;
+#else
+  return 5;
+#endif
+}
+
+
+static void SetFlagsFromString(const char* flags) {
+  V8::SetFlagsFromString(flags, i::StrLength(flags));
+}
+
+
+void Testing::PrepareStressRun(int run) {
+  static const char* kLazyOptimizations =
+      "--prepare-always-opt --nolimit-inlining "
+      "--noalways-opt --noopt-eagerly";
+  static const char* kEagerOptimizations = "--opt-eagerly";
+  static const char* kForcedOptimizations = "--always-opt";
+
+  // If deoptimization is stressed, turn on frequent deoptimization.  If no
+  // value is specified through --deopt-every-n-times use a default value.
+  static const char* kDeoptEvery13Times = "--deopt-every-n-times=13";
+  if (internal::Testing::stress_type() == Testing::kStressTypeDeopt &&
+      internal::FLAG_deopt_every_n_times == 0) {
+    SetFlagsFromString(kDeoptEvery13Times);
+  }
+
+#ifdef DEBUG
+  // As stressing in debug mode only makes two runs, skip the deopt
+  // stressing here.
+  if (run == GetStressRuns() - 1) {
+    SetFlagsFromString(kForcedOptimizations);
+  } else {
+    SetFlagsFromString(kEagerOptimizations);
+    SetFlagsFromString(kLazyOptimizations);
+  }
+#else
+  if (run == GetStressRuns() - 1) {
+    SetFlagsFromString(kForcedOptimizations);
+  } else if (run == GetStressRuns() - 2) {
+    SetFlagsFromString(kEagerOptimizations);
+  } else {
+    SetFlagsFromString(kLazyOptimizations);
+  }
+#endif
+}
+
+
 namespace internal {
 
 
diff --git a/src/api.h b/src/api.h
index e36160c..d07d75b 100644
--- a/src/api.h
+++ b/src/api.h
@@ -31,6 +31,8 @@
 #include "apiutils.h"
 #include "factory.h"
 
+#include "../include/v8-testing.h"
+
 namespace v8 {
 
 // Constants used in the implementation of the API.  The most natural thing
@@ -489,6 +491,18 @@
          (!blocks_.is_empty() && prev_limit != NULL));
 }
 
+
+class Testing {
+ public:
+  static v8::Testing::StressType stress_type() { return stress_type_; }
+  static void set_stress_type(v8::Testing::StressType stress_type) {
+    stress_type_ = stress_type;
+  }
+
+ private:
+  static v8::Testing::StressType stress_type_;
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_API_H_
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 15720c9..68d32f1 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -110,6 +110,30 @@
 }
 
 
+Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  return Handle<JSGlobalPropertyCell>(
+      reinterpret_cast<JSGlobalPropertyCell**>(address));
+}
+
+
+JSGlobalPropertyCell* RelocInfo::target_cell() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
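+  // The instruction references the cell's value slot; step back to the
+  // start of the cell and tag it as a heap object.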
+  Object* object = HeapObject::FromAddress(
+      address - JSGlobalPropertyCell::kValueOffset);
+  return reinterpret_cast<JSGlobalPropertyCell*>(object);
+}
+
+
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+  Memory::Address_at(pc_) = address;
+}
+
+
 Address RelocInfo::call_address() {
   // The 2 instructions offset assumes patched debug break slot or return
   // sequence.
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index cfdd164..8fdcf18 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -70,7 +70,7 @@
 #endif  // def __arm__
 
 
-void CpuFeatures::Probe() {
+void CpuFeatures::Probe(bool portable) {
 #ifndef __arm__
   // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
   if (FLAG_enable_vfp3) {
@@ -81,7 +81,7 @@
     supported_ |= 1u << ARMv7;
   }
 #else  // def __arm__
-  if (Serializer::enabled()) {
+  if (portable && Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     supported_ |= CpuFeaturesImpliedByCompiler();
     return;  // No features if we might serialize.
@@ -98,6 +98,8 @@
     supported_ |= 1u << ARMv7;
     found_by_runtime_probing_ |= 1u << ARMv7;
   }
+
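+  // A non-portable probe targets the host CPU only, so none of the probed
+  // features need to be tracked as found by runtime probing.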
+  if (!portable) found_by_runtime_probing_ = 0;
 #endif
 }
 
@@ -318,7 +320,10 @@
 static byte* spare_buffer_ = NULL;
 
 Assembler::Assembler(void* buffer, int buffer_size)
-    : positions_recorder_(this) {
+    : positions_recorder_(this),
+      allow_peephole_optimization_(false) {
+  // BUG(3245989): disable peephole optimization if crankshaft is enabled.
+  allow_peephole_optimization_ = FLAG_peephole_optimization;
   if (buffer == NULL) {
     // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
@@ -987,6 +992,7 @@
 
 
 void Assembler::bl(int branch_offset, Condition cond) {
+  positions_recorder()->WriteRecordedPositions();
   ASSERT((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
   ASSERT(is_int24(imm24));
@@ -1650,9 +1656,10 @@
   emit(reinterpret_cast<Instr>(msg));
 #else  // def __arm__
 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
+  ASSERT(cond == al);
   bkpt(0);
 #else  // ndef CAN_USE_ARMV5_INSTRUCTIONS
-  svc(0x9f0001);
+  svc(0x9f0001, cond);
 #endif  // ndef CAN_USE_ARMV5_INSTRUCTIONS
 #endif  // def __arm__
 }
@@ -1826,13 +1833,18 @@
                      const Condition cond) {
   // Ddst = MEM(Rbase + offset).
   // Instruction details available in ARM DDI 0406A, A8-628.
-  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
+  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1011(11-8) | offset
   ASSERT(CpuFeatures::IsEnabled(VFP3));
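+  // The U bit (bit 23) of the instruction encodes whether the immediate
+  // offset is added to (U=1) or subtracted from (U=0) the base register.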
+  int u = 1;
+  if (offset < 0) {
+    offset = -offset;
+    u = 0;
+  }
   ASSERT(offset % 4 == 0);
   ASSERT((offset / 4) < 256);
   ASSERT(offset >= 0);
-  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+  emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
        0xB*B8 | ((offset / 4) & 255));
 }
 
@@ -1843,15 +1855,20 @@
                      const Condition cond) {
   // Sdst = MEM(Rbase + offset).
   // Instruction details available in ARM DDI 0406A, A8-628.
-  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
+  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1010(11-8) | offset
   ASSERT(CpuFeatures::IsEnabled(VFP3));
+  int u = 1;
+  if (offset < 0) {
+    offset = -offset;
+    u = 0;
+  }
   ASSERT(offset % 4 == 0);
   ASSERT((offset / 4) < 256);
   ASSERT(offset >= 0);
   int sd, d;
   dst.split_code(&sd, &d);
-  emit(cond | d*B22 | 0xD9*B20 | base.code()*B16 | sd*B12 |
+  emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
        0xA*B8 | ((offset / 4) & 255));
 }
 
@@ -1862,13 +1879,18 @@
                      const Condition cond) {
   // MEM(Rbase + offset) = Dsrc.
   // Instruction details available in ARM DDI 0406A, A8-786.
-  // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) |
+  // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
   // Vsrc(15-12) | 1011(11-8) | (offset/4)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
+  int u = 1;
+  if (offset < 0) {
+    offset = -offset;
+    u = 0;
+  }
   ASSERT(offset % 4 == 0);
   ASSERT((offset / 4) < 256);
   ASSERT(offset >= 0);
-  emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
+  emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
        0xB*B8 | ((offset / 4) & 255));
 }
 
@@ -1879,15 +1901,20 @@
                      const Condition cond) {
   // MEM(Rbase + offset) = SSrc.
   // Instruction details available in ARM DDI 0406A, A8-786.
-  // cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) |
+  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1010(11-8) | (offset/4)
   ASSERT(CpuFeatures::IsEnabled(VFP3));
+  int u = 1;
+  if (offset < 0) {
+    offset = -offset;
+    u = 0;
+  }
   ASSERT(offset % 4 == 0);
   ASSERT((offset / 4) < 256);
   ASSERT(offset >= 0);
   int sd, d;
   src.split_code(&sd, &d);
-  emit(cond | d*B22 | 0xD8*B20 | base.code()*B16 | sd*B12 |
+  emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
        0xA*B8 | ((offset / 4) & 255));
 }
 
@@ -2411,7 +2438,7 @@
 
 
 void Assembler::RecordComment(const char* msg) {
-  if (FLAG_debug_code) {
+  if (FLAG_code_comments) {
     CheckBuffer();
     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
   }
@@ -2469,6 +2496,20 @@
 }
 
 
+void Assembler::db(uint8_t data) {
+  CheckBuffer();
+  *reinterpret_cast<uint8_t*>(pc_) = data;
+  pc_ += sizeof(uint8_t);
+}
+
+
+void Assembler::dd(uint32_t data) {
+  CheckBuffer();
+  *reinterpret_cast<uint32_t*>(pc_) = data;
+  pc_ += sizeof(uint32_t);
+}
+
+
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index ee4c9aa..36f7507 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -69,7 +69,39 @@
 //
 // Core register
 struct Register {
-  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  static const int kNumRegisters = 16;
+  static const int kNumAllocatableRegisters = 8;
+
+  static int ToAllocationIndex(Register reg) {
+    return reg.code();
+  }
+
+  static Register FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    return from_code(index);
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "r0",
+      "r1",
+      "r2",
+      "r3",
+      "r4",
+      "r5",
+      "r6",
+      "r7",
+    };
+    return names[index];
+  }
+
+  static Register from_code(int code) {
+    Register r = { code };
+    return r;
+  }
+
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
   int code() const {
     ASSERT(is_valid());
@@ -132,6 +164,48 @@
 
 // Double word VFP register.
 struct DwVfpRegister {
+  // d0 has been excluded from allocation. This follows ia32, where xmm0
+  // is excluded. This should be revisited.
+  static const int kNumRegisters = 16;
+  static const int kNumAllocatableRegisters = 15;
+
+  static int ToAllocationIndex(DwVfpRegister reg) {
+    ASSERT(reg.code() != 0);
+    return reg.code() - 1;
+  }
+
+  static DwVfpRegister FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    return from_code(index + 1);
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "d1",
+      "d2",
+      "d3",
+      "d4",
+      "d5",
+      "d6",
+      "d7",
+      "d8",
+      "d9",
+      "d10",
+      "d11",
+      "d12",
+      "d13",
+      "d14",
+      "d15"
+    };
+    return names[index];
+  }
+
+  static DwVfpRegister from_code(int code) {
+    DwVfpRegister r = { code };
+    return r;
+  }
+
   // Supporting d0 to d15, can be later extended to d31.
   bool is_valid() const { return 0 <= code_ && code_ < 16; }
   bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
@@ -167,6 +241,9 @@
 };
 
 
+typedef DwVfpRegister DoubleRegister;
+
+
 // Support for the VFP registers s0 to s31 (d0 to d15).
 // Note that "s(N):s(N+1)" is the same as "d(N/2)".
 const SwVfpRegister s0  = {  0 };
@@ -286,6 +363,9 @@
 
 // Condition field in instructions.
 enum Condition {
+  // any value < 0 is considered no_condition
+  no_condition  = -1,
+
   eq =  0 << 28,  // Z set            equal.
   ne =  1 << 28,  // Z clear          not equal.
   nz =  1 << 28,  // Z clear          not zero.
@@ -527,7 +607,7 @@
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  static void Probe();
+  static void Probe(bool portable);
 
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
@@ -1148,15 +1228,20 @@
   void RecordDebugBreakSlot();
 
   // Record a comment relocation entry that can be used by a disassembler.
-  // Use --debug_code to enable.
+  // Use --code-comments to enable.
   void RecordComment(const char* msg);
 
+  // Writes a single byte or word of data in the code stream.  Used for
+  // inline tables, e.g., jump-tables.
+  void db(uint8_t data);
+  void dd(uint32_t data);
+
   int pc_offset() const { return pc_ - buffer_; }
 
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
   bool can_peephole_optimize(int instructions) {
-    if (!FLAG_peephole_optimization) return false;
+    if (!allow_peephole_optimization_) return false;
     if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
     return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
   }
@@ -1185,6 +1270,8 @@
   static bool IsLdrPcImmediateOffset(Instr instr);
   static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
 
+  // Check if it is time to emit a constant pool for pending reloc info entries.
+  void CheckConstPool(bool force_emit, bool require_jump);
 
  protected:
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@@ -1201,9 +1288,6 @@
   // Patch branch instruction at pos to branch to given branch target pos
   void target_at_put(int pos, int target_pos);
 
-  // Check if is time to emit a constant pool for pending reloc info entries
-  void CheckConstPool(bool force_emit, bool require_jump);
-
   // Block the emission of the constant pool before pc_offset
   void BlockConstPoolBefore(int pc_offset) {
     if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
@@ -1317,6 +1401,7 @@
   friend class BlockConstPoolScope;
 
   PositionsRecorder positions_recorder_;
+  bool allow_peephole_optimization_;
   friend class PositionsRecorder;
   friend class EnsureSpace;
 };
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 862ef39..6480a91 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,6 +31,8 @@
 
 #include "codegen-inl.h"
 #include "debug.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
 #include "runtime.h"
 
 namespace v8 {
@@ -1089,6 +1091,80 @@
 }
 
 
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Preserve the function.
+  __ push(r1);
+
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(r1);
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
+  // Calculate the entry point.
+  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // Restore saved function.
+  __ pop(r1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ Jump(r2);
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  __ EnterInternalFrame();
+  // Pass the function and deoptimization type to the runtime system.
+  __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
+  __ push(r0);
+  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+  __ LeaveInternalFrame();
+
+  // Get the full codegen state from the stack and untag it -> r6.
+  __ ldr(r6, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(r6);
+  // Switch on the state.
+  Label with_tos_register, unknown_state;
+  __ cmp(r6, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ b(ne, &with_tos_register);
+  __ add(sp, sp, Operand(1 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&with_tos_register);
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
+  __ cmp(r6, Operand(FullCodeGenerator::TOS_REG));
+  __ b(ne, &unknown_state);
+  __ add(sp, sp, Operand(2 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&unknown_state);
+  __ stop("no cases left");
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
+  __ stop("builtins-arm.cc: NotifyOSR");
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  __ stop("builtins-arm.cc: OnStackReplacement");
+}
+
+
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
   // r0: actual number of arguments
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 76a610b..5ec8584 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -82,12 +82,15 @@
   // write barrier because the allocated object is in new space.
   __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
   __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
   __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
   __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
   __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
   __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
   __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
   __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
@@ -1088,6 +1091,10 @@
   Label not_heap_number;
   Register scratch = r7;
 
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(tos_, ip);
+  __ b(eq, &false_result);
+
   // HeapNumber => false iff +0, -0, or NaN.
   __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
@@ -2200,6 +2207,14 @@
 }
 
 
+Handle<Code> GetTypeRecordingBinaryOpStub(int key,
+    TRBinaryOpIC::TypeInfo type_info,
+    TRBinaryOpIC::TypeInfo result_type_info) {
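+  // Type-recording binary op stubs have not been ported to ARM yet.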
+  UNIMPLEMENTED();
+  return Handle<Code>::null();
+}
+
+
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // Argument is a number and is on stack and in r0.
   Label runtime_call;
@@ -2290,6 +2305,7 @@
     // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
       return Runtime::kAbort;
@@ -2640,7 +2656,7 @@
   // r0:r1: result
   // sp: stack pointer
   // fp: frame pointer
-  __ LeaveExitFrame();
+  __ LeaveExitFrame(save_doubles_);
 
   // check if we should retry or throw exception
   Label retry;
@@ -2689,7 +2705,7 @@
   // builtin once.
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame();
+  __ EnterExitFrame(save_doubles_);
 
   // r4: number of arguments (C callee-saved)
   // r5: pointer to builtin function (C callee-saved)
@@ -2777,6 +2793,15 @@
   // Setup frame pointer for the frame to be pushed.
   __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If this is the outermost JS call, set js_entry_sp value.
+  ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
+  __ ldr(r6, MemOperand(r5));
+  __ cmp(r6, Operand(0, RelocInfo::NONE));
+  __ str(fp, MemOperand(r5), eq);
+#endif
+
   // Call a faked try-block that does the invoke.
   __ bl(&invoke);
 
@@ -2839,6 +2864,15 @@
   // No need to restore registers
   __ add(sp, sp, Operand(StackHandlerConstants::kSize));
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If current FP value is the same as js_entry_sp value, it means that
+  // the current function is the outermost.
+  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
+  __ ldr(r6, MemOperand(r5));
+  __ cmp(fp, Operand(r6));
+  __ mov(r6, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+  __ str(r6, MemOperand(r5), eq);
+#endif
 
   __ bind(&exit);  // r0 holds result
   // Restore the top frame descriptors from the stack.
@@ -2859,80 +2893,98 @@
 }
 
 
-// This stub performs an instanceof, calling the builtin function if
-// necessary.  Uses r1 for the object, r0 for the function that it may
-// be an instance of (these are fetched from the stack).
+// Uses registers r0 to r4. Expected input is
+// object in r0 (or at sp+1*ptrsz) and function in
+// r1 (or at sp), depending on whether or not
+// args_in_registers() is true.
 void InstanceofStub::Generate(MacroAssembler* masm) {
-  // Get the object - slow case for smis (we may need to throw an exception
-  // depending on the rhs).
-  Label slow, loop, is_instance, is_not_instance;
-  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
-  __ BranchOnSmi(r0, &slow);
+  // Fixed register usage throughout the stub:
+  const Register object = r0;  // Object (lhs).
+  const Register map = r3;  // Map of the object.
+  const Register function = r1;  // Function (rhs).
+  const Register prototype = r4;  // Prototype of the function.
+  const Register scratch = r2;
+  Label slow, loop, is_instance, is_not_instance, not_js_object;
+  if (!args_in_registers()) {
+    __ ldr(object, MemOperand(sp, 1 * kPointerSize));
+    __ ldr(function, MemOperand(sp, 0));
+  }
 
-  // Check that the left hand is a JS object and put map in r3.
-  __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
-  __ b(lt, &slow);
-  __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
-  __ b(gt, &slow);
-
-  // Get the prototype of the function (r4 is result, r2 is scratch).
-  __ ldr(r1, MemOperand(sp, 0));
-  // r1 is function, r3 is map.
+  // Check that the left hand is a JS object and load map.
+  __ BranchOnSmi(object, &not_js_object);
+  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
 
   // Look up the function and the map in the instanceof cache.
   Label miss;
   __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
-  __ cmp(r1, ip);
+  __ cmp(function, ip);
   __ b(ne, &miss);
   __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
-  __ cmp(r3, ip);
+  __ cmp(map, ip);
   __ b(ne, &miss);
-  __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
-  __ pop();
-  __ pop();
-  __ mov(pc, Operand(lr));
+  __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+  __ Ret(args_in_registers() ? 0 : 2);
 
   __ bind(&miss);
-  __ TryGetFunctionPrototype(r1, r4, r2, &slow);
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
 
   // Check that the function prototype is a JS object.
-  __ BranchOnSmi(r4, &slow);
-  __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
-  __ b(lt, &slow);
-  __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
-  __ b(gt, &slow);
+  __ BranchOnSmi(prototype, &slow);
+  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
 
-  __ StoreRoot(r1, Heap::kInstanceofCacheFunctionRootIndex);
-  __ StoreRoot(r3, Heap::kInstanceofCacheMapRootIndex);
+  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+  __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
 
   // Register mapping: r3 is object map and r4 is function prototype.
   // Get prototype of object into r2.
-  __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
+  __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
 
   // Loop through the prototype chain looking for the function prototype.
   __ bind(&loop);
-  __ cmp(r2, Operand(r4));
+  __ cmp(scratch, Operand(prototype));
   __ b(eq, &is_instance);
   __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(r2, ip);
+  __ cmp(scratch, ip);
   __ b(eq, &is_not_instance);
-  __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
+  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
   __ jmp(&loop);
 
   __ bind(&is_instance);
   __ mov(r0, Operand(Smi::FromInt(0)));
   __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
-  __ pop();
-  __ pop();
-  __ mov(pc, Operand(lr));  // Return.
+  __ Ret(args_in_registers() ? 0 : 2);
 
   __ bind(&is_not_instance);
   __ mov(r0, Operand(Smi::FromInt(1)));
   __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
-  __ pop();
-  __ pop();
-  __ mov(pc, Operand(lr));  // Return.
+  __ Ret(args_in_registers() ? 0 : 2);
+
+  Label object_not_null, object_not_null_or_smi;
+  __ bind(&not_js_object);
+  // Before null, smi and string value checks, check that the rhs is a function
+  // as for a non-function rhs an exception needs to be thrown.
+  __ BranchOnSmi(function, &slow);
+  __ CompareObjectType(function, map, scratch, JS_FUNCTION_TYPE);
+  __ b(ne, &slow);
+
+  // Null is not instance of anything.
+  __ cmp(scratch, Operand(Factory::null_value()));
+  __ b(ne, &object_not_null);
+  __ mov(r0, Operand(Smi::FromInt(1)));
+  __ Ret(args_in_registers() ? 0 : 2);
+
+  __ bind(&object_not_null);
+  // Smi values are not instances of anything.
+  __ BranchOnNotSmi(object, &object_not_null_or_smi);
+  __ mov(r0, Operand(Smi::FromInt(1)));
+  __ Ret(args_in_registers() ? 0 : 2);
+
+  __ bind(&object_not_null_or_smi);
+  // String values are not instances of anything.
+  __ IsObjectJSStringType(object, scratch, &slow);
+  __ mov(r0, Operand(Smi::FromInt(1)));
+  __ Ret(args_in_registers() ? 0 : 2);
 
   // Slow-case.  Tail call builtin.
   __ bind(&slow);
@@ -3429,6 +3480,95 @@
 }
 
 
+void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
+  const int kMaxInlineLength = 100;
+  Label slowcase;
+  Label done;
+  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(ne, &slowcase);
+  __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
+  __ b(hi, &slowcase);
+  // Smi-tagging is equivalent to multiplying by 2.
+  // Allocate RegExpResult followed by FixedArray with size in r2.
+  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
+  // Elements:  [Map][Length][..elements..]
+  // Size of JSArray with two in-object properties and the header of a
+  // FixedArray.
+  int objects_size =
+      (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
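+  // Untag the smi length in r1 to get the number of elements in r5.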
+  __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
+  __ add(r2, r5, Operand(objects_size));
+  __ AllocateInNewSpace(
+      r2,  // In: Size, in words.
+      r0,  // Out: Start of allocation (tagged).
+      r3,  // Scratch register.
+      r4,  // Scratch register.
+      &slowcase,
+      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+  // r0: Start of allocated area, object-tagged.
+  // r1: Number of elements in array, as smi.
+  // r5: Number of elements, untagged.
+
+  // Set JSArray map to global.regexp_result_map().
+  // Set empty properties FixedArray.
+  // Set elements to point to FixedArray allocated right after the JSArray.
+  // Interleave operations for better latency.
+  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ add(r3, r0, Operand(JSRegExpResult::kSize));
+  __ mov(r4, Operand(Factory::empty_fixed_array()));
+  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
+  __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
+  __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+
+  // Set input, index and length fields from arguments.
+  __ ldr(r1, MemOperand(sp, kPointerSize * 0));
+  __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
+  __ ldr(r1, MemOperand(sp, kPointerSize * 1));
+  __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
+  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
+  __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
+
+  // Fill out the elements FixedArray.
+  // r0: JSArray, tagged.
+  // r3: FixedArray, tagged.
+  // r5: Number of elements in array, untagged.
+
+  // Set map.
+  __ mov(r2, Operand(Factory::fixed_array_map()));
+  __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+  // Set FixedArray length.
+  __ mov(r6, Operand(r5, LSL, kSmiTagSize));
+  __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
+  // Fill contents of fixed-array with the-hole.
+  __ mov(r2, Operand(Factory::the_hole_value()));
+  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // Fill fixed array elements with hole.
+  // r0: JSArray, tagged.
+  // r2: the hole.
+  // r3: Start of elements in FixedArray.
+  // r5: Number of elements to fill.
+  Label loop;
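+  // Set the condition flags on r5 so the first loop iteration can exit
+  // immediately when there are no elements to fill.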
+  __ tst(r5, Operand(r5));
+  __ bind(&loop);
+  __ b(le, &done);  // Jump if r5 is negative or zero.
+  __ sub(r5, r5, Operand(1), SetCC);
+  __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
+  __ jmp(&loop);
+
+  __ bind(&done);
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  __ bind(&slowcase);
+  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
+}
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
   Label slow;
 
@@ -4721,6 +4861,123 @@
 }
 
 
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SMIS);
+  Label miss;
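+  // The smi tag is zero, so or-ing the operands leaves the tag bit set
+  // only if at least one of them is a non-smi.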
+  __ orr(r2, r1, r0);
+  __ tst(r2, Operand(kSmiTagMask));
+  __ b(ne, &miss);
+
+  if (GetCondition() == eq) {
+    // For equality we do not care about the sign of the result.
+    __ sub(r0, r0, r1, SetCC);
+  } else {
+    __ sub(r1, r1, r0, SetCC);
+    // Correct sign of result in case of overflow.
+    __ rsb(r1, r1, Operand(0), SetCC, vs);
+    __ mov(r0, r1);
+  }
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+  Label generic_stub;
+  Label unordered;
+  Label miss;
+  __ and_(r2, r1, Operand(r0));
+  __ tst(r2, Operand(kSmiTagMask));
+  __ b(eq, &generic_stub);
+
+  __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
+  __ b(ne, &miss);
+  __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
+  __ b(ne, &miss);
+
+  // Inlining the double comparison and falling back to the general compare
+  // stub if NaN is involved or VFP3 is unsupported.
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+
+    // Load left and right operand
+    __ sub(r2, r1, Operand(kHeapObjectTag));
+    __ vldr(d0, r2, HeapNumber::kValueOffset);
+    __ sub(r2, r0, Operand(kHeapObjectTag));
+    __ vldr(d1, r2, HeapNumber::kValueOffset);
+
+    // Compare operands
+    __ vcmp(d0, d1);
+    __ vmrs(pc);  // Move vector status bits to normal status bits.
+
+    // Don't base result on status bits when a NaN is involved.
+    __ b(vs, &unordered);
+
+    // Return a result of -1, 0, or 1, based on status bits.
+    __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+    __ mov(r0, Operand(LESS), LeaveCC, lt);
+    __ mov(r0, Operand(GREATER), LeaveCC, gt);
+    __ Ret();
+
+    __ bind(&unordered);
+  }
+
+  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
+  __ bind(&generic_stub);
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::OBJECTS);
+  Label miss;
+  __ and_(r2, r1, Operand(r0));
+  __ tst(r2, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
+  __ b(ne, &miss);
+  __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
+  __ b(ne, &miss);
+
+  ASSERT(GetCondition() == eq);
+  __ sub(r0, r0, Operand(r1));
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+  __ Push(r1, r0);
+  __ push(lr);
+
+  // Call the runtime system in a fresh internal frame.
+  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
+  __ EnterInternalFrame();
+  __ Push(r1, r0);
+  __ mov(ip, Operand(Smi::FromInt(op_)));
+  __ push(ip);
+  __ CallExternalReference(miss, 3);
+  __ LeaveInternalFrame();
+  // Compute the entry point of the rewritten stub.
+  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // Restore registers.
+  __ pop(lr);
+  __ pop(r0);
+  __ pop(r1);
+  __ Jump(r2);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 2e07e3b..8ffca77 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -106,9 +106,9 @@
   // Minor key encoding in 17 bits.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 6> {};
-  class TypeInfoBits: public BitField<int, 8, 2> {};
-  class RegisterBits: public BitField<bool, 10, 1> {};
-  class KnownIntBits: public BitField<int, 11, kKnownRhsKeyBits> {};
+  class TypeInfoBits: public BitField<int, 8, 3> {};
+  class RegisterBits: public BitField<bool, 11, 1> {};
+  class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
@@ -196,6 +196,10 @@
 
   const char* GetName();
 
+  virtual void FinishCode(Code* code) {
+    code->set_binary_op_type(runtime_operands_type_);
+  }
+
 #ifdef DEBUG
   void Print() {
     if (!specialized_on_rhs_) {
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 469d41f..59bc14e 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -36,7 +36,7 @@
 #include "debug.h"
 #include "ic-inl.h"
 #include "jsregexp.h"
-#include "jump-target-light-inl.h"
+#include "jump-target-inl.h"
 #include "parser.h"
 #include "regexp-macro-assembler.h"
 #include "regexp-stack.h"
@@ -79,12 +79,12 @@
 }
 
 
-void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
 
 
-void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveInternalFrame();
 }
 
@@ -165,6 +165,9 @@
 
   int slots = scope()->num_parameters() + scope()->num_stack_slots();
   ScopedVector<TypeInfo> type_info_array(slots);
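+  // Make sure every slot starts out with no recorded type information.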
+  for (int i = 0; i < slots; i++) {
+    type_info_array[i] = TypeInfo::Unknown();
+  }
   type_info_ = &type_info_array;
 
   ASSERT(allocator_ == NULL);
@@ -1150,7 +1153,7 @@
   }
   // Check that the *signed* result fits in a smi. Not necessary for AND, SAR
   // if the shift is more than 0 or SHR if the shift is more than 1.
-  if (!( (op_ == Token::AND) ||
+  if (!( (op_ == Token::AND && value_ >= 0) ||
         ((op_ == Token::SAR) && (shift_value > 0)) ||
         ((op_ == Token::SHR) && (shift_value > 1)))) {
     __ add(r3, int32, Operand(0x40000000), SetCC);
@@ -1411,8 +1414,10 @@
           default: UNREACHABLE();
         }
         deferred->BindExit();
-        TypeInfo result_type =
-            (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
+        TypeInfo result_type = TypeInfo::Integer32();
+        if (op == Token::BIT_AND && int_value >= 0) {
+          result_type = TypeInfo::Smi();
+        }
         frame_->EmitPush(tos, result_type);
       }
       break;
@@ -5163,11 +5164,11 @@
 
     // Set the bit in the map to indicate that it has been checked safe for
     // default valueOf and set true result.
-    __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+    __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
     __ orr(scratch1_,
            scratch1_,
            Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
-    __ str(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+    __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
     __ mov(map_result_, Operand(1));
     __ jmp(exit_label());
     __ bind(&false_result);
@@ -5418,97 +5419,14 @@
 
 
 void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  // No stub. This code only occurs a few times in regexp.js.
-  const int kMaxInlineLength = 100;
   ASSERT_EQ(3, args->length());
+
   Load(args->at(0));  // Size of array, smi.
   Load(args->at(1));  // "index" property value.
   Load(args->at(2));  // "input" property value.
-  {
-    VirtualFrame::SpilledScope spilled_scope(frame_);
-    Label slowcase;
-    Label done;
-    __ ldr(r1, MemOperand(sp, kPointerSize * 2));
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(ne, &slowcase);
-    __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
-    __ b(hi, &slowcase);
-    // Smi-tagging is equivalent to multiplying by 2.
-    // Allocate RegExpResult followed by FixedArray with size in ebx.
-    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
-    // Elements:  [Map][Length][..elements..]
-    // Size of JSArray with two in-object properties and the header of a
-    // FixedArray.
-    int objects_size =
-        (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
-    __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
-    __ add(r2, r5, Operand(objects_size));
-    __ AllocateInNewSpace(
-        r2,  // In: Size, in words.
-        r0,  // Out: Start of allocation (tagged).
-        r3,  // Scratch register.
-        r4,  // Scratch register.
-        &slowcase,
-        static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-    // r0: Start of allocated area, object-tagged.
-    // r1: Number of elements in array, as smi.
-    // r5: Number of elements, untagged.
-
-    // Set JSArray map to global.regexp_result_map().
-    // Set empty properties FixedArray.
-    // Set elements to point to FixedArray allocated right after the JSArray.
-    // Interleave operations for better latency.
-    __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
-    __ add(r3, r0, Operand(JSRegExpResult::kSize));
-    __ mov(r4, Operand(Factory::empty_fixed_array()));
-    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
-    __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
-    __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
-    __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
-    __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-
-    // Set input, index and length fields from arguments.
-    __ ldm(ia_w, sp, static_cast<RegList>(r2.bit() | r4.bit()));
-    __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
-    __ add(sp, sp, Operand(kPointerSize));
-    __ str(r4, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
-    __ str(r2, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
-
-    // Fill out the elements FixedArray.
-    // r0: JSArray, tagged.
-    // r3: FixedArray, tagged.
-    // r5: Number of elements in array, untagged.
-
-    // Set map.
-    __ mov(r2, Operand(Factory::fixed_array_map()));
-    __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-    // Set FixedArray length.
-    __ mov(r6, Operand(r5, LSL, kSmiTagSize));
-    __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
-    // Fill contents of fixed-array with the-hole.
-    __ mov(r2, Operand(Factory::the_hole_value()));
-    __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    // Fill fixed array elements with hole.
-    // r0: JSArray, tagged.
-    // r2: the hole.
-    // r3: Start of elements in FixedArray.
-    // r5: Number of elements to fill.
-    Label loop;
-    __ tst(r5, Operand(r5));
-    __ bind(&loop);
-    __ b(le, &done);  // Jump if r1 is negative or zero.
-    __ sub(r5, r5, Operand(1), SetCC);
-    __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
-    __ jmp(&loop);
-
-    __ bind(&slowcase);
-    __ CallRuntime(Runtime::kRegExpConstructResult, 3);
-
-    __ bind(&done);
-  }
-  frame_->Forget(3);
+  RegExpConstructResultStub stub;
+  frame_->SpillAll();
+  frame_->CallStub(&stub, 3);
   frame_->EmitPush(r0);
 }
 
@@ -5758,6 +5676,20 @@
 }
 
 
+void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+  Load(args->at(0));
+  if (CpuFeatures::IsSupported(VFP3)) {
+    TranscendentalCacheStub stub(TranscendentalCache::LOG);
+    frame_->SpillAllButCopyTOSToR0();
+    frame_->CallStub(&stub, 1);
+  } else {
+    frame_->CallRuntime(Runtime::kMath_log, 1);
+  }
+  frame_->EmitPush(r0);
+}
+
+
 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
@@ -6537,7 +6469,7 @@
     case Token::INSTANCEOF: {
       Load(left);
       Load(right);
-      InstanceofStub stub;
+      InstanceofStub stub(InstanceofStub::kNoFlags);
       frame_->CallStub(&stub, 2);
       // At this point if instanceof succeeded then r0 == 0.
       __ tst(r0, Operand(r0));
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 1930f5e..589e704 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -209,6 +209,9 @@
                                        Code::Flags flags,
                                        CompilationInfo* info);
 
+  // Print the code after compiling it.
+  static void PrintCode(Handle<Code> code, CompilationInfo* info);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -305,8 +308,9 @@
   // Node visitors.
   void VisitStatements(ZoneList<Statement*>* statements);
 
+  virtual void VisitSlot(Slot* node);
 #define DEF_VISIT(type) \
-  void Visit##type(type* node);
+  virtual void Visit##type(type* node);
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
@@ -516,6 +520,7 @@
   void GenerateMathSin(ZoneList<Expression*>* args);
   void GenerateMathCos(ZoneList<Expression*>* args);
   void GenerateMathSqrt(ZoneList<Expression*>* args);
+  void GenerateMathLog(ZoneList<Expression*>* args);
 
   void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
 
@@ -578,6 +583,7 @@
   friend class FastCodeGenerator;
   friend class FullCodeGenerator;
   friend class FullCodeGenSyntaxChecker;
+  friend class LCodeGen;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index e998b6f..b359dce 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -42,7 +42,10 @@
 namespace internal {
 
 void CPU::Setup() {
-  CpuFeatures::Probe();
+  CpuFeatures::Probe(true);
+  if (!CpuFeatures::IsSupported(VFP3) || Serializer::enabled()) {
+    V8::DisableCrankshaft();
+  }
 }
 
 
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
new file mode 100644
index 0000000..3917d6d
--- /dev/null
+++ b/src/arm/deoptimizer-arm.cc
@@ -0,0 +1,503 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
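+// Each deoptimization table entry is four ARM instructions (a nop or a
+// push(lr), a mov, a push and a branch), i.e. 16 bytes; this is asserted in
+// TableEntryGenerator::GeneratePrologue below.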
+int Deoptimizer::table_entry_size_ = 16;
+
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+  AssertNoAllocation no_allocation;
+
+  if (!function->IsOptimized()) return;
+
+  // Get the optimized code.
+  Code* code = function->code();
+
+  // Invalidate the relocation information, as it will be invalidated by the
+  // code patching below and is not needed any more.
+  code->InvalidateRelocation();
+
+  // For each return after a safepoint, insert an absolute call to the
+  // corresponding deoptimization entry.
+  unsigned last_pc_offset = 0;
+  SafepointTable table(function->code());
+  for (unsigned i = 0; i < table.length(); i++) {
+    unsigned pc_offset = table.GetPcOffset(i);
+    int deoptimization_index = table.GetDeoptimizationIndex(i);
+    int gap_code_size = table.GetGapCodeSize(i);
+    // Check that we did not shoot past the next safepoint.
+    // TODO(srdjan): How do we guarantee that safepoint code does not
+    // overlap other safepoint patching code?
+    CHECK(pc_offset >= last_pc_offset);
+#ifdef DEBUG
+    // Destroy the code which is not supposed to be run again.
+    int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
+    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                          instructions);
+    for (int x = 0; x < instructions; x++) {
+      destroyer.masm()->bkpt(0);
+    }
+#endif
+    last_pc_offset = pc_offset;
+    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
+      const int kCallInstructionSizeInWords = 3;
+      CodePatcher patcher(code->instruction_start() + pc_offset + gap_code_size,
+                          kCallInstructionSizeInWords);
+      Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
+          deoptimization_index, Deoptimizer::LAZY);
+      patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
+      last_pc_offset +=
+          gap_code_size + kCallInstructionSizeInWords * Assembler::kInstrSize;
+    }
+  }
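+  // Every safepoint return site that carries a deoptimization index has now
+  // been redirected to its lazy deoptimization entry.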
+
+#ifdef DEBUG
+  // Destroy the code which is not supposed to be run again.
+  int instructions =
+      (code->safepoint_table_start() - last_pc_offset) / Assembler::kInstrSize;
+  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                        instructions);
+  for (int x = 0; x < instructions; x++) {
+    destroyer.masm()->bkpt(0);
+  }
+#endif
+
+  // Add the deoptimizing code to the list.
+  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+  node->set_next(deoptimizing_code_list_);
+  deoptimizing_code_list_ = node;
+
+  // Set the code for the function to non-optimized version.
+  function->ReplaceCode(function->shared()->code());
+
+  if (FLAG_trace_deopt) {
+    PrintF("[forced deoptimization: ");
+    function->PrintName();
+    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+  }
+}
+
+
+void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+                                      Code* replacement_code) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::DoComputeOsrOutputFrame() {
+  UNIMPLEMENTED();
+}
+
+
+// This code is very similar to the ia32 version, but it relies on ARM
+// register names (fp, sp) and on how the ARM frame is laid out.
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+                                 int frame_index) {
+  // Read the ast node id, function, and frame height for this output frame.
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  int node_id = iterator->Next();
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating ");
+    function->PrintName();
+    PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+  }
+
+  // The 'fixed' part of the frame consists of the incoming parameters and
+  // the part described by JavaScriptFrameConstants.
+  unsigned fixed_frame_size = ComputeFixedSize(function);
+  unsigned input_frame_size = input_->GetFrameSize();
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+
+  bool is_bottommost = (0 == frame_index);
+  bool is_topmost = (output_count_ - 1 == frame_index);
+  ASSERT(frame_index >= 0 && frame_index < output_count_);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address for the bottommost output frame can be computed from
+  // the input frame pointer and the output frame's height.  For all
+  // subsequent output frames, it can be computed from the previous one's
+  // top address and the current frame's size.
+  uint32_t top_address;
+  if (is_bottommost) {
+    // 2 = context and function in the frame.
+    top_address =
+        input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
+  } else {
+    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  }
+  output_frame->SetTop(top_address);
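+  // Worked example (hypothetical values): with fp == 0x1000 and
+  // height_in_bytes == 8, the bottommost top_address is
+  // 0x1000 - 2 * kPointerSize - 8 == 0xff0.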
+
+  // Compute the incoming parameter translation.
+  int parameter_count = function->shared()->formal_parameter_count() + 1;
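+  // The + 1 accounts for the receiver, which is translated along with the
+  // formal parameters.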
+  unsigned output_offset = output_frame_size;
+  unsigned input_offset = input_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  input_offset -= (parameter_count * kPointerSize);
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Synthesize their values and set them up
+  // explicitly.
+  //
+  // The caller's pc for the bottommost output frame is the same as in the
+  // input frame.  For all subsequent output frames, it can be read from the
+  // previous one.  This frame's pc can be computed from the non-optimized
+  // function code and AST id of the bailout.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  intptr_t value;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetPc();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The caller's frame pointer for the bottommost output frame is the same
+  // as in the input frame.  For all subsequent output frames, it can be
+  // read from the previous one.  Also compute and set this frame's frame
+  // pointer.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetFp();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
+  output_frame->SetFp(fp_value);
+  if (is_topmost) {
+    output_frame->SetRegister(fp.code(), fp_value);
+  }
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // The context can be obtained from the function as long as we do not
+  // optimize functions that need local contexts.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(function->context());
+  // The context for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (is_topmost) {
+    output_frame->SetRegister(cp.code(), value);
+  }
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The function was mentioned explicitly in the BEGIN_FRAME.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(function);
+  // The function for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Translate the rest of the frame.
+  for (unsigned i = 0; i < height; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  ASSERT(0 == output_offset);
+
+  // Compute this frame's PC, state, and continuation.
+  Code* non_optimized_code = function->shared()->code();
+  FixedArray* raw_data = non_optimized_code->deoptimization_data();
+  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+  Address start = non_optimized_code->instruction_start();
+  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
+  output_frame->SetPc(pc_value);
+  if (is_topmost) {
+    output_frame->SetRegister(pc.code(), pc_value);
+  }
+
+  FullCodeGenerator::State state =
+      FullCodeGenerator::StateField::decode(pc_and_state);
+  output_frame->SetState(Smi::FromInt(state));
+
+  // Set the continuation for the topmost frame.
+  if (is_topmost) {
+    Code* continuation = (bailout_type_ == EAGER)
+        ? Builtins::builtin(Builtins::NotifyDeoptimized)
+        : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+    output_frame->SetContinuation(
+        reinterpret_cast<uint32_t>(continuation->entry()));
+  }
+
+  if (output_count_ - 1 == frame_index) iterator->Done();
+}
+
+
+#define __ masm()->
+
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Deoptimizer::EntryGenerator::Generate() {
+  GeneratePrologue();
+  // TOS: bailout-id; TOS+1: return address if not EAGER.
+  CpuFeatures::Scope scope(VFP3);
+  // Save all general purpose registers before messing with them.
+  const int kNumberOfRegisters = Register::kNumRegisters;
+
+  // Everything but sp, lr and pc, which will be saved but not restored.
+  RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
+
+  const int kDoubleRegsSize =
+      kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
+
+  // Save all VFP registers before messing with them.
+  __ sub(sp, sp, Operand(kDoubleRegsSize));
+  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
+    DwVfpRegister vfp_reg = DwVfpRegister::FromAllocationIndex(i);
+    int offset = i * kDoubleSize;
+    __ vstr(vfp_reg, sp, offset);
+  }
+
+  // Push all 16 registers (needed to populate FrameDescription::registers_).
+  __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
+
+  const int kSavedRegistersAreaSize =
+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+  // Get the bailout id from the stack.
+  __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
+
+  // Get the address of the location in the code object if possible (r3)
+  // (return address for lazy deoptimization) and compute the fp-to-sp delta
+  // in register r4.
+  if (type() == EAGER) {
+    __ mov(r3, Operand(0));
+    // Correct one word for bailout id.
+    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else {
+    __ mov(r3, lr);
+    // Correct two words for bailout id and return address.
+    __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+  }
+  __ sub(r4, fp, r4);
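+  // r4 now holds the fp-to-sp delta: fp minus the value sp had at the
+  // deoptimization site.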
+
+  // Allocate a new deoptimizer object.
+  // Pass four arguments in r0 to r3 and fifth argument on stack.
+  __ PrepareCallCFunction(5, r5);
+  __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(r1, Operand(type()));  // Bailout type.
+  // r2: bailout id already loaded.
+  // r3: code address or 0 already loaded.
+  __ str(r4, MemOperand(sp, 0 * kPointerSize));  // Fp-to-sp delta.
+  // Call Deoptimizer::New().
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
+
+  // Preserve "deoptimizer" object in register r0 and get the input
+  // frame descriptor pointer to r1 (deoptimizer->input_);
+  __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
+
+
+  // Copy core registers into FrameDescription::registers_[kNumRegisters].
+  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kIntSize) + FrameDescription::registers_offset();
+    __ ldr(r2, MemOperand(sp, i * kPointerSize));
+    __ str(r2, MemOperand(r1, offset));
+  }
+
+  // Copy VFP registers to
+  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+    __ vldr(d0, sp, src_offset);
+    __ vstr(d0, r1, dst_offset);
+  }
+
+  // Remove the bailout id, the return address (if present), and the saved
+  // registers from the stack.
+  if (type() == EAGER) {
+    __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  } else {
+    __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
+  }
+
+  // Compute a pointer to the unwinding limit in register r2; that is
+  // the first stack slot not part of the input frame.
+  __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
+  __ add(r2, r2, sp);
+
+  // Unwind the stack down to - but not including - the unwinding
+  // limit and copy the contents of the activation frame to the input
+  // frame description.
+  __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
+  Label pop_loop;
+  __ bind(&pop_loop);
+  __ pop(r4);
+  __ str(r4, MemOperand(r3, 0));
+  __ add(r3, r3, Operand(sizeof(uint32_t)));
+  __ cmp(r2, sp);
+  __ b(ne, &pop_loop);
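+  // When the loop exits, sp has reached the unwinding limit in r2 and the
+  // activation frame contents have been copied into the input frame
+  // description.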
+
+  // Compute the output frame in the deoptimizer.
+  __ push(r0);  // Preserve deoptimizer object across call.
+  // r0: deoptimizer object; r1: scratch.
+  __ PrepareCallCFunction(1, r1);
+  // Call Deoptimizer::ComputeOutputFrames().
+  __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+  __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).
+
+  // Replace the current (input) frame with the output frames.
+  Label outer_push_loop, inner_push_loop;
+  // Outer loop state: r0 = current "FrameDescription** output_",
+  // r1 = one past the last FrameDescription**.
+  __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
+  __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset()));  // r0 is output_.
+  __ add(r1, r0, Operand(r1, LSL, 2));
+  __ bind(&outer_push_loop);
+  // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
+  __ ldr(r2, MemOperand(r0, 0));  // output_[ix]
+  __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
+  __ bind(&inner_push_loop);
+  __ sub(r3, r3, Operand(sizeof(uint32_t)));
+  // __ add(r6, r2, Operand(r3, LSL, 1));
+  __ add(r6, r2, Operand(r3));
+  __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
+  __ push(r7);
+  __ cmp(r3, Operand(0));
+  __ b(ne, &inner_push_loop);  // test for gt?
+  __ add(r0, r0, Operand(kPointerSize));
+  __ cmp(r0, r1);
+  __ b(lt, &outer_push_loop);
+
+  // In case of OSR, we have to restore the VFP registers.
+  if (type() == OSR) {
+    UNIMPLEMENTED();
+  }
+
+  // Push state, pc, and continuation from the last output frame.
+  if (type() != OSR) {
+    __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
+    __ push(r6);
+  }
+
+  __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
+  __ push(r6);
+  __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
+  __ push(r6);
+
+  // Push the registers from the last output frame.
+  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+    int offset = (i * kIntSize) + FrameDescription::registers_offset();
+    __ ldr(r6, MemOperand(r2, offset));
+    __ push(r6);
+  }
+
+  // Restore the registers from the stack.
+  __ ldm(ia_w, sp, restored_regs);  // All but sp, lr and pc.
+  __ pop(ip);  // remove sp
+  __ pop(ip);  // remove lr
+
+  // Set up the roots register.
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ mov(r10, Operand(roots_address));
+
+  __ pop(ip);  // remove pc
+  __ pop(r7);  // get continuation, leave pc on stack
+  __ pop(lr);
+  __ Jump(r7);
+  __ stop("Unreachable.");
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+  // Create a sequence of deoptimization entries. Note that any
+  // registers may be still live.
+  Label done;
+  for (int i = 0; i < count(); i++) {
+    int start = masm()->pc_offset();
+    USE(start);
+    if (type() == EAGER) {
+      __ nop();
+    } else {
+      // Emulate ia32 like call by pushing return address to stack.
+      __ push(lr);
+    }
+    __ mov(ip, Operand(i));
+    __ push(ip);
+    __ b(&done);
+    ASSERT(masm()->pc_offset() - start == table_entry_size_);
+  }
+  __ bind(&done);
+}
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index b0c0990..d2726cf 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -38,7 +38,12 @@
 
 
 Address ExitFrame::ComputeStackPointer(Address fp) {
-  return fp + ExitFrameConstants::kSPOffset;
+  Address marker = Memory::Address_at(fp + ExitFrameConstants::kMarkerOffset);
+  Address sp = fp + ExitFrameConstants::kSPOffset;
+  if (marker == NULL) {
+    sp -= DwVfpRegister::kNumRegisters * kDoubleSize + 2 * kPointerSize;
+  }
+  return sp;
 }
 
 
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 5847a6a..00c20ef 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -74,6 +74,18 @@
 static const int kNumCalleeSaved = 7 + kR9Available;
 
 
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
+static const int kNumSafepointRegisters = 16;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+static const int kNumSafepointSavedRegisters =
+    kNumJSCallerSaved + kNumCalleeSaved;
+
 // ----------------------------------------------------
 
 
@@ -99,7 +111,9 @@
   static const int kCodeOffset = -1 * kPointerSize;
   static const int kSPOffset = -1 * kPointerSize;
 
-  static const int kSavedRegistersOffset = 0 * kPointerSize;
+  // TODO(regis): Use a patched sp value on the stack instead.
+  // A marker of 0 indicates that double registers are saved.
+  static const int kMarkerOffset = -2 * kPointerSize;
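+  // ExitFrame::ComputeStackPointer in frames-arm.cc reads this marker to
+  // decide whether the saved double registers must be skipped over.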
 
   // The caller fields are below the frame pointer on the stack.
   static const int kCallerFPOffset = +0 * kPointerSize;
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 0b6e13d..d254918 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -171,23 +171,22 @@
     }
   }
 
-  // Check the stack for overflow or break request.
-  { Comment cmnt(masm_, "[ Stack check");
-    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
-    __ cmp(sp, Operand(r2));
-    StackCheckStub stub;
-    __ mov(ip,
-           Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
-                   RelocInfo::CODE_TARGET),
-           LeaveCC,
-           lo);
-    __ Call(ip, lo);
-  }
-
   if (FLAG_trace) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
 
+  // Check the stack for overflow or break request.
+  { Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailout(info->function(), NO_REGISTERS);
+    Label ok;
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ cmp(sp, Operand(ip));
+    __ b(hs, &ok);
+    StackCheckStub stub;
+    __ CallStub(&stub);
+    __ bind(&ok);
+  }
+
   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
     VisitStatements(function()->body());
@@ -200,6 +199,10 @@
     __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
   }
   EmitReturnSequence();
+
+  // Force emit the constant pool, so it doesn't get emitted in the middle
+  // of the stack check table.
+  masm()->CheckConstPool(true, false);
 }
 
 
@@ -208,6 +211,21 @@
 }
 
 
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+  Comment cmnt(masm_, "[ Stack check");
+  Label ok;
+  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+  __ cmp(sp, Operand(ip));
+  __ b(hs, &ok);
+  StackCheckStub stub;
+  __ CallStub(&stub);
+  __ bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  RecordStackCheck(stmt->OsrEntryId());
+}
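+// EmitStackCheck is emitted at the back edge of loops: the for-in code below
+// calls it just before branching back to the loop head.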
+
+
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
   if (return_label_.is_bound()) {
@@ -280,6 +298,7 @@
 void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
   // For simplicity we always test the accumulator register.
   codegen()->Move(result_register(), slot);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
@@ -302,12 +321,16 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   if (index == Heap::kUndefinedValueRootIndex ||
       index == Heap::kNullValueRootIndex ||
       index == Heap::kFalseValueRootIndex) {
-    __ b(false_label_);
+    if (false_label_ != fall_through_) __ b(false_label_);
   } else if (index == Heap::kTrueValueRootIndex) {
-    __ b(true_label_);
+    if (true_label_ != fall_through_) __ b(true_label_);
   } else {
     __ LoadRoot(result_register(), index);
     codegen()->DoTest(true_label_, false_label_, fall_through_);
@@ -326,29 +349,34 @@
 
 
 void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
-  // Immediates can be pushed directly.
+  // Immediates cannot be pushed directly.
   __ mov(result_register(), Operand(lit));
   __ push(result_register());
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
-    __ b(false_label_);
+    if (false_label_ != fall_through_) __ b(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
-    __ b(true_label_);
+    if (true_label_ != fall_through_) __ b(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
-      __ b(false_label_);
+      if (false_label_ != fall_through_) __ b(false_label_);
     } else {
-      __ b(true_label_);
+      if (true_label_ != fall_through_) __ b(true_label_);
     }
   } else if (lit->IsSmi()) {
     if (Smi::cast(*lit)->value() == 0) {
-      __ b(false_label_);
+      if (false_label_ != fall_through_) __ b(false_label_);
     } else {
-      __ b(true_label_);
+      if (true_label_ != fall_through_) __ b(true_label_);
     }
   } else {
     // For simplicity we always test the accumulator register.
@@ -388,13 +416,14 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  ASSERT_EQ(materialize_true, materialize_false);
+  ASSERT(materialize_true == materialize_false);
   __ bind(materialize_true);
 }
 
@@ -429,8 +458,8 @@
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  ASSERT(materialize_false == false_label_);
   ASSERT(materialize_true == true_label_);
+  ASSERT(materialize_false == false_label_);
 }
 
 
@@ -454,6 +483,10 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   if (flag) {
     if (true_label_ != fall_through_) __ b(true_label_);
   } else {
@@ -534,6 +567,33 @@
 }
 
 
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+                                                     bool should_normalize,
+                                                     Label* if_true,
+                                                     Label* if_false) {
+  // Only prepare for bailouts before splits if we're in a test
+  // context. Otherwise, we let the Visit function deal with the
+  // preparation to avoid preparing with the same AST id twice.
+  if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+  Label skip;
+  if (should_normalize) __ b(&skip);
+
+  ForwardBailoutStack* current = forward_bailout_stack_;
+  while (current != NULL) {
+    PrepareForBailout(current->expr(), state);
+    current = current->parent();
+  }
+
+  if (should_normalize) {
+    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+    __ cmp(r0, ip);
+    Split(eq, if_true, if_false, NULL);
+    __ bind(&skip);
+  }
+}
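+// Note: callers that materialize a fresh test value (such as the inline
+// runtime predicates below) pass should_normalize == true so that r0 holds a
+// canonical boolean at the split; plain top-of-stack tests pass false.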
+
+
 void FullCodeGenerator::EmitDeclaration(Variable* variable,
                                         Variable::Mode mode,
                                         FunctionLiteral* function) {
@@ -656,6 +716,8 @@
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
 
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
 
@@ -721,6 +783,7 @@
   }
 
   __ bind(nested_statement.break_target());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
 
 
@@ -832,28 +895,22 @@
   __ bind(&update_each);
   __ mov(result_register(), r3);
   // Perform the assignment as if via '='.
-  EmitAssignment(stmt->each());
+  { EffectContext context(this);
+    EmitAssignment(stmt->each(), stmt->AssignmentId());
+  }
 
   // Generate code for the body of the loop.
-  Label stack_limit_hit, stack_check_done;
   Visit(stmt->body());
 
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_done);
-
   // Generate code for going to the next element by incrementing
   // the index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_target());
   __ pop(r0);
   __ add(r0, r0, Operand(Smi::FromInt(1)));
   __ push(r0);
-  __ b(&loop);
 
-  // Slow case for the stack limit check.
-  StackCheckStub stack_check_stub;
-  __ bind(&stack_limit_hit);
-  __ CallStub(&stack_check_stub);
-  __ b(&stack_check_done);
+  EmitStackCheck(stmt);
+  __ b(&loop);
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_target());
@@ -1200,12 +1257,15 @@
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
-          VisitForAccumulatorValue(value);
-          __ mov(r2, Operand(key->handle()));
-          __ ldr(r1, MemOperand(sp));
           if (property->emit_store()) {
+            VisitForAccumulatorValue(value);
+            __ mov(r2, Operand(key->handle()));
+            __ ldr(r1, MemOperand(sp));
             Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
             EmitCallIC(ic, RelocInfo::CODE_TARGET);
+            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+          } else {
+            VisitForEffect(value);
           }
           break;
         }
@@ -1300,6 +1360,8 @@
     // Update the write barrier for the array store with r0 as the scratch
     // register.
     __ RecordWrite(r1, Operand(offset), r2, result_register());
+
+    PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1346,13 +1408,27 @@
       break;
     case KEYED_PROPERTY:
       if (expr->is_compound()) {
-        VisitForStackValue(property->obj());
-        VisitForAccumulatorValue(property->key());
+        if (property->is_arguments_access()) {
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
+          __ push(r0);
+          __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForAccumulatorValue(property->key());
+        }
         __ ldr(r1, MemOperand(sp, 0));
         __ push(r0);
       } else {
-        VisitForStackValue(property->obj());
-        VisitForStackValue(property->key());
+        if (property->is_arguments_access()) {
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ ldr(r1, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
+          __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
+          __ Push(r1, r0);
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForStackValue(property->key());
+        }
       }
       break;
   }
@@ -1372,6 +1448,12 @@
       }
     }
 
+    // For property compound assignments we need another deoptimization
+    // point after the property load.
+    if (property != NULL) {
+      PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+    }
+
     Token::Value op = expr->binary_op();
     ConstantOperand constant = ShouldInlineSmiCase(op)
         ? GetConstantOperand(op, expr->target(), expr->value())
@@ -1397,6 +1479,9 @@
     } else {
       EmitBinaryOp(op, mode);
     }
+
+    // Deoptimization point in case the binary operation may have side effects.
+    PrepareForBailout(expr->binary_operation(), TOS_REG);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1409,6 +1494,8 @@
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
                              expr->op());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      context()->Plug(r0);
       break;
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
@@ -1458,7 +1545,7 @@
 }
 
 
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
@@ -1506,6 +1593,8 @@
       break;
     }
   }
+  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
+  context()->Plug(r0);
 }
 
 
@@ -1579,8 +1668,6 @@
     }
     __ bind(&done);
   }
-
-  context()->Plug(result_register());
 }
 
 
@@ -1623,10 +1710,10 @@
     __ push(ip);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(r0);
-    context()->DropAndPlug(1, r0);
-  } else {
-    context()->Plug(r0);
+    __ Drop(1);
   }
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(r0);
 }
 
 
@@ -1667,10 +1754,10 @@
     __ push(ip);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(r0);
-    context()->DropAndPlug(1, r0);
-  } else {
-    context()->Plug(r0);
+    __ Drop(1);
   }
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(r0);
 }
 
 
@@ -1681,13 +1768,14 @@
   if (key->IsPropertyName()) {
     VisitForAccumulatorValue(expr->obj());
     EmitNamedPropertyLoad(expr);
+    context()->Plug(r0);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
     __ pop(r1);
     EmitKeyedPropertyLoad(expr);
+    context()->Plug(r0);
   }
-  context()->Plug(r0);
 }
 
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
@@ -1696,18 +1784,19 @@
   // Code common for calls using the IC.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
-  { PreserveStatementPositionScope scope(masm()->positions_recorder());
+  { PreservePositionScope scope(masm()->positions_recorder());
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
     }
     __ mov(r2, Operand(name));
   }
   // Record source position for debugger.
-  SetSourcePosition(expr->position(), FORCED_POSITION);
+  SetSourcePosition(expr->position());
   // Call the IC initialization code.
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
   EmitCallIC(ic, mode);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   context()->Plug(r0);
@@ -1729,18 +1818,19 @@
   // Code common for calls using the IC.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
-  { PreserveStatementPositionScope scope(masm()->positions_recorder());
+  { PreservePositionScope scope(masm()->positions_recorder());
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
     }
   }
   // Record source position for debugger.
-  SetSourcePosition(expr->position(), FORCED_POSITION);
+  SetSourcePosition(expr->position());
   // Call the IC initialization code.
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
   __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize));  // Key.
   EmitCallIC(ic, mode);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   context()->DropAndPlug(1, r0);  // Drop the key still on the stack.
@@ -1751,16 +1841,17 @@
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
-  { PreserveStatementPositionScope scope(masm()->positions_recorder());
+  { PreservePositionScope scope(masm()->positions_recorder());
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
     }
   }
   // Record source position for debugger.
-  SetSourcePosition(expr->position(), FORCED_POSITION);
+  SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
   __ CallStub(&stub);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   context()->DropAndPlug(1, r0);
@@ -1768,6 +1859,12 @@
 
 
 void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+  // We want to verify that RecordJSReturnSite gets called on all paths
+  // through this function.  Avoid early returns.
+  expr->return_is_recorded_ = false;
+#endif
+
   Comment cmnt(masm_, "[ Call");
   Expression* fun = expr->expression();
   Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1780,7 +1877,7 @@
     ZoneList<Expression*>* args = expr->arguments();
     int arg_count = args->length();
 
-    { PreserveStatementPositionScope pos_scope(masm()->positions_recorder());
+    { PreservePositionScope pos_scope(masm()->positions_recorder());
       VisitForStackValue(fun);
       __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
       __ push(r2);  // Reserved receiver slot.
@@ -1815,10 +1912,11 @@
     }
 
     // Record source position for debugger.
-    SetSourcePosition(expr->position(), FORCED_POSITION);
+    SetSourcePosition(expr->position());
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
     CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
     __ CallStub(&stub);
+    RecordJSReturnSite(expr);
     // Restore context register.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
     context()->DropAndPlug(1, r0);
@@ -1832,7 +1930,7 @@
     // Call to a lookup slot (dynamically introduced variable).
     Label slow, done;
 
-    { PreserveStatementPositionScope scope(masm()->positions_recorder());
+    { PreservePositionScope scope(masm()->positions_recorder());
       // Generate code for loading from variables potentially shadowed
       // by eval-introduced variables.
       EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
@@ -1873,7 +1971,7 @@
     Literal* key = prop->key()->AsLiteral();
     if (key != NULL && key->handle()->IsSymbol()) {
       // Call to a named property, use call IC.
-      { PreserveStatementPositionScope scope(masm()->positions_recorder());
+      { PreservePositionScope scope(masm()->positions_recorder());
         VisitForStackValue(prop->obj());
       }
       EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
@@ -1881,15 +1979,15 @@
       // Call to a keyed property.
       // For a synthetic property use keyed load IC followed by function call,
       // for a regular property use keyed CallIC.
-      { PreserveStatementPositionScope scope(masm()->positions_recorder());
+      { PreservePositionScope scope(masm()->positions_recorder());
         VisitForStackValue(prop->obj());
       }
       if (prop->is_synthetic()) {
-        { PreserveStatementPositionScope scope(masm()->positions_recorder());
+        { PreservePositionScope scope(masm()->positions_recorder());
           VisitForAccumulatorValue(prop->key());
         }
         // Record source code position for IC call.
-        SetSourcePosition(prop->position(), FORCED_POSITION);
+        SetSourcePosition(prop->position());
         __ pop(r1);  // We do not need to keep the receiver.
 
         Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -1913,7 +2011,7 @@
       lit->set_try_full_codegen(true);
     }
 
-    { PreserveStatementPositionScope scope(masm()->positions_recorder());
+    { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(fun);
     }
     // Load global receiver object.
@@ -1923,6 +2021,11 @@
     // Emit function call.
     EmitCallWithStub(expr);
   }
+
+#ifdef DEBUG
+  // RecordJSReturnSite should have been called.
+  ASSERT(expr->return_is_recorded_);
+#endif
 }
 
 
@@ -1970,8 +2073,9 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  __ BranchOnSmi(r0, if_true);
-  __ b(if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ tst(r0, Operand(kSmiTagMask));
+  Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
@@ -1989,6 +2093,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ tst(r0, Operand(kSmiTagMask | 0x80000000));
   Split(eq, if_true, if_false, fall_through);
 
@@ -2021,6 +2126,7 @@
   __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
   __ b(lt, if_false);
   __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(le, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2041,6 +2147,7 @@
 
   __ BranchOnSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(ge, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2063,6 +2170,7 @@
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
   __ tst(r1, Operand(1 << Map::kIsUndetectable));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(ne, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2086,6 +2194,7 @@
   // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
   // used in a few functions in runtime.js which should not normally be hit by
   // this compiler.
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ jmp(if_false);
   context()->Plug(if_true, if_false);
 }
@@ -2105,6 +2214,7 @@
 
   __ BranchOnSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2125,6 +2235,7 @@
 
   __ BranchOnSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2145,6 +2256,7 @@
 
   __ BranchOnSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2176,6 +2288,7 @@
   __ bind(&check_frame_marker);
   __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
   __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2198,6 +2311,7 @@
 
   __ pop(r1);
   __ cmp(r0, r1);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2622,6 +2736,15 @@
 }
 
 
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime function.
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallRuntime(Runtime::kMath_log, 1);
+  context()->Plug(r0);
+}
+
+
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
@@ -2642,11 +2765,12 @@
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+  RegExpConstructResultStub stub;
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
-  __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+  __ CallStub(&stub);
   context()->Plug(r0);
 }
 
@@ -2765,9 +2889,8 @@
 
   __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
   __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
-
-  __ b(eq, if_true);
-  __ b(if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
@@ -2890,6 +3013,7 @@
       // Notice that the labels are swapped.
       context()->PrepareTest(&materialize_true, &materialize_false,
                              &if_false, &if_true, &fall_through);
+      if (context()->IsTest()) ForwardBailoutToChild(expr);
       VisitForControl(expr->expression(), if_true, if_false, fall_through);
       context()->Plug(if_false, if_true);  // Labels swapped.
       break;
@@ -3009,14 +3133,25 @@
       __ push(r0);
       EmitNamedPropertyLoad(prop);
     } else {
-      VisitForStackValue(prop->obj());
-      VisitForAccumulatorValue(prop->key());
+      if (prop->is_arguments_access()) {
+        VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
+        __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
+        __ push(r0);
+        __ mov(r0, Operand(prop->key()->AsLiteral()->handle()));
+      } else {
+        VisitForStackValue(prop->obj());
+        VisitForAccumulatorValue(prop->key());
+      }
       __ ldr(r1, MemOperand(sp, 0));
       __ push(r0);
       EmitKeyedPropertyLoad(prop);
     }
   }
 
+  // We need a second deoptimization point after loading the value
+  // in case evaluating the property load may have a side effect.
+  PrepareForBailout(expr->increment(), TOS_REG);
+
   // Call ToNumber only if operand is not a smi.
   Label no_conversion;
   __ BranchOnSmi(r0, &no_conversion);
@@ -3059,6 +3194,10 @@
     __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
   }
   __ mov(r1, Operand(Smi::FromInt(count_value)));
+
+  // Record position before stub call.
+  SetSourcePosition(expr->position());
+
   GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, r1, r0);
   __ CallStub(&stub);
   __ bind(&done);
@@ -3070,6 +3209,8 @@
         { EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN);
+          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          context.Plug(r0);
         }
         // For all contexts except EffectContext, we have the result on
         // top of the stack.
@@ -3079,6 +3220,8 @@
       } else {
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                Token::ASSIGN);
+        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        context()->Plug(r0);
       }
       break;
     case NAMED_PROPERTY: {
@@ -3086,6 +3229,7 @@
       __ pop(r1);
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3100,6 +3244,7 @@
       __ pop(r2);  // Receiver.
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3125,6 +3270,7 @@
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
     EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    PrepareForBailout(expr, TOS_REG);
     context()->Plug(r0);
   } else if (proxy != NULL &&
              proxy->var()->AsSlot() != NULL &&
@@ -3140,12 +3286,13 @@
     __ mov(r0, Operand(proxy->name()));
     __ Push(cp, r0);
     __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+    PrepareForBailout(expr, TOS_REG);
     __ bind(&done);
 
     context()->Plug(r0);
   } else {
     // This expression cannot throw a reference error at the top level.
-    Visit(expr);
+    context()->HandleExpression(expr);
   }
 }
 
@@ -3170,6 +3317,8 @@
   { AccumulatorValueContext context(this);
     VisitForTypeofValue(left_unary->expression());
   }
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
   if (check->Equals(Heap::number_symbol())) {
     __ tst(r0, Operand(kSmiTagMask));
     __ b(eq, if_true);
@@ -3273,6 +3422,7 @@
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_JS);
+      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
       __ LoadRoot(ip, Heap::kTrueValueRootIndex);
       __ cmp(r0, ip);
       Split(eq, if_true, if_false, fall_through);
@@ -3280,8 +3430,9 @@
 
     case Token::INSTANCEOF: {
       VisitForStackValue(expr->right());
-      InstanceofStub stub;
+      InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       // The stub returns 0 for true.
       __ tst(r0, r0);
       Split(eq, if_true, if_false, fall_through);
@@ -3340,6 +3491,7 @@
           : NO_COMPARE_FLAGS;
       CompareStub stub(cc, strict, flags, r1, r0);
       __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ cmp(r0, Operand(0, RelocInfo::NONE));
       Split(cc, if_true, if_false, fall_through);
     }
@@ -3361,6 +3513,7 @@
                          &if_true, &if_false, &fall_through);
 
   VisitForAccumulatorValue(expr->expression());
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ LoadRoot(r1, Heap::kNullValueRootIndex);
   __ cmp(r0, r1);
   if (expr->is_strict()) {
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 5e36d2c..e5a1bae 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -115,9 +115,6 @@
                                            Register name,
                                            Register scratch1,
                                            Register scratch2) {
-  // Assert that name contains a string.
-  if (FLAG_debug_code) __ AbortIfNotString(name);
-
   // Compute the capacity mask.
   const int kCapacityOffset = StringDictionary::kHeaderSize +
       StringDictionary::kCapacityIndex * kPointerSize;
@@ -841,15 +838,7 @@
   //  -- lr    : return address
   // -----------------------------------
 
-  // Check if the name is a string.
-  Label miss;
-  __ tst(r2, Operand(kSmiTagMask));
-  __ b(eq, &miss);
-  Condition cond = masm->IsObjectStringType(r2, r0);
-  __ b(NegateCondition(cond), &miss);
-
   GenerateCallNormal(masm, argc);
-  __ bind(&miss);
   GenerateMiss(masm, argc);
 }
 
@@ -918,6 +907,8 @@
 // Returns the code marker, or 0 if the code is not marked.
 static inline int InlinedICSiteMarker(Address address,
                                       Address* inline_end_address) {
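+  // Optimized (Crankshaft) code never embeds inlined IC sites, so all the
+  // inline patching below is skipped when Crankshaft is in use.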
+  if (V8::UseCrankshaft()) return 0;
+
   // If the instruction after the call site is not the pseudo instruction nop1
   // then this is not related to an inlined in-object property load. The nop1
   // instruction is located just after the call to the IC in the deferred code
@@ -951,6 +942,8 @@
 
 
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // Find the end of the inlined code for handling the load if this is an
   // inlined IC call site.
   Address inline_end_address;
@@ -1030,6 +1023,8 @@
 
 
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // Find the end of the inlined code for the store if there is an
   // inlined version of the store.
   Address inline_end_address;
@@ -1080,6 +1075,8 @@
 
 
 bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  if (V8::UseCrankshaft()) return false;
+
   Address inline_end_address;
   if (InlinedICSiteMarker(address, &inline_end_address)
       != Assembler::PROPERTY_ACCESS_INLINED) {
@@ -1098,6 +1095,8 @@
 
 
 bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  if (V8::UseCrankshaft()) return false;
+
   // Find the end of the inlined code for handling the store if this is an
   // inlined IC call site.
   Address inline_end_address;
@@ -1326,7 +1325,7 @@
   char_at_generator.GenerateFast(masm);
   __ Ret();
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
@@ -2318,9 +2317,76 @@
 }
 
 
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : receiver
+  //  -- r2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+
+  __ Push(r1, r2, r0);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+}
+
+
 #undef __
 
 
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return lt;
+    case Token::LTE:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return ge;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+  HandleScope scope;
+  Handle<Code> rewritten;
+  State previous_state = GetState();
+  State state = TargetState(previous_state, false, x, y);
+  if (state == GENERIC) {
+    CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
+    rewritten = stub.GetCode();
+  } else {
+    ICCompareStub stub(op_, state);
+    rewritten = stub.GetCode();
+  }
+  set_target(*rewritten);
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[CompareIC (%s->%s)#%s]\n",
+           GetStateName(previous_state),
+           GetStateName(state),
+           Token::Name(op_));
+  }
+#endif
+}
+
+
+void PatchInlinedSmiCode(Address address) {
+  UNIMPLEMENTED();
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
index 16d4e50..c6eb628 100644
--- a/src/arm/jump-target-arm.cc
+++ b/src/arm/jump-target-arm.cc
@@ -143,16 +143,6 @@
       entry_frame_set_ = true;
     } else {
       cgen()->frame()->MergeTo(&entry_frame_);
-      // On fall through we may have to merge both ways.
-      if (direction_ != FORWARD_ONLY) {
-        // This will not need to adjust the virtual frame entries that are
-        // register allocated since that was done above and they now match.
-        // But it does need to adjust the entry_frame_ of this jump target
-        // to make it potentially less optimistic.  Later code can branch back
-        // to this jump target and we need to assert that that code does not
-        // have weaker assumptions about types.
-        entry_frame_.MergeTo(cgen()->frame());
-      }
     }
   } else {
     // If there is no current frame we must have an entry frame which we can
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
new file mode 100644
index 0000000..e31d2e1
--- /dev/null
+++ b/src/arm/lithium-arm.cc
@@ -0,0 +1,2122 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "arm/lithium-arm.h"
+#include "arm/lithium-codegen-arm.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+LOsrEntry::LOsrEntry() {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+    register_spills_[i] = NULL;
+  }
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+    double_register_spills_[i] = NULL;
+  }
+}
+
+
+void LOsrEntry::MarkSpilledRegister(int allocation_index,
+                                    LOperand* spill_operand) {
+  ASSERT(spill_operand->IsStackSlot());
+  ASSERT(register_spills_[allocation_index] == NULL);
+  register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
+                                          LOperand* spill_operand) {
+  ASSERT(spill_operand->IsDoubleStackSlot());
+  ASSERT(double_register_spills_[allocation_index] == NULL);
+  double_register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) const {
+  stream->Add("%s ", this->Mnemonic());
+  if (HasResult()) {
+    result()->PrintTo(stream);
+    stream->Add(" ");
+  }
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) const {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LParallelMove::IsRedundant() const {
+  for (int i = 0; i < move_operands_.length(); ++i) {
+    if (!move_operands_[i].IsRedundant()) return false;
+  }
+  return true;
+}
+
+
+void LParallelMove::PrintDataTo(StringStream* stream) const {
+  for (int i = move_operands_.length() - 1; i >= 0; --i) {
+    if (!move_operands_[i].IsEliminated()) {
+      LOperand* from = move_operands_[i].from();
+      LOperand* to = move_operands_[i].to();
+      if (from->Equals(to)) {
+        to->PrintTo(stream);
+      } else {
+        to->PrintTo(stream);
+        stream->Add(" = ");
+        from->PrintTo(stream);
+      }
+      stream->Add("; ");
+    }
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) const {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+void LBinaryOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("= ");
+  left()->PrintTo(stream);
+  stream->Add(" ");
+  right()->PrintTo(stream);
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) const {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  input()->PrintTo(stream);
+}
+
+
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if ");
+  input()->PrintTo(stream);
+  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if is_object(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if is_smi(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if has_instance_type(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if has_cached_array_index(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if class_of_test(");
+  input()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIs::PrintDataTo(StringStream* stream) const {
+  input()->PrintTo(stream);
+  stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if typeof ");
+  input()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              *hydrogen()->type_literal()->ToCString(),
+              true_block_id(), false_block_id());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) const {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("/%s ", hydrogen()->OpName());
+  input()->PrintTo(stream);
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) const {
+  stream->Add("[r2] #%d / ", arity());
+}
+
+
+void LCallNamed::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallGlobal::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) const {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) const {
+  LUnaryOperation::PrintDataTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LClassOfTest::PrintDataTo(StringStream* stream) const {
+  stream->Add("= class_of_test(");
+  input()->PrintTo(stream);
+  stream->Add(", \"%o\")", *hydrogen()->class_name());
+}
+
+
+void LUnaryOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("= ");
+  input()->PrintTo(stream);
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+  arguments()->PrintTo(stream);
+
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+LChunk::LChunk(HGraph* graph)
+    : spill_slot_count_(0),
+      graph_(graph),
+      instructions_(32),
+      pointer_maps_(8),
+      inlined_closures_(1) {
+}
+
+
+void LChunk::Verify() const {
+  // TODO(twuerthinger): Implement verification for chunk.
+}
+
+
+int LChunk::GetNextSpillIndex(bool is_double) {
+  // Skip a slot for a double-width slot.
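+  // E.g. with spill_slot_count_ == 0: a single-width request returns index 0
+  // (count becomes 1); a double-width request returns index 1 and leaves the
+  // count at 2, reserving two consecutive indices for the double.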
+  if (is_double) spill_slot_count_++;
+  return spill_slot_count_++;
+}
+
+
+LOperand* LChunk::GetNextSpillSlot(bool is_double)  {
+  int index = GetNextSpillIndex(is_double);
+  if (is_double) {
+    return LDoubleStackSlot::Create(index);
+  } else {
+    return LStackSlot::Create(index);
+  }
+}
+
+
+void LChunk::MarkEmptyBlocks() {
+  HPhase phase("Mark empty blocks", this);
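+  // A block is eliminable when its label is redundant and not a loop header,
+  // its final goto carries no stack check, and everything in between is
+  // redundant gap moves; the label is then redirected to the goto's target.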
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    int first = block->first_instruction_index();
+    int last = block->last_instruction_index();
+    LInstruction* first_instr = instructions()->at(first);
+    LInstruction* last_instr = instructions()->at(last);
+
+    LLabel* label = LLabel::cast(first_instr);
+    if (last_instr->IsGoto()) {
+      LGoto* goto_instr = LGoto::cast(last_instr);
+      if (!goto_instr->include_stack_check() &&
+          label->IsRedundant() &&
+          !label->is_loop_header()) {
+        bool can_eliminate = true;
+        for (int i = first + 1; i < last && can_eliminate; ++i) {
+          LInstruction* cur = instructions()->at(i);
+          if (cur->IsGap()) {
+            LGap* gap = LGap::cast(cur);
+            if (!gap->IsRedundant()) {
+              can_eliminate = false;
+            }
+          } else {
+            can_eliminate = false;
+          }
+        }
+
+        if (can_eliminate) {
+          label->set_replacement(GetLabel(goto_instr->block_id()));
+        }
+      }
+    }
+  }
+}
+
+
+void LStoreNamed::PrintDataTo(StringStream* stream) const {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) const {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
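+  // Pair every instruction with a gap for the register allocator's parallel
+  // moves: the gap goes before a control instruction (which must end the
+  // block) and after any other instruction.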
+  LGap* gap = new LGap(block);
+  int index = -1;
+  if (instr->IsControl()) {
+    instructions_.Add(gap);
+    index = instructions_.length();
+    instructions_.Add(instr);
+  } else {
+    index = instructions_.length();
+    instructions_.Add(instr);
+    instructions_.Add(gap);
+  }
+  if (instr->HasPointerMap()) {
+    pointer_maps_.Add(instr->pointer_map());
+    instr->pointer_map()->set_lithium_position(index);
+  }
+  return index;
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+  return LConstantOperand::Create(constant->id());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+  // The receiver is at index 0, the first parameter at index 1, so we
+  // shift all parameter indexes down by the number of parameters, and
+  // make sure they end up negative so they are distinguishable from
+  // spill slots.
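+  // E.g. with two parameters, the receiver (index 0) maps to slot -3, the
+  // first parameter (index 1) to -2, and the second (index 2) to -1.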
+  int result = index - graph()->info()->scope()->num_parameters() - 1;
+  ASSERT(result < 0);
+  return result;
+}
+
+
+// A parameter relative to fp in the arguments stub.
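+// E.g. with two parameters, the receiver (index -1) is at 4 * kPointerSize
+// from fp, parameter 0 at 3 * kPointerSize, and parameter 1 at
+// 2 * kPointerSize.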
+int LChunk::ParameterAt(int index) {
+  ASSERT(-1 <= index);  // -1 is the receiver.
+  return (1 + graph()->info()->scope()->num_parameters() - index) *
+      kPointerSize;
+}
+
+
+LGap* LChunk::GetGapAt(int index) const {
+  return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+  return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+  while (!IsGapAt(index)) index--;
+  return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+  GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+}
+
+
+class LGapNode: public ZoneObject {
+ public:
+  explicit LGapNode(LOperand* operand)
+      : operand_(operand), resolved_(false), visited_id_(-1) { }
+
+  LOperand* operand() const { return operand_; }
+  bool IsResolved() const { return !IsAssigned() || resolved_; }
+  void MarkResolved() {
+    ASSERT(!IsResolved());
+    resolved_ = true;
+  }
+  int visited_id() const { return visited_id_; }
+  void set_visited_id(int id) {
+    ASSERT(id > visited_id_);
+    visited_id_ = id;
+  }
+
+  bool IsAssigned() const { return assigned_from_.is_set(); }
+  LGapNode* assigned_from() const { return assigned_from_.get(); }
+  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
+
+ private:
+  LOperand* operand_;
+  SetOncePointer<LGapNode> assigned_from_;
+  bool resolved_;
+  int visited_id_;
+};
+
+
+LGapResolver::LGapResolver(const ZoneList<LMoveOperands>* moves,
+                           LOperand* marker_operand)
+    : nodes_(4),
+      identified_cycles_(4),
+      result_(4),
+      marker_operand_(marker_operand),
+      next_visited_id_(0) {
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) RegisterMove(move);
+  }
+}
+
+
+const ZoneList<LMoveOperands>* LGapResolver::ResolveInReverseOrder() {
+  for (int i = 0; i < identified_cycles_.length(); ++i) {
+    ResolveCycle(identified_cycles_[i]);
+  }
+
+  int unresolved_nodes;
+  do {
+    unresolved_nodes = 0;
+    for (int j = 0; j < nodes_.length(); j++) {
+      LGapNode* node = nodes_[j];
+      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
+        AddResultMove(node->assigned_from(), node);
+        node->MarkResolved();
+      }
+      if (!node->IsResolved()) ++unresolved_nodes;
+    }
+  } while (unresolved_nodes > 0);
+  return &result_;
+}
+
+
+void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
+  AddResultMove(from->operand(), to->operand());
+}
+
+
+void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
+  result_.Add(LMoveOperands(from, to));
+}
+
+
+void LGapResolver::ResolveCycle(LGapNode* start) {
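+  // Break the cycle using the marker operand as scratch.  E.g. the move
+  // cycle {r1 = r0, r0 = r1} is recorded as r0 = marker, r1 = r0,
+  // marker = r1, which the caller emits in reverse order.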
+  ZoneList<LOperand*> circle_operands(8);
+  circle_operands.Add(marker_operand_);
+  LGapNode* cur = start;
+  do {
+    cur->MarkResolved();
+    circle_operands.Add(cur->operand());
+    cur = cur->assigned_from();
+  } while (cur != start);
+  circle_operands.Add(marker_operand_);
+
+  for (int i = circle_operands.length() - 1; i > 0; --i) {
+    LOperand* from = circle_operands[i];
+    LOperand* to = circle_operands[i - 1];
+    AddResultMove(from, to);
+  }
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
+  ASSERT(a != b);
+  LGapNode* cur = a;
+  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
+    cur->set_visited_id(visited_id);
+    cur = cur->assigned_from();
+  }
+
+  return cur == b;
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
+  ASSERT(a != b);
+  return CanReach(a, b, next_visited_id_++);
+}
+
+
+void LGapResolver::RegisterMove(LMoveOperands move) {
+  if (move.from()->IsConstantOperand()) {
+    // Constant moves should be last in the machine code. Therefore add them
+    // first to the result set.
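+    // (The result list is emitted in reverse order, as the name
+    // ResolveInReverseOrder suggests, so moves added first come out last.)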
+    AddResultMove(move.from(), move.to());
+  } else {
+    LGapNode* from = LookupNode(move.from());
+    LGapNode* to = LookupNode(move.to());
+    if (to->IsAssigned() && to->assigned_from() == from) {
+      move.Eliminate();
+      return;
+    }
+    ASSERT(!to->IsAssigned());
+    if (CanReach(from, to)) {
+      // This introduces a cycle; save it for later resolution.
+      identified_cycles_.Add(from);
+    }
+    to->set_assigned_from(from);
+  }
+}
+
+
+LGapNode* LGapResolver::LookupNode(LOperand* operand) {
+  for (int i = 0; i < nodes_.length(); ++i) {
+    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
+  }
+
+  // No node found => create a new one.
+  LGapNode* result = new LGapNode(operand);
+  nodes_.Add(result);
+  return result;
+}
+
+
+Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
+  return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+    LConstantOperand* operand) const {
+  return graph_->LookupValue(operand->index())->representation();
+}
+
+
+LChunk* LChunkBuilder::Build() {
+  ASSERT(is_unused());
+  chunk_ = new LChunk(graph());
+  HPhase phase("Building chunk", chunk_);
+  status_ = BUILDING;
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+void LChunkBuilder::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+    PrintF("Aborting LChunk building in @\"%s\": ", *debug_name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+  return LRegister::Create(Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new LUnallocated(LUnallocated::FIXED_REGISTER,
+                          Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                          DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                              LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE,
+                                     LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  allocator_->RecordUse(value, operand);
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) {
+  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsAny(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_ANY_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LInstruction* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(LInstruction* instr,
+                                               DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  instr->set_environment(CreateEnvironment(hydrogen_env));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+    LInstruction* instr, int ast_id) {
+  ASSERT(instructions_pending_deoptimization_environment_ == NULL);
+  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+  instructions_pending_deoptimization_environment_ = instr;
+  pending_deoptimization_ast_id_ = ast_id;
+  return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+  instructions_pending_deoptimization_environment_ = NULL;
+  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  allocator_->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  if (hinstr->HasSideEffects()) {
+    ASSERT(hinstr->next()->IsSimulate());
+    HSimulate* sim = HSimulate::cast(hinstr->next());
+    instr = SetInstructionPendingDeoptimizationEnvironment(
+        instr, sim->ast_id());
+  }
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to the call even if the
+  // call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  ASSERT(!instr->HasPointerMap());
+  instr->set_pointer_map(new LPointerMap(position_));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) {
+  allocator_->RecordDefinition(current_instruction_, result);
+  instr->set_result(result);
+  return instr;
+}
+
+
+LOperand* LChunkBuilder::Temp() {
+  LUnallocated* operand = new LUnallocated(LUnallocated::NONE);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoBit(Token::Value op,
+                                   HBitwiseBinaryOperation* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().IsInteger32());
+  ASSERT(instr->right()->representation().IsInteger32());
+
+  LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+  LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+  return DefineSameAsFirst(new LBitI(op, left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->OperandAt(0)->representation().IsInteger32());
+  ASSERT(instr->OperandAt(1)->representation().IsInteger32());
+  LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+
+  HValue* right_value = instr->OperandAt(1);
+  LOperand* right = NULL;
+  int constant_value = 0;
+  if (right_value->IsConstant()) {
+    HConstant* constant = HConstant::cast(right_value);
+    right = chunk_->DefineConstantOperand(constant);
+    constant_value = constant->Integer32Value() & 0x1f;
+  } else {
+    right = UseRegister(right_value);
+  }
+
+  // Shift operations can only deoptimize on a logical shift right by zero
+  // whose result cannot be truncated to int32.
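+  // (A logical shift right by zero leaves the value unchanged, so an input
+  // with the sign bit set, e.g. 0x80000000, would produce 2147483648, which
+  // cannot be represented as an int32; deoptimization is avoided only if
+  // every use truncates the result back to int32.)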
+  bool can_deopt = (op == Token::SHR && constant_value == 0);
+  if (can_deopt) {
+    bool can_truncate = true;
+    for (int i = 0; i < instr->uses()->length(); i++) {
+      if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
+        can_truncate = false;
+        break;
+      }
+    }
+    can_deopt = !can_truncate;
+  }
+
+  LInstruction* result =
+      DefineSameAsFirst(new LShiftI(op, left, right, can_deopt));
+  if (can_deopt) AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->left()->representation().IsDouble());
+  ASSERT(instr->right()->representation().IsDouble());
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LArithmeticD* result = new LArithmeticD(op, left, right);
+  return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(op == Token::ADD ||
+         op == Token::DIV ||
+         op == Token::MOD ||
+         op == Token::MUL ||
+         op == Token::SUB);
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  ASSERT(left->representation().IsTagged());
+  ASSERT(right->representation().IsTagged());
+  LOperand* left_operand = UseFixed(left, r1);
+  LOperand* right_operand = UseFixed(right, r0);
+  LInstruction* result = new LArithmeticT(op, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  ASSERT(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    ASSERT(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    ASSERT(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      ASSERT(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    ASSERT(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment; it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      last_environment->SetValueAt(phi->merged_index(), phi);
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                   graph_->GetConstantUndefined());
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    if (FLAG_trace_environment) {
+      PrintF("Process instruction %d\n", current->id());
+    }
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+  allocator_->BeginInstruction();
+  if (current->has_position()) position_ = current->position();
+  LInstruction* instr = current->CompileToLithium(this);
+
+  if (instr != NULL) {
+    if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+      instr = AssignPointerMap(instr);
+    }
+    if (FLAG_stress_environments && !instr->HasEnvironment()) {
+      instr = AssignEnvironment(instr);
+    }
+    if (current->IsBranch()) {
+      instr->set_hydrogen_value(HBranch::cast(current)->value());
+    } else {
+      instr->set_hydrogen_value(current);
+    }
+
+    int index = chunk_->AddInstruction(instr, current_block_);
+    allocator_->SummarizeInstruction(index);
+  } else {
+    // This instruction should be omitted.
+    allocator_->OmitInstruction();
+  }
+  current_instruction_ = old_current;
+}
+
+
+void LEnvironment::WriteTranslation(LCodeGen* cgen,
+                                    Translation* translation) const {
+  if (this == NULL) return;
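+  // (The NULL check above terminates the recursion: outer() is NULL for the
+  // outermost environment, and WriteTranslation is still called on it below.)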
+
+  // The translation includes one command per value in the environment.
+  int translation_size = values()->length();
+  // The output frame height does not include the parameters.
+  int height = translation_size - parameter_count();
+
+  outer()->WriteTranslation(cgen, translation);
+  int closure_id = cgen->DefineDeoptimizationLiteral(closure());
+  translation->BeginFrame(ast_id(), closure_id, height);
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = values()->at(i);
+    // spilled_registers_ and spilled_double_registers_ are either
+    // both NULL or both set.
+    if (spilled_registers_ != NULL && value != NULL) {
+      if (value->IsRegister() &&
+          spilled_registers_[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        cgen->AddToTranslation(translation,
+                               spilled_registers_[value->index()],
+                               HasTaggedValueAt(i));
+      } else if (value->IsDoubleRegister() &&
+                 spilled_double_registers_[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        cgen->AddToTranslation(translation,
+                               spilled_double_registers_[value->index()],
+                               false);
+      }
+    }
+
+    cgen->AddToTranslation(translation, value, HasTaggedValueAt(i));
+  }
+}
+
+
+void LEnvironment::PrintTo(StringStream* stream) const {
+  stream->Add("[id=%d|", ast_id());
+  stream->Add("[parameters=%d|", parameter_count());
+  stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
+  for (int i = 0; i < values_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    if (values_[i] == NULL) {
+      stream->Add("[hole]");
+    } else {
+      values_[i]->PrintTo(stream);
+    }
+  }
+  stream->Add("]");
+}
+
+
+LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+  if (hydrogen_env == NULL) return NULL;
+
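+  // Recurse first so that the outermost environment is built before any
+  // nested one; each nested environment corresponds to an inlined frame.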
+  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  int ast_id = hydrogen_env->ast_id();
+  ASSERT(ast_id != AstNode::kNoNumber);
+  int value_count = hydrogen_env->values()->length();
+  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+                                          ast_id,
+                                          hydrogen_env->parameter_count(),
+                                          argument_count_,
+                                          value_count,
+                                          outer);
+  int argument_index = 0;
+  for (int i = 0; i < value_count; ++i) {
+    HValue* value = hydrogen_env->values()->at(i);
+    LOperand* op = NULL;
+    if (value->IsArgumentsObject()) {
+      op = NULL;
+    } else if (value->IsPushArgument()) {
+      op = new LArgument(argument_index++);
+    } else {
+      op = UseOrConstant(value);
+      if (op->IsUnallocated()) {
+        LUnallocated* unalloc = LUnallocated::cast(op);
+        unalloc->set_policy(LUnallocated::ANY);
+      }
+    }
+    result->AddValue(op, value->representation());
+  }
+
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(),
+                                   instr->include_stack_check());
+  if (instr->include_stack_check()) result = AssignPointerMap(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* v = instr->value();
+  HBasicBlock* first = instr->FirstSuccessor();
+  HBasicBlock* second = instr->SecondSuccessor();
+  ASSERT(first != NULL && second != NULL);
+  int first_id = first->block_id();
+  int second_id = second->block_id();
+
+  if (v->EmitAtUses()) {
+    if (v->IsClassOfTest()) {
+      HClassOfTest* compare = HClassOfTest::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+                                       TempRegister(),
+                                       TempRegister(),
+                                       first_id,
+                                       second_id);
+    } else if (v->IsCompare()) {
+      HCompare* compare = HCompare::cast(v);
+      Token::Value op = compare->token();
+      HValue* left = compare->left();
+      HValue* right = compare->right();
+      if (left->representation().IsInteger32()) {
+        ASSERT(right->representation().IsInteger32());
+        return new LCmpIDAndBranch(op,
+                                   UseRegisterAtStart(left),
+                                   UseOrConstantAtStart(right),
+                                   first_id,
+                                   second_id,
+                                   false);
+      } else if (left->representation().IsDouble()) {
+        ASSERT(right->representation().IsDouble());
+        return new LCmpIDAndBranch(op,
+                                   UseRegisterAtStart(left),
+                                   UseRegisterAtStart(right),
+                                   first_id,
+                                   second_id,
+                                   true);
+      } else {
+        ASSERT(left->representation().IsTagged());
+        ASSERT(right->representation().IsTagged());
+        bool reversed = op == Token::GT || op == Token::LTE;
+        LOperand* left_operand = UseFixed(left, reversed ? r0 : r1);
+        LOperand* right_operand = UseFixed(right, reversed ? r1 : r0);
+        LInstruction* result = new LCmpTAndBranch(left_operand,
+                                                  right_operand,
+                                                  first_id,
+                                                  second_id);
+        return MarkAsCall(result, instr);
+      }
+    } else if (v->IsIsSmi()) {
+      HIsSmi* compare = HIsSmi::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LIsSmiAndBranch(Use(compare->value()),
+                                 first_id,
+                                 second_id);
+    } else if (v->IsHasInstanceType()) {
+      HHasInstanceType* compare = HHasInstanceType::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
+                                           TempRegister(),
+                                           first_id,
+                                           second_id);
+    } else if (v->IsHasCachedArrayIndex()) {
+      HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LHasCachedArrayIndexAndBranch(
+          UseRegisterAtStart(compare->value()), first_id, second_id);
+    } else if (v->IsIsNull()) {
+      HIsNull* compare = HIsNull::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      // We only need a temp register for non-strict compare.
+      LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
+                                  compare->is_strict(),
+                                  temp,
+                                  first_id,
+                                  second_id);
+    } else if (v->IsIsObject()) {
+      HIsObject* compare = HIsObject::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+      return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
+                                    temp1,
+                                    temp2,
+                                    first_id,
+                                    second_id);
+    } else if (v->IsCompareJSObjectEq()) {
+      HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
+      return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+                                         UseRegisterAtStart(compare->right()),
+                                         first_id,
+                                         second_id);
+    } else if (v->IsInstanceOf()) {
+      HInstanceOf* instance_of = HInstanceOf::cast(v);
+      LInstruction* result =
+          new LInstanceOfAndBranch(Use(instance_of->left()),
+                                   Use(instance_of->right()),
+                                   first_id,
+                                   second_id);
+      return MarkAsCall(result, instr);
+    } else if (v->IsTypeofIs()) {
+      HTypeofIs* typeof_is = HTypeofIs::cast(v);
+      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()),
+                                    first_id,
+                                    second_id);
+    } else {
+      if (v->IsConstant()) {
+        if (HConstant::cast(v)->handle()->IsTrue()) {
+          return new LGoto(first_id);
+        } else if (HConstant::cast(v)->handle()->IsFalse()) {
+          return new LGoto(second_id);
+        }
+      }
+      Abort("Undefined compare before branch");
+      return NULL;
+    }
+  }
+  return new LBranch(UseRegisterAtStart(v), first_id, second_id);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMapAndBranch(
+    HCompareMapAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  HBasicBlock* first = instr->FirstSuccessor();
+  HBasicBlock* second = instr->SecondSuccessor();
+  return new LCmpMapAndBranch(value,
+                              instr->map(),
+                              first->block_id(),
+                              second->block_id());
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  return DefineAsRegister(new LArgumentsLength(Use(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  return DefineAsRegister(new LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LInstruction* result =
+      new LInstanceOf(UseFixed(instr->left(), r1),
+                      UseFixed(instr->right(), r0));
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), r1);
+  LOperand* receiver = UseFixed(instr->receiver(), r0);
+  LOperand* length = UseRegisterAtStart(instr->length());
+  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LInstruction* result = new LApplyArguments(function,
+                                             receiver,
+                                             length,
+                                             elements);
+  return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+  ++argument_count_;
+  LOperand* argument = Use(instr->argument());
+  return new LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
+  return DefineAsRegister(new LGlobalObject);
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
+  return DefineAsRegister(new LGlobalReceiver);
+}
+
+
+LInstruction* LChunkBuilder::DoCallConstantFunction(
+    HCallConstantFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallConstantFunction, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  BuiltinFunctionId op = instr->op();
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LInstruction* result = new LUnaryMathOperation(input);
+  switch (op) {
+    case kMathAbs:
+      return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+    case kMathFloor:
+      return AssignEnvironment(DefineAsRegister(result));
+    case kMathSqrt:
+      return DefineSameAsFirst(result);
+    case kMathPowHalf:
+      Abort("MathPowHalf LUnaryMathOperation not implemented");
+      return NULL;
+    case kMathLog:
+      Abort("MathLog LUnaryMathOperation not implemented");
+      return NULL;
+    case kMathCos:
+      Abort("MathCos LUnaryMathOperation not implemented");
+      return NULL;
+    case kMathSin:
+      Abort("MathSin LUnaryMathOperation not implemented");
+      return NULL;
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
+  ASSERT(instr->key()->representation().IsTagged());
+  argument_count_ -= instr->argument_count();
+  UseFixed(instr->key(), r2);
+  return MarkAsCall(DefineFixed(new LCallKeyed, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallNamed, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallGlobal, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallKnownGlobal, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  LOperand* constructor = UseFixed(instr->constructor(), r1);
+  argument_count_ -= instr->argument_count();
+  LInstruction* result = new LCallNew(constructor);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallFunction, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallRuntime, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
+  return DoBit(Token::BIT_AND, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+  ASSERT(instr->value()->representation().IsInteger32());
+  ASSERT(instr->representation().IsInteger32());
+  return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
+  return DoBit(Token::BIT_OR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
+  return DoBit(Token::BIT_XOR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else if (instr->representation().IsInteger32()) {
+    // The fixed temporary ensures that the right operand is not allocated
+    // into r1.
+    FixedTemp(r1);
+    LOperand* value = UseFixed(instr->left(), r0);
+    LOperand* divisor = UseRegister(instr->right());
+    return AssignEnvironment(DefineFixed(new LDivI(value, divisor), r0));
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    // The fixed temporary ensures that the right operand is not allocated
+    // into r1.
+    FixedTemp(r1);
+    LOperand* value = UseFixed(instr->left(), r0);
+    LOperand* divisor = UseRegister(instr->right());
+    LInstruction* result = DefineFixed(new LModI(value, divisor), r1);
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+        instr->CheckFlag(HValue::kCanBeDivByZero)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsTagged()) {
+    return DoArithmeticT(Token::MOD, instr);
+  } else {
+    ASSERT(instr->representation().IsDouble());
+    // We call a C function for the double modulo; it cannot trigger a GC.
+    // We need to use a fixed result register for the call.
+    // TODO(fschneider): Allow any registers as input registers.
+    LOperand* left = UseFixedDouble(instr->left(), d1);
+    LOperand* right = UseFixedDouble(instr->right(), d2);
+    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+    return MarkAsCall(DefineFixedDouble(result, d1), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstant(instr->MostConstantOperand());
+    LOperand* temp = NULL;
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      temp = TempRegister();
+    }
+    LMulI* mul = new LMulI(left, right, temp);
+    return AssignEnvironment(DefineSameAsFirst(mul));
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LSubI* sub = new LSubI(left, right);
+    LInstruction* result = DefineSameAsFirst(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LAddI* add = new LAddI(left, right);
+    LInstruction* result = DefineSameAsFirst(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  Abort("LPower instruction not implemented on ARM");
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+  Token::Value op = instr->token();
+  if (instr->left()->representation().IsInteger32()) {
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    return DefineAsRegister(new LCmpID(op, left, right, false));
+  } else if (instr->left()->representation().IsDouble()) {
+    ASSERT(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return DefineAsRegister(new LCmpID(op, left, right, true));
+  } else {
+    bool reversed = (op == Token::GT || op == Token::LTE);
+    LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
+    LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
+    LInstruction* result = new LCmpT(left, right);
+    return MarkAsCall(DefineFixed(result, r0), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareJSObjectEq(
+    HCompareJSObjectEq* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LInstruction* result = new LCmpJSObjectEq(left, right);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LIsNull(value,
+                                      instr->is_strict()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LIsObject(value, TempRegister()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseAtStart(instr->value());
+
+  return DefineAsRegister(new LIsSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LHasInstanceType(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
+    HHasCachedArrayIndex* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+
+  return DefineAsRegister(new LHasCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseTempRegister(instr->value());
+
+  return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLength(HArrayLength* instr) {
+  LOperand* array = NULL;
+  LOperand* temporary = NULL;
+
+  if (instr->value()->IsLoadElements()) {
+    array = UseRegisterAtStart(instr->value());
+  } else {
+    array = UseRegister(instr->value());
+    temporary = TempRegister();
+  }
+
+  LInstruction* result = new LArrayLength(array, temporary);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
+  LOperand* object = UseRegister(instr->value());
+  LInstruction* result = new LValueOf(object, TempRegister());
+  return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+                                            Use(instr->length())));
+}
+
+
+LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+  LOperand* value = UseFixed(instr->value(), r0);
+  return MarkAsCall(new LThrow(value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(instr->value());
+      LInstruction* res = new LNumberUntagD(value);
+      return AssignEnvironment(DefineAsRegister(res));
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      bool needs_check = !instr->value()->type().IsSmi();
+      LInstruction* res = NULL;
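+      // Note on the two paths below: a value statically known to be a smi
+      // can be untagged directly, while LTaggedToI must also handle heap
+      // numbers (hence the fixed double temp) and may deoptimize, so only
+      // that path is given an environment.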
+      if (needs_check) {
+        res = DefineSameAsFirst(new LTaggedToI(value, FixedTemp(d1)));
+      } else {
+        res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
+      }
+      if (needs_check) {
+        res = AssignEnvironment(res);
+      }
+      return res;
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(instr->value());
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+
+      // Make sure that the temp and result_temp registers are
+      // different.
+      LUnallocated* result_temp = TempRegister();
+      LInstruction* result = new LNumberTagD(value, temp1, temp2);
+      Define(result, result_temp);
+      return AssignPointerMap(result);
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      LInstruction* res = new LDoubleToI(value);
+      return AssignEnvironment(DefineAsRegister(res));
+    }
+  } else if (from.IsInteger32()) {
+    if (to.IsTagged()) {
+      HValue* val = instr->value();
+      LOperand* value = UseRegister(val);
+      if (val->HasRange() && val->range()->IsInSmiRange()) {
+        return DefineSameAsFirst(new LSmiTag(value));
+      } else {
+        LInstruction* result = new LNumberTagI(value);
+        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+      }
+    } else {
+      ASSERT(to.IsDouble());
+      LOperand* value = Use(instr->value());
+      return DefineAsRegister(new LInteger32ToDouble(value));
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value, eq));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LInstruction* result = new LCheckInstanceType(value, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+  LOperand* temp = TempRegister();
+  LInstruction* result =
+      new LCheckPrototypeMaps(temp,
+                              instr->holder(),
+                              instr->receiver_map());
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value, ne));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckFunction(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new LCheckMap(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  return new LReturn(UseFixed(instr->value(), r0));
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsInteger32()) {
+    int32_t value = instr->Integer32Value();
+    return DefineAsRegister(new LConstantI(value));
+  } else if (r.IsDouble()) {
+    double value = instr->DoubleValue();
+    return DefineAsRegister(new LConstantD(value));
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new LConstantT(instr->handle()));
+  } else {
+    Abort("unsupported constant of type double");
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
+  LInstruction* result = new LLoadGlobal();
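+  // A load that must check for the hole value can deoptimize and therefore
+  // needs an environment.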
+  return instr->check_hole_value()
+      ? AssignEnvironment(DefineAsRegister(result))
+      : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
+  return new LStoreGlobal(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  return DefineAsRegister(
+      new LLoadNamedField(UseRegisterAtStart(instr->object())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), r0);
+  LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), r0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return DefineSameAsFirst(new LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+    HLoadKeyedFastElement* instr) {
+  Representation r = instr->representation();
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* load_result = NULL;
+  // Double results need an extra temp register because the value is
+  // converted from a heap number to a double register.
+  if (r.IsDouble()) load_result = TempRegister();
+  LInstruction* result = new LLoadKeyedFastElement(obj,
+                                                   key,
+                                                   load_result);
+  if (r.IsDouble()) {
+    result = DefineAsRegister(result);
+  } else {
+    result = DefineSameAsFirst(result);
+  }
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), r1);
+  LOperand* key = UseFixed(instr->key(), r0);
+
+  LInstruction* result =
+      DefineFixed(new LLoadKeyedGeneric(object, key), r0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+    HStoreKeyedFastElement* instr) {
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  ASSERT(instr->value()->representation().IsTagged());
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
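+  // When a write barrier is needed, the value and key are forced into temp
+  // registers so they are still available to the write barrier code after
+  // the store itself.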
+  LOperand* obj = UseTempRegister(instr->object());
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegisterAtStart(instr->value());
+  LOperand* key = needs_write_barrier
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+
+  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), r2);
+  LOperand* key = UseFixed(instr->key(), r1);
+  LOperand* val = UseFixed(instr->value(), r0);
+
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsTagged());
+  ASSERT(instr->value()->representation().IsTagged());
+
+  return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool needs_write_barrier = !instr->value()->type().IsSmi();
+
+  LOperand* obj = needs_write_barrier
+      ? UseTempRegister(instr->object())
+      : UseRegisterAtStart(instr->object());
+
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+
+  // We need a scratch register only if we have a write barrier or the
+  // store targets the properties array rather than an in-object property.
+  LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
+      ? TempRegister() : NULL;
+
+  return new LStoreNamedField(obj,
+                              instr->name(),
+                              val,
+                              instr->is_in_object(),
+                              instr->offset(),
+                              temp,
+                              needs_write_barrier,
+                              instr->transition());
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), r1);
+  LOperand* val = UseFixed(instr->value(), r0);
+
+  LInstruction* result = new LStoreNamedGeneric(obj, instr->name(), val);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteral, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LRegExpLiteral, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LFunctionLiteral, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
+  LInstruction* result = new LDeleteProperty(Use(instr->object()),
+                                             UseOrConstant(instr->key()));
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  int spill_index = chunk()->GetParameterStackSlot(instr->index());
+  return DefineAsSpilled(new LParameter, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  int spill_index = chunk()->GetNextSpillIndex(false);  // Not double-width.
+  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallStub, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object; any real use causes a
+  // bailout, so no instruction needs to be generated here.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  LOperand* arguments = UseRegister(instr->arguments());
+  LOperand* length = UseTempRegister(instr->length());
+  LOperand* index = Use(instr->index());
+  LInstruction* result = new LAccessArgumentsAt(arguments, length, index);
+  return DefineAsRegister(AssignEnvironment(result));
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LInstruction* result = new LTypeof(Use(instr->value()));
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
+  return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  HEnvironment* env = current_block_->last_environment();
+  ASSERT(env != NULL);
+
+  env->set_ast_id(instr->ast_id());
+
+  env->Drop(instr->pop_count());
+  for (int i = 0; i < instr->values()->length(); ++i) {
+    HValue* value = instr->values()->at(i);
+    if (instr->HasAssignedIndexAt(i)) {
+      env->Bind(instr->GetAssignedIndexAt(i), value);
+    } else {
+      env->Push(value);
+    }
+  }
+
+  if (FLAG_trace_environment) {
+    PrintF("Reconstructed environment ast_id=%d, instr_id=%d\n",
+           instr->ast_id(),
+           instr->id());
+    env->PrintToStd();
+  }
+  ASSERT(env->values()->length() == instr->environment_height());
+
+  // If there is an instruction pending a deoptimization environment, create
+  // a lazy bailout instruction to capture the environment.
+  if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+    LInstruction* result = new LLazyBailout;
+    result = AssignEnvironment(result);
+    instructions_pending_deoptimization_environment_->
+        set_deoptimization_environment(result->environment());
+    ClearInstructionPendingDeoptimizationEnvironment();
+    return result;
+  }
+
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  return MarkAsCall(new LStackCheck, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->function(),
+                                               false,
+                                               undefined);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedClosure(instr->closure());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment()->outer();
+  current_block_->UpdateEnvironment(outer);
+  return NULL;
+}
+
+
+void LPointerMap::RecordPointer(LOperand* op) {
+  // Do not record arguments as pointers.
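+  // (A stack slot with a negative index addresses an incoming argument in
+  // the caller's portion of the frame.)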
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  pointer_operands_.Add(op);
+}
+
+
+void LPointerMap::PrintTo(StringStream* stream) const {
+  stream->Add("{");
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    pointer_operands_[i]->PrintTo(stream);
+  }
+  stream->Add("} @%d", position());
+}
+
+} }  // namespace v8::internal
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
new file mode 100644
index 0000000..41209c6
--- /dev/null
+++ b/src/arm/lithium-arm.h
@@ -0,0 +1,2115 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_LITHIUM_ARM_H_
+#define V8_ARM_LITHIUM_ARM_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+class LEnvironment;
+class Translation;
+
+
+// Type hierarchy:
+//
+// LInstruction
+//   LAccessArgumentsAt
+//   LArgumentsElements
+//   LArgumentsLength
+//   LBinaryOperation
+//     LAddI
+//     LApplyArguments
+//     LArithmeticD
+//     LArithmeticT
+//     LBitI
+//     LBoundsCheck
+//     LCmpID
+//     LCmpIDAndBranch
+//     LCmpJSObjectEq
+//     LCmpJSObjectEqAndBranch
+//     LCmpT
+//     LDivI
+//     LInstanceOf
+//     LInstanceOfAndBranch
+//     LLoadKeyedFastElement
+//     LLoadKeyedGeneric
+//     LModI
+//     LMulI
+//     LShiftI
+//     LSubI
+//   LCallConstantFunction
+//   LCallFunction
+//   LCallGlobal
+//   LCallKeyed
+//   LCallKnownGlobal
+//   LCallNamed
+//   LCallRuntime
+//   LCallStub
+//   LConstant
+//     LConstantD
+//     LConstantI
+//     LConstantT
+//   LDeoptimize
+//   LFunctionLiteral
+//   LGlobalObject
+//   LGlobalReceiver
+//   LLabel
+//   LLazyBailout
+//   LLoadGlobal
+//   LMaterializedLiteral
+//     LArrayLiteral
+//     LObjectLiteral
+//     LRegExpLiteral
+//   LOsrEntry
+//   LParameter
+//   LStackCheck
+//   LStoreKeyed
+//     LStoreKeyedFastElement
+//     LStoreKeyedGeneric
+//   LStoreNamed
+//     LStoreNamedField
+//     LStoreNamedGeneric
+//   LUnaryOperation
+//     LArrayLength
+//     LBitNotI
+//     LBranch
+//     LCallNew
+//     LCheckFunction
+//     LCheckInstanceType
+//     LCheckMap
+//     LCheckPrototypeMaps
+//     LCheckSmi
+//     LClassOfTest
+//     LClassOfTestAndBranch
+//     LDeleteProperty
+//     LDoubleToI
+//     LHasCachedArrayIndex
+//     LHasCachedArrayIndexAndBranch
+//     LHasInstanceType
+//     LHasInstanceTypeAndBranch
+//     LInteger32ToDouble
+//     LIsNull
+//     LIsNullAndBranch
+//     LIsObject
+//     LIsObjectAndBranch
+//     LIsSmi
+//     LIsSmiAndBranch
+//     LLoadNamedField
+//     LLoadNamedGeneric
+//     LNumberTagD
+//     LNumberTagI
+//     LPushArgument
+//     LReturn
+//     LSmiTag
+//     LStoreGlobal
+//     LTaggedToI
+//     LThrow
+//     LTypeof
+//     LTypeofIs
+//     LTypeofIsAndBranch
+//     LUnaryMathOperation
+//     LValueOf
+//   LUnknownOSRValue
+
+#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
+  V(BinaryOperation)                            \
+  V(Constant)                                   \
+  V(Call)                                       \
+  V(MaterializedLiteral)                        \
+  V(StoreKeyed)                                 \
+  V(StoreNamed)                                 \
+  V(UnaryOperation)                             \
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
+  V(AccessArgumentsAt)                          \
+  V(AddI)                                       \
+  V(ApplyArguments)                             \
+  V(ArgumentsElements)                          \
+  V(ArgumentsLength)                            \
+  V(ArithmeticD)                                \
+  V(ArithmeticT)                                \
+  V(ArrayLength)                                \
+  V(ArrayLiteral)                               \
+  V(BitI)                                       \
+  V(BitNotI)                                    \
+  V(BoundsCheck)                                \
+  V(Branch)                                     \
+  V(CallConstantFunction)                       \
+  V(CallFunction)                               \
+  V(CallGlobal)                                 \
+  V(CallKeyed)                                  \
+  V(CallKnownGlobal)                            \
+  V(CallNamed)                                  \
+  V(CallNew)                                    \
+  V(CallRuntime)                                \
+  V(CallStub)                                   \
+  V(CheckFunction)                              \
+  V(CheckInstanceType)                          \
+  V(CheckMap)                                   \
+  V(CheckPrototypeMaps)                         \
+  V(CheckSmi)                                   \
+  V(CmpID)                                      \
+  V(CmpIDAndBranch)                             \
+  V(CmpJSObjectEq)                              \
+  V(CmpJSObjectEqAndBranch)                     \
+  V(CmpMapAndBranch)                            \
+  V(CmpT)                                       \
+  V(CmpTAndBranch)                              \
+  V(ConstantD)                                  \
+  V(ConstantI)                                  \
+  V(ConstantT)                                  \
+  V(DeleteProperty)                             \
+  V(Deoptimize)                                 \
+  V(DivI)                                       \
+  V(DoubleToI)                                  \
+  V(FunctionLiteral)                            \
+  V(Gap)                                        \
+  V(GlobalObject)                               \
+  V(GlobalReceiver)                             \
+  V(Goto)                                       \
+  V(InstanceOf)                                 \
+  V(InstanceOfAndBranch)                        \
+  V(Integer32ToDouble)                          \
+  V(IsNull)                                     \
+  V(IsNullAndBranch)                            \
+  V(IsObject)                                   \
+  V(IsObjectAndBranch)                          \
+  V(IsSmi)                                      \
+  V(IsSmiAndBranch)                             \
+  V(HasInstanceType)                            \
+  V(HasInstanceTypeAndBranch)                   \
+  V(HasCachedArrayIndex)                        \
+  V(HasCachedArrayIndexAndBranch)               \
+  V(ClassOfTest)                                \
+  V(ClassOfTestAndBranch)                       \
+  V(Label)                                      \
+  V(LazyBailout)                                \
+  V(LoadElements)                               \
+  V(LoadGlobal)                                 \
+  V(LoadKeyedFastElement)                       \
+  V(LoadKeyedGeneric)                           \
+  V(LoadNamedField)                             \
+  V(LoadNamedGeneric)                           \
+  V(ModI)                                       \
+  V(MulI)                                       \
+  V(NumberTagD)                                 \
+  V(NumberTagI)                                 \
+  V(NumberUntagD)                               \
+  V(ObjectLiteral)                              \
+  V(OsrEntry)                                   \
+  V(Parameter)                                  \
+  V(PushArgument)                               \
+  V(RegExpLiteral)                              \
+  V(Return)                                     \
+  V(ShiftI)                                     \
+  V(SmiTag)                                     \
+  V(SmiUntag)                                   \
+  V(StackCheck)                                 \
+  V(StoreGlobal)                                \
+  V(StoreKeyedFastElement)                      \
+  V(StoreKeyedGeneric)                          \
+  V(StoreNamedField)                            \
+  V(StoreNamedGeneric)                          \
+  V(SubI)                                       \
+  V(TaggedToI)                                  \
+  V(Throw)                                      \
+  V(Typeof)                                     \
+  V(TypeofIs)                                   \
+  V(TypeofIsAndBranch)                          \
+  V(UnaryMathOperation)                         \
+  V(UnknownOSRValue)                            \
+  V(ValueOf)
+
+
+#define DECLARE_INSTRUCTION(type)                \
+  virtual bool Is##type() const { return true; } \
+  static L##type* cast(LInstruction* instr) {    \
+    ASSERT(instr->Is##type());                   \
+    return reinterpret_cast<L##type*>(instr);    \
+  }
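+// For illustration: DECLARE_INSTRUCTION(Goto) expands to
+//   virtual bool IsGoto() const { return true; }
+//   static LGoto* cast(LInstruction* instr) {
+//     ASSERT(instr->IsGoto());
+//     return reinterpret_cast<LGoto*>(instr);
+//   }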
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)        \
+  virtual void CompileToNative(LCodeGen* generator);        \
+  virtual const char* Mnemonic() const { return mnemonic; } \
+  DECLARE_INSTRUCTION(type)
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
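+// For illustration: in LAddI below, the pair
+//   DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+//   DECLARE_HYDROGEN_ACCESSOR(Add)
+// expands to
+//   virtual void CompileToNative(LCodeGen* generator);
+//   virtual const char* Mnemonic() const { return "add-i"; }
+//   virtual bool IsAddI() const { return true; }
+//   static LAddI* cast(LInstruction* instr) {
+//     ASSERT(instr->IsAddI());
+//     return reinterpret_cast<LAddI*>(instr);
+//   }
+//   HAdd* hydrogen() const { return HAdd::cast(hydrogen_value()); }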
+
+
+class LInstruction: public ZoneObject {
+ public:
+  LInstruction()
+      : hydrogen_value_(NULL) { }
+  virtual ~LInstruction() { }
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream) const { }
+
+  // Declare virtual type testers.
+#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
+  LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+  virtual bool IsControl() const { return false; }
+
+  void set_environment(LEnvironment* env) { environment_.set(env); }
+  LEnvironment* environment() const { return environment_.get(); }
+  bool HasEnvironment() const { return environment_.is_set(); }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_result(LOperand* operand) { result_.set(operand); }
+  LOperand* result() const { return result_.get(); }
+  bool HasResult() const { return result_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void set_deoptimization_environment(LEnvironment* env) {
+    deoptimization_environment_.set(env);
+  }
+  LEnvironment* deoptimization_environment() const {
+    return deoptimization_environment_.get();
+  }
+  bool HasDeoptimizationEnvironment() const {
+    return deoptimization_environment_.is_set();
+  }
+
+ private:
+  SetOncePointer<LEnvironment> environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  SetOncePointer<LOperand> result_;
+  HValue* hydrogen_value_;
+  SetOncePointer<LEnvironment> deoptimization_environment_;
+};
+
+
+class LGapNode;
+
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+  LGapResolver(const ZoneList<LMoveOperands>* moves, LOperand* marker_operand);
+  const ZoneList<LMoveOperands>* ResolveInReverseOrder();
+
+ private:
+  LGapNode* LookupNode(LOperand* operand);
+  bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
+  bool CanReach(LGapNode* a, LGapNode* b);
+  void RegisterMove(LMoveOperands move);
+  void AddResultMove(LOperand* from, LOperand* to);
+  void AddResultMove(LGapNode* from, LGapNode* to);
+  void ResolveCycle(LGapNode* start);
+
+  ZoneList<LGapNode*> nodes_;
+  ZoneList<LGapNode*> identified_cycles_;
+  ZoneList<LMoveOperands> result_;
+  LOperand* marker_operand_;
+  int next_visited_id_;
+  int bailout_after_ast_id_;
+};
+
+
+class LParallelMove : public ZoneObject {
+ public:
+  LParallelMove() : move_operands_(4) { }
+
+  void AddMove(LOperand* from, LOperand* to) {
+    move_operands_.Add(LMoveOperands(from, to));
+  }
+
+  bool IsRedundant() const;
+
+  const ZoneList<LMoveOperands>* move_operands() const {
+    return &move_operands_;
+  }
+
+  void PrintDataTo(StringStream* stream) const;
+
+ private:
+  ZoneList<LMoveOperands> move_operands_;
+};
+
+
+class LGap: public LInstruction {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+    if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+class LGoto: public LInstruction {
+ public:
+  LGoto(int block_id, bool include_stack_check = false)
+      : block_id_(block_id), include_stack_check_(include_stack_check) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int block_id() const { return block_id_; }
+  bool include_stack_check() const { return include_stack_check_; }
+
+ private:
+  int block_id_;
+  bool include_stack_check_;
+};
+
+
+class LLazyBailout: public LInstruction {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDeoptimize: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+};
+
+
+class LLabel: public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LUnknownOSRValue: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+class LUnaryOperation: public LInstruction {
+ public:
+  explicit LUnaryOperation(LOperand* input) : input_(input) { }
+
+  DECLARE_INSTRUCTION(UnaryOperation)
+
+  LOperand* input() const { return input_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* input_;
+};
+
+
+class LBinaryOperation: public LInstruction {
+ public:
+  LBinaryOperation(LOperand* left, LOperand* right)
+      : left_(left), right_(right) { }
+
+  DECLARE_INSTRUCTION(BinaryOperation)
+
+  LOperand* left() const { return left_; }
+  LOperand* right() const { return right_; }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* left_;
+  LOperand* right_;
+};
+
+
+class LApplyArguments: public LBinaryOperation {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements)
+      : LBinaryOperation(function, receiver),
+        length_(length),
+        elements_(elements) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() const { return left(); }
+  LOperand* receiver() const { return right(); }
+  LOperand* length() const { return length_; }
+  LOperand* elements() const { return elements_; }
+
+ private:
+  LOperand* length_;
+  LOperand* elements_;
+};
+
+
+class LAccessArgumentsAt: public LInstruction {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index)
+      : arguments_(arguments), length_(length), index_(index) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() const { return arguments_; }
+  LOperand* length() const { return length_; }
+  LOperand* index() const { return index_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* arguments_;
+  LOperand* length_;
+  LOperand* index_;
+};
+
+
+class LArgumentsLength: public LUnaryOperation {
+ public:
+  explicit LArgumentsLength(LOperand* elements) : LUnaryOperation(elements) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements: public LInstruction {
+ public:
+  LArgumentsElements() { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+};
+
+
+class LModI: public LBinaryOperation {
+ public:
+  LModI(LOperand* left, LOperand* right) : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivI: public LBinaryOperation {
+ public:
+  LDivI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+class LMulI: public LBinaryOperation {
+ public:
+  LMulI(LOperand* left, LOperand* right, LOperand* temp)
+      : LBinaryOperation(left, right), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LCmpID: public LBinaryOperation {
+ public:
+  LCmpID(Token::Value op, LOperand* left, LOperand* right, bool is_double)
+      : LBinaryOperation(left, right), op_(op), is_double_(is_double) { }
+
+  Token::Value op() const { return op_; }
+  bool is_double() const { return is_double_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
+
+ private:
+  Token::Value op_;
+  bool is_double_;
+};
+
+
+class LCmpIDAndBranch: public LCmpID {
+ public:
+  LCmpIDAndBranch(Token::Value op,
+                  LOperand* left,
+                  LOperand* right,
+                  int true_block_id,
+                  int false_block_id,
+                  bool is_double)
+      : LCmpID(op, left, right, is_double),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LUnaryMathOperation: public LUnaryOperation {
+ public:
+  explicit LUnaryMathOperation(LOperand* value)
+      : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  BuiltinFunctionId op() const { return hydrogen()->op(); }
+};
+
+
+class LCmpJSObjectEq: public LBinaryOperation {
+ public:
+  LCmpJSObjectEq(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+};
+
+
+class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq {
+ public:
+  LCmpJSObjectEqAndBranch(LOperand* left,
+                          LOperand* right,
+                          int true_block_id,
+                          int false_block_id)
+      : LCmpJSObjectEq(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
+                               "cmp-jsobject-eq-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsNull: public LUnaryOperation {
+ public:
+  LIsNull(LOperand* value, bool is_strict)
+      : LUnaryOperation(value), is_strict_(is_strict) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
+
+  bool is_strict() const { return is_strict_; }
+
+ private:
+  bool is_strict_;
+};
+
+
+class LIsNullAndBranch: public LIsNull {
+ public:
+  LIsNullAndBranch(LOperand* value,
+                   bool is_strict,
+                   LOperand* temp,
+                   int true_block_id,
+                   int false_block_id)
+      : LIsNull(value, is_strict),
+        temp_(temp),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsObject: public LUnaryOperation {
+ public:
+  LIsObject(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temp_(temp) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LIsObjectAndBranch: public LIsObject {
+ public:
+  LIsObjectAndBranch(LOperand* value,
+                     LOperand* temp,
+                     LOperand* temp2,
+                     int true_block_id,
+                     int false_block_id)
+      : LIsObject(value, temp),
+        temp2_(temp2),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp2() const { return temp2_; }
+
+ private:
+  LOperand* temp2_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsSmi: public LUnaryOperation {
+ public:
+  explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmi)
+};
+
+
+class LIsSmiAndBranch: public LIsSmi {
+ public:
+  LIsSmiAndBranch(LOperand* value,
+                  int true_block_id,
+                  int false_block_id)
+      : LIsSmi(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LHasInstanceType: public LUnaryOperation {
+ public:
+  explicit LHasInstanceType(LOperand* value)
+      : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+
+  InstanceType TestType();  // The type to test against when generating code.
+  Condition BranchCondition();  // The branch condition for 'true'.
+};
+
+
+class LHasInstanceTypeAndBranch: public LHasInstanceType {
+ public:
+  LHasInstanceTypeAndBranch(LOperand* value,
+                            LOperand* temporary,
+                            int true_block_id,
+                            int false_block_id)
+      : LHasInstanceType(value),
+        temp_(temporary),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp() { return temp_; }
+
+ private:
+  LOperand* temp_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LHasCachedArrayIndex: public LUnaryOperation {
+ public:
+  explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex {
+ public:
+  LHasCachedArrayIndexAndBranch(LOperand* value,
+                                int true_block_id,
+                                int false_block_id)
+      : LHasCachedArrayIndex(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LClassOfTest: public LUnaryOperation {
+ public:
+  LClassOfTest(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temporary_(temp) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* temporary() { return temporary_; }
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LClassOfTestAndBranch: public LClassOfTest {
+ public:
+  LClassOfTestAndBranch(LOperand* value,
+                        LOperand* temporary,
+                        LOperand* temporary2,
+                        int true_block_id,
+                        int false_block_id)
+      : LClassOfTest(value, temporary),
+        temporary2_(temporary2),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+  LOperand* temporary2() { return temporary2_; }
+
+ private:
+  LOperand* temporary2_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LCmpT: public LBinaryOperation {
+ public:
+  LCmpT(LOperand* left, LOperand* right) : LBinaryOperation(left, right) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(Compare)
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LCmpTAndBranch: public LCmpT {
+ public:
+  LCmpTAndBranch(LOperand* left,
+                 LOperand* right,
+                 int true_block_id,
+                 int false_block_id)
+      : LCmpT(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LInstanceOf: public LBinaryOperation {
+ public:
+  LInstanceOf(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfAndBranch: public LInstanceOf {
+ public:
+  LInstanceOfAndBranch(LOperand* left,
+                       LOperand* right,
+                       int true_block_id,
+                       int false_block_id)
+      : LInstanceOf(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LBoundsCheck: public LBinaryOperation {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length)
+      : LBinaryOperation(index, length) { }
+
+  LOperand* index() const { return left(); }
+  LOperand* length() const { return right(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+};
+
+
+class LBitI: public LBinaryOperation {
+ public:
+  LBitI(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  Token::Value op() const { return op_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+
+ private:
+  Token::Value op_;
+};
+
+
+class LShiftI: public LBinaryOperation {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : LBinaryOperation(left, right), op_(op), can_deopt_(can_deopt) { }
+
+  Token::Value op() const { return op_; }
+
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI: public LBinaryOperation {
+ public:
+  LSubI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstant: public LInstruction {
+  DECLARE_INSTRUCTION(Constant)
+};
+
+
+class LConstantI: public LConstant {
+ public:
+  explicit LConstantI(int32_t value) : value_(value) { }
+  int32_t value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+
+ private:
+  int32_t value_;
+};
+
+
+class LConstantD: public LConstant {
+ public:
+  explicit LConstantD(double value) : value_(value) { }
+  double value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+
+ private:
+  double value_;
+};
+
+
+class LConstantT: public LConstant {
+ public:
+  explicit LConstantT(Handle<Object> value) : value_(value) { }
+  Handle<Object> value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+
+ private:
+  Handle<Object> value_;
+};
+
+
+class LBranch: public LUnaryOperation {
+ public:
+  LBranch(LOperand* input, int true_block_id, int false_block_id)
+      : LUnaryOperation(input),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Value)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LCmpMapAndBranch: public LUnaryOperation {
+ public:
+  LCmpMapAndBranch(LOperand* value,
+                   Handle<Map> map,
+                   int true_block_id,
+                   int false_block_id)
+      : LUnaryOperation(value),
+        map_(map),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+
+  virtual bool IsControl() const { return true; }
+
+  Handle<Map> map() const { return map_; }
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  Handle<Map> map_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LArrayLength: public LUnaryOperation {
+ public:
+  LArrayLength(LOperand* input, LOperand* temporary)
+      : LUnaryOperation(input), temporary_(temporary) { }
+
+  LOperand* temporary() const { return temporary_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLength, "array-length")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLength)
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LValueOf: public LUnaryOperation {
+ public:
+  LValueOf(LOperand* input, LOperand* temporary)
+      : LUnaryOperation(input), temporary_(temporary) { }
+
+  LOperand* temporary() const { return temporary_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
+  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LThrow: public LUnaryOperation {
+ public:
+  explicit LThrow(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class LBitNotI: public LUnaryOperation {
+ public:
+  explicit LBitNotI(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
+class LAddI: public LBinaryOperation {
+ public:
+  LAddI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LArithmeticD: public LBinaryOperation {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  Token::Value op() const { return op_; }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT: public LBinaryOperation {
+ public:
+  LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+  Token::Value op() const { return op_; }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn: public LUnaryOperation {
+ public:
+  explicit LReturn(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField: public LUnaryOperation {
+ public:
+  explicit LLoadNamedField(LOperand* object) : LUnaryOperation(object) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric: public LUnaryOperation {
+ public:
+  explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation(object) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  LOperand* object() const { return input(); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadElements: public LUnaryOperation {
+ public:
+  explicit LLoadElements(LOperand* obj) : LUnaryOperation(obj) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class LLoadKeyedFastElement: public LBinaryOperation {
+ public:
+  LLoadKeyedFastElement(LOperand* elements,
+                        LOperand* key,
+                        LOperand* load_result)
+      : LBinaryOperation(elements, key),
+        load_result_(load_result) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+  LOperand* elements() const { return left(); }
+  LOperand* key() const { return right(); }
+  LOperand* load_result() const { return load_result_; }
+
+ private:
+  LOperand* load_result_;
+};
+
+
+class LLoadKeyedGeneric: public LBinaryOperation {
+ public:
+  LLoadKeyedGeneric(LOperand* obj, LOperand* key)
+      : LBinaryOperation(obj, key) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+
+  LOperand* object() const { return left(); }
+  LOperand* key() const { return right(); }
+};
+
+
+class LLoadGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+};
+
+
+class LStoreGlobal: public LUnaryOperation {
+ public:
+  explicit LStoreGlobal(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+};
+
+
+class LPushArgument: public LUnaryOperation {
+ public:
+  explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LGlobalObject: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+};
+
+
+class LGlobalReceiver: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+};
+
+
+class LCallConstantFunction: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<JSFunction> function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKeyed: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNamed: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
+  DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 2; }
+};
+
+
+class LCallGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKnownGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<JSFunction> target() const { return hydrogen()->target(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew: public LUnaryOperation {
+ public:
+  explicit LCallNew(LOperand* constructor) : LUnaryOperation(constructor) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+  DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble: public LUnaryOperation {
+ public:
+  explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LNumberTagI: public LUnaryOperation {
+ public:
+  explicit LNumberTagI(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagD: public LUnaryOperation {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2)
+      : LUnaryOperation(value), temp1_(temp1), temp2_(temp2) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+
+  LOperand* temp1() const { return temp1_; }
+  LOperand* temp2() const { return temp2_; }
+
+ private:
+  LOperand* temp1_;
+  LOperand* temp2_;
+};
+
+
+// Sometimes truncating conversion from a double value to an int32.
+class LDoubleToI: public LUnaryOperation {
+ public:
+  explicit LDoubleToI(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI: public LUnaryOperation {
+ public:
+  LTaggedToI(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LSmiTag: public LUnaryOperation {
+ public:
+  explicit LSmiTag(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+class LNumberUntagD: public LUnaryOperation {
+ public:
+  explicit LNumberUntagD(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+};
+
+
+class LSmiUntag: public LUnaryOperation {
+ public:
+  LSmiUntag(LOperand* use, bool needs_check)
+      : LUnaryOperation(use), needs_check_(needs_check) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+  bool needs_check() const { return needs_check_; }
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamed: public LInstruction {
+ public:
+  LStoreNamed(LOperand* obj, Handle<Object> name, LOperand* val)
+      : object_(obj), name_(name), value_(val) { }
+
+  DECLARE_INSTRUCTION(StoreNamed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* object() const { return object_; }
+  Handle<Object> name() const { return name_; }
+  LOperand* value() const { return value_; }
+
+ private:
+  LOperand* object_;
+  Handle<Object> name_;
+  LOperand* value_;
+};
+
+
+class LStoreNamedField: public LStoreNamed {
+ public:
+  LStoreNamedField(LOperand* obj,
+                   Handle<Object> name,
+                   LOperand* val,
+                   bool in_object,
+                   int offset,
+                   LOperand* temp,
+                   bool needs_write_barrier,
+                   Handle<Map> transition)
+      : LStoreNamed(obj, name, val),
+        is_in_object_(in_object),
+        offset_(offset),
+        temp_(temp),
+        needs_write_barrier_(needs_write_barrier),
+        transition_(transition) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+
+  bool is_in_object() { return is_in_object_; }
+  int offset() { return offset_; }
+  LOperand* temp() { return temp_; }
+  bool needs_write_barrier() { return needs_write_barrier_; }
+  Handle<Map> transition() const { return transition_; }
+  void set_transition(Handle<Map> map) { transition_ = map; }
+
+ private:
+  bool is_in_object_;
+  int offset_;
+  LOperand* temp_;
+  bool needs_write_barrier_;
+  Handle<Map> transition_;
+};
+
+
+class LStoreNamedGeneric: public LStoreNamed {
+ public:
+  LStoreNamedGeneric(LOperand* obj,
+                     Handle<Object> name,
+                     LOperand* val)
+      : LStoreNamed(obj, name, val) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+};
+
+
+class LStoreKeyed: public LInstruction {
+ public:
+  LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val)
+      : object_(obj), key_(key), value_(val) { }
+
+  DECLARE_INSTRUCTION(StoreKeyed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* object() const { return object_; }
+  LOperand* key() const { return key_; }
+  LOperand* value() const { return value_; }
+
+ private:
+  LOperand* object_;
+  LOperand* key_;
+  LOperand* value_;
+};
+
+
+class LStoreKeyedFastElement: public LStoreKeyed {
+ public:
+  LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
+      : LStoreKeyed(obj, key, val) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+                               "store-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+};
+
+
+class LStoreKeyedGeneric: public LStoreKeyed {
+ public:
+  LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
+      : LStoreKeyed(obj, key, val) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+};
+
+
+class LCheckFunction: public LUnaryOperation {
+ public:
+  explicit LCheckFunction(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
+  DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+};
+
+
+class LCheckInstanceType: public LUnaryOperation {
+ public:
+  LCheckInstanceType(LOperand* use, LOperand* temp)
+      : LUnaryOperation(use), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LCheckMap: public LUnaryOperation {
+ public:
+  explicit LCheckMap(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+};
+
+
+class LCheckPrototypeMaps: public LInstruction {
+ public:
+  LCheckPrototypeMaps(LOperand* temp,
+                      Handle<JSObject> holder,
+                      Handle<Map> receiver_map)
+      : temp_(temp),
+        holder_(holder),
+        receiver_map_(receiver_map) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+
+  LOperand* temp() const { return temp_; }
+  Handle<JSObject> holder() const { return holder_; }
+  Handle<Map> receiver_map() const { return receiver_map_; }
+
+ private:
+  LOperand* temp_;
+  Handle<JSObject> holder_;
+  Handle<Map> receiver_map_;
+};
+
+
+class LCheckSmi: public LUnaryOperation {
+ public:
+  LCheckSmi(LOperand* use, Condition condition)
+      : LUnaryOperation(use), condition_(condition) { }
+
+  Condition condition() const { return condition_; }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const {
+    return (condition_ == eq) ? "check-non-smi" : "check-smi";
+  }
+
+ private:
+  Condition condition_;
+};
+
+
+class LMaterializedLiteral: public LInstruction {
+ public:
+  DECLARE_INSTRUCTION(MaterializedLiteral)
+};
+
+
+class LArrayLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+};
+
+
+class LObjectLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+};
+
+
+class LRegExpLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+
+  Handle<SharedFunctionInfo> shared_info() {
+    return hydrogen()->shared_info();
+  }
+};
+
+
+class LTypeof: public LUnaryOperation {
+ public:
+  explicit LTypeof(LOperand* input) : LUnaryOperation(input) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIs: public LUnaryOperation {
+ public:
+  explicit LTypeofIs(LOperand* input) : LUnaryOperation(input) { }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+};
+
+
+class LTypeofIsAndBranch: public LTypeofIs {
+ public:
+  LTypeofIsAndBranch(LOperand* value,
+                     int true_block_id,
+                     int false_block_id)
+      : LTypeofIs(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LDeleteProperty: public LBinaryOperation {
+ public:
+  LDeleteProperty(LOperand* obj, LOperand* key) : LBinaryOperation(obj, key) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+
+  LOperand* object() const { return left(); }
+  LOperand* key() const { return right(); }
+};
+
+
+class LOsrEntry: public LInstruction {
+ public:
+  LOsrEntry();
+
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+
+  LOperand** SpilledRegisterArray() { return register_spills_; }
+  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
+
+  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
+  void MarkSpilledDoubleRegister(int allocation_index,
+                                 LOperand* spill_operand);
+
+ private:
+  // Arrays of spill slot operands for registers with an assigned spill
+  // slot, i.e., that must also be restored to the spill slot on OSR entry.
+  // NULL if the register has no assigned spill slot.  Indexed by allocation
+  // index.
+  LOperand* register_spills_[Register::kNumAllocatableRegisters];
+  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+};
+
+
+class LStackCheck: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+};
+
+
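+// A pointer map records which operands contain tagged pointers at one
+// safepoint, so that the GC can find them.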
+class LPointerMap: public ZoneObject {
+ public:
+  explicit LPointerMap(int position)
+      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
+
+  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
+  int position() const { return position_; }
+  int lithium_position() const { return lithium_position_; }
+
+  void set_lithium_position(int pos) {
+    ASSERT(lithium_position_ == -1);
+    lithium_position_ = pos;
+  }
+
+  void RecordPointer(LOperand* op);
+  void PrintTo(StringStream* stream) const;
+
+ private:
+  ZoneList<LOperand*> pointer_operands_;
+  int position_;
+  int lithium_position_;
+};
+
+
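+// An LEnvironment captures the state needed to reconstruct an unoptimized
+// frame on deoptimization: the closure, the values of parameters, locals
+// and expression stack entries, and a link to the environment of the next
+// outer (inlined) frame, if any.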
+class LEnvironment: public ZoneObject {
+ public:
+  LEnvironment(Handle<JSFunction> closure,
+               int ast_id,
+               int parameter_count,
+               int argument_count,
+               int value_count,
+               LEnvironment* outer)
+      : closure_(closure),
+        arguments_stack_height_(argument_count),
+        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
+        translation_index_(-1),
+        ast_id_(ast_id),
+        parameter_count_(parameter_count),
+        values_(value_count),
+        representations_(value_count),
+        spilled_registers_(NULL),
+        spilled_double_registers_(NULL),
+        outer_(outer) {
+  }
+
+  Handle<JSFunction> closure() const { return closure_; }
+  int arguments_stack_height() const { return arguments_stack_height_; }
+  int deoptimization_index() const { return deoptimization_index_; }
+  int translation_index() const { return translation_index_; }
+  int ast_id() const { return ast_id_; }
+  int parameter_count() const { return parameter_count_; }
+  const ZoneList<LOperand*>* values() const { return &values_; }
+  LEnvironment* outer() const { return outer_; }
+
+  void AddValue(LOperand* operand, Representation representation) {
+    values_.Add(operand);
+    representations_.Add(representation);
+  }
+
+  bool HasTaggedValueAt(int index) const {
+    return representations_[index].IsTagged();
+  }
+
+  void Register(int deoptimization_index, int translation_index) {
+    ASSERT(!HasBeenRegistered());
+    deoptimization_index_ = deoptimization_index;
+    translation_index_ = translation_index;
+  }
+  bool HasBeenRegistered() const {
+    return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
+  }
+
+  void SetSpilledRegisters(LOperand** registers,
+                           LOperand** double_registers) {
+    spilled_registers_ = registers;
+    spilled_double_registers_ = double_registers;
+  }
+
+  // Emit frame translation commands for this environment.
+  void WriteTranslation(LCodeGen* cgen, Translation* translation) const;
+
+  void PrintTo(StringStream* stream) const;
+
+ private:
+  Handle<JSFunction> closure_;
+  int arguments_stack_height_;
+  int deoptimization_index_;
+  int translation_index_;
+  int ast_id_;
+  int parameter_count_;
+  ZoneList<LOperand*> values_;
+  ZoneList<Representation> representations_;
+
+  // Arrays of spill slot operands for registers that are also in spill
+  // slots at an OSR entry, indexed by allocation index.  NULL for
+  // environments that do not correspond to an OSR entry.
+  LOperand** spilled_registers_;
+  LOperand** spilled_double_registers_;
+
+  LEnvironment* outer_;
+};
+
+class LChunkBuilder;
+class LChunk: public ZoneObject {
+ public:
+  explicit LChunk(HGraph* graph);
+
+  int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  LConstantOperand* DefineConstantOperand(HConstant* constant);
+  Handle<Object> LookupLiteral(LConstantOperand* operand) const;
+  Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+
+  int GetNextSpillIndex(bool is_double);
+  LOperand* GetNextSpillSlot(bool is_double);
+
+  int ParameterAt(int index);
+  int GetParameterStackSlot(int index) const;
+  int spill_slot_count() const { return spill_slot_count_; }
+  HGraph* graph() const { return graph_; }
+  const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+  void AddGapMove(int index, LOperand* from, LOperand* to);
+  LGap* GetGapAt(int index) const;
+  bool IsGapAt(int index) const;
+  int NearestGapPos(int index) const;
+  void MarkEmptyBlocks();
+  const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
+  LLabel* GetLabel(int block_id) const {
+    HBasicBlock* block = graph_->blocks()->at(block_id);
+    int first_instruction = block->first_instruction_index();
+    return LLabel::cast(instructions_[first_instruction]);
+  }
+  int LookupDestination(int block_id) const {
+    LLabel* cur = GetLabel(block_id);
+    while (cur->replacement() != NULL) {
+      cur = cur->replacement();
+    }
+    return cur->block_id();
+  }
+  Label* GetAssemblyLabel(int block_id) const {
+    LLabel* label = GetLabel(block_id);
+    ASSERT(!label->HasReplacement());
+    return label->label();
+  }
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+    return &inlined_closures_;
+  }
+
+  void AddInlinedClosure(Handle<JSFunction> closure) {
+    inlined_closures_.Add(closure);
+  }
+
+  void Verify() const;
+
+ private:
+  int spill_slot_count_;
+  HGraph* const graph_;
+  ZoneList<LInstruction*> instructions_;
+  ZoneList<LPointerMap*> pointer_maps_;
+  ZoneList<Handle<JSFunction> > inlined_closures_;
+};
+
+
+class LChunkBuilder BASE_EMBEDDED {
+ public:
+  LChunkBuilder(HGraph* graph, LAllocator* allocator)
+      : chunk_(NULL),
+        graph_(graph),
+        status_(UNUSED),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        argument_count_(0),
+        allocator_(allocator),
+        position_(RelocInfo::kNoPosition),
+        instructions_pending_deoptimization_environment_(NULL),
+        pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+
+  // Build the sequence for the graph.
+  LChunk* Build();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  enum Status {
+    UNUSED,
+    BUILDING,
+    DONE,
+    ABORTED
+  };
+
+  LChunk* chunk() const { return chunk_; }
+  HGraph* graph() const { return graph_; }
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_building() const { return status_ == BUILDING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  void Abort(const char* format, ...);
+
+  // Methods for getting operands for Use / Define / Temp.
+  LRegister* ToOperand(Register reg);
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  LOperand* Use(HValue* value, LUnallocated* operand);
+  LOperand* UseFixed(HValue* value, Register fixed_register);
+  LOperand* UseFixedDouble(HValue* value, DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the
+  // end of the instruction. This means that the register allocator will
+  // not reuse its register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only
+  // at the instruction start. The register allocator is free to assign the
+  // same register to some other operand used inside the instruction
+  // (i.e. a temporary or the output).
+  LOperand* UseRegister(HValue* value);
+  LOperand* UseRegisterAtStart(HValue* value);
+
+  // A value in a register that may be trashed.
+  LOperand* UseTempRegister(HValue* value);
+  LOperand* Use(HValue* value);
+  LOperand* UseAtStart(HValue* value);
+  LOperand* UseOrConstant(HValue* value);
+  LOperand* UseOrConstantAtStart(HValue* value);
+  LOperand* UseRegisterOrConstant(HValue* value);
+  LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LInstruction* instr, LUnallocated* result);
+  LInstruction* Define(LInstruction* instr);
+  LInstruction* DefineAsRegister(LInstruction* instr);
+  LInstruction* DefineAsSpilled(LInstruction* instr, int index);
+  LInstruction* DefineSameAsAny(LInstruction* instr);
+  LInstruction* DefineSameAsFirst(LInstruction* instr);
+  LInstruction* DefineFixed(LInstruction* instr, Register reg);
+  LInstruction* DefineFixedDouble(LInstruction* instr, DoubleRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly, and we do not attach an environment to such
+  // instructions.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+      LInstruction* instr, int ast_id);
+  void ClearInstructionPendingDeoptimizationEnvironment();
+
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+
+  // Temporary operand that may be a memory location.
+  LOperand* Temp();
+  // Temporary operand that must be in a register.
+  LUnallocated* TempRegister();
+  LOperand* FixedTemp(Register reg);
+  LOperand* FixedTemp(DoubleRegister reg);
+
+  void VisitInstruction(HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+
+  LChunk* chunk_;
+  HGraph* const graph_;
+  Status status_;
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  int argument_count_;
+  LAllocator* allocator_;
+  int position_;
+  LInstruction* instructions_pending_deoptimization_environment_;
+  int pending_deoptimization_ast_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_INSTRUCTION
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_LITHIUM_ARM_H_
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
new file mode 100644
index 0000000..dfc4891
--- /dev/null
+++ b/src/arm/lithium-codegen-arm.cc
@@ -0,0 +1,2172 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "arm/lithium-codegen-arm.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
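+// When invoked after a call, records a safepoint for the call site with
+// the given pointer map and deoptimization index.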
+class SafepointGenerator : public PostCallGenerator {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     int deoptimization_index)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deoptimization_index_(deoptimization_index) { }
+  virtual ~SafepointGenerator() { }
+
+  virtual void Generate() {
+    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  int deoptimization_index_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  HPhase phase("Code generation", chunk());
+  ASSERT(is_unused());
+  status_ = GENERATING;
+  CpuFeatures::Scope scope1(VFP3);
+  CpuFeatures::Scope scope2(ARMv7);
+  return GeneratePrologue() &&
+      GenerateBody() &&
+      GenerateDeferredCode() &&
+      GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  ASSERT(is_done());
+  code->set_stack_slots(StackSlotCount());
+  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+    PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+void LCodeGen::Comment(const char* format, ...) {
+  if (!FLAG_code_comments) return;
+  char buffer[4 * KB];
+  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+  va_list arguments;
+  va_start(arguments, format);
+  builder.AddFormattedList(format, arguments);
+  va_end(arguments);
+
+  // Copy the string before recording it in the assembler to avoid
+  // issues when the stack-allocated buffer goes out of scope.
+  size_t length = builder.position();
+  Vector<char> copy = Vector<char>::New(length + 1);
+  memcpy(copy.start(), builder.Finalize(), copy.length());
+  masm()->RecordComment(copy.start());
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  ASSERT(is_generating());
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ stop("stop_at");
+  }
+#endif
+
+  // r1: Callee's JS function.
+  // cp: Callee's context.
+  // fp: Caller's frame pointer.
+  // lr: Caller's pc.
+
+  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+  __ add(fp, sp, Operand(2 * kPointerSize));  // Adjust FP to point to saved FP.
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = StackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
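+      // In debug mode, fill the reserved slots with the zap value so that
+      // reads of uninitialized slots are easy to spot.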
+      __ mov(r0, Operand(slots));
+      __ mov(r2, Operand(kSlotsZapValue));
+      Label loop;
+      __ bind(&loop);
+      __ push(r2);
+      __ sub(r0, r0, Operand(1), SetCC);
+      __ b(ne, &loop);
+    } else {
+      __ sub(sp, sp, Operand(slots * kPointerSize));
+    }
+  }
+
+  // Trace the call.
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateBody() {
+  ASSERT(is_generating());
+  bool emit_instructions = true;
+  for (current_instruction_ = 0;
+       !is_aborted() && current_instruction_ < instructions_->length();
+       current_instruction_++) {
+    LInstruction* instr = instructions_->at(current_instruction_);
+    if (instr->IsLabel()) {
+      LLabel* label = LLabel::cast(instr);
+      emit_instructions = !label->HasReplacement();
+    }
+
+    if (emit_instructions) {
+      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+      instr->CompileToNative(this);
+    }
+  }
+  return !is_aborted();
+}
+
+
+LInstruction* LCodeGen::GetNextInstruction() {
+  if (current_instruction_ < instructions_->length() - 1) {
+    return instructions_->at(current_instruction_ + 1);
+  } else {
+    return NULL;
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  ASSERT(is_generating());
+  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+    LDeferredCode* code = deferred_[i];
+    __ bind(code->entry());
+    code->Generate();
+    __ jmp(code->exit());
+  }
+
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  ASSERT(is_done());
+  safepoints_.Emit(masm(), StackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::FromAllocationIndex(index);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+  return DoubleRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  ASSERT(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+  if (op->IsRegister()) {
+    return ToRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    __ mov(scratch, ToOperand(op));
+    return scratch;
+  } else if (op->IsStackSlot() || op->IsArgument()) {
+    __ ldr(scratch, ToMemOperand(op));
+    return scratch;
+  }
+  UNREACHABLE();
+  return scratch;
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  ASSERT(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+                                                SwVfpRegister flt_scratch,
+                                                DoubleRegister dbl_scratch) {
+  if (op->IsDoubleRegister()) {
+    return ToDoubleRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
+      __ vmov(flt_scratch, ip);
+      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+      return dbl_scratch;
+    } else if (r.IsDouble()) {
+      Abort("unsupported double immediate");
+    } else if (r.IsTagged()) {
+      Abort("unsupported tagged immediate");
+    }
+  } else if (op->IsStackSlot() || op->IsArgument()) {
+    // TODO(regis): Why is vldr not taking a MemOperand?
+    // __ vldr(dbl_scratch, ToMemOperand(op));
+    MemOperand mem_op = ToMemOperand(op);
+    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
+    return dbl_scratch;
+  }
+  UNREACHABLE();
+  return dbl_scratch;
+}
+
+
+int LCodeGen::ToInteger32(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
+  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
+      value->Number());
+  return static_cast<int32_t>(value->Number());
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    Handle<Object> literal = chunk_->LookupLiteral(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      ASSERT(literal->IsNumber());
+      return Operand(static_cast<int32_t>(literal->Number()));
+    } else if (r.IsDouble()) {
+      Abort("ToOperand Unsupported double immediate.");
+    }
+    ASSERT(r.IsTagged());
+    return Operand(literal);
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort("ToOperand IsDoubleRegister unimplemented");
+    return Operand(0);
+  }
+  // Stack slots not implemented, use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand(0);
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+  // TODO(regis): Revisit.
+  ASSERT(!op->IsRegister());
+  ASSERT(!op->IsDoubleRegister());
+  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, and
+    // context in the fixed part of the frame.
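+    // For example, spill slot 0 is at fp - 3 * kPointerSize.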
+    return MemOperand(fp, -(index + 3) * kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address.
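+    // For example, the first incoming parameter (index -1) is at
+    // fp + 2 * kPointerSize, just above the return address.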
+    return MemOperand(fp, -(index - 1) * kPointerSize);
+  }
+}
+
+
+void LCodeGen::AddToTranslation(Translation* translation,
+                                LOperand* op,
+                                bool is_tagged) {
+  if (op == NULL) {
+    // TODO(twuerthinger): Introduce marker operands to indicate that this value
+    // is not present and must be reconstructed from the deoptimizer. Currently
+    // this is only used for the arguments object.
+    translation->StoreArgumentsObject();
+  } else if (op->IsStackSlot()) {
+    if (is_tagged) {
+      translation->StoreStackSlot(op->index());
+    } else {
+      translation->StoreInt32StackSlot(op->index());
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsArgument()) {
+    ASSERT(is_tagged);
+    int src_index = StackSlotCount() + op->index();
+    translation->StoreStackSlot(src_index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(literal);
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  if (instr != NULL) {
+    LPointerMap* pointers = instr->pointer_map();
+    RecordPosition(pointers->position());
+    __ Call(code, mode);
+    RegisterLazyDeoptimization(instr);
+  } else {
+    LPointerMap no_pointers(0);
+    RecordPosition(no_pointers.position());
+    __ Call(code, mode);
+    RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
+  }
+}
+
+
+void LCodeGen::CallRuntime(Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr) {
+  ASSERT(instr != NULL);
+  LPointerMap* pointers = instr->pointer_map();
+  ASSERT(pointers != NULL);
+  RecordPosition(pointers->position());
+
+  __ CallRuntime(function, num_arguments);
+  // Runtime calls to Throw are not supposed to ever return at the
+  // call site, so don't register lazy deoptimization for these. We do
+  // however have to record a safepoint since throwing exceptions can
+  // cause garbage collections.
+  if (!instr->IsThrow()) {
+    RegisterLazyDeoptimization(instr);
+  } else {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
+  }
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+  // Create the environment to bail out to. If the call has side effects,
+  // execution has to continue after the call; otherwise execution can
+  // continue from a previous bailout point, repeating the call.
+  LEnvironment* deoptimization_environment;
+  if (instr->HasDeoptimizationEnvironment()) {
+    deoptimization_environment = instr->deoptimization_environment();
+  } else {
+    deoptimization_environment = instr->environment();
+  }
+
+  RegisterEnvironmentForDeoptimization(deoptimization_environment);
+  RecordSafepoint(instr->pointer_map(),
+                  deoptimization_environment->deoptimization_index());
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+    }
+    Translation translation(&translations_, frame_count);
+    environment->WriteTranslation(this, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    environment->Register(deoptimization_index, translation.index());
+    deoptimizations_.Add(environment);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+  RegisterEnvironmentForDeoptimization(environment);
+  ASSERT(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(entry != NULL);
+  if (entry == NULL) {
+    Abort("bailout was not prepared");
+    return;
+  }
+
+  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on ARM.
+
+  if (FLAG_deopt_every_n_times == 1 &&
+      info_->shared_info()->opt_count() == id) {
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+    return;
+  }
+
+  if (cc == no_condition) {
+    if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
+    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+  } else {
+    if (FLAG_trap_on_deopt) {
+      Label done;
+      __ b(&done, NegateCondition(cc));
+      __ stop("trap_on_deopt");
+      __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+      __ bind(&done);
+    } else {
+      __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc);
+    }
+  }
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+  int length = deoptimizations_.length();
+  if (length == 0) return;
+  ASSERT(FLAG_deopt);
+  Handle<DeoptimizationInputData> data =
+      Factory::NewDeoptimizationInputData(length, TENURED);
+
+  data->SetTranslationByteArray(*translations_.CreateByteArray());
+  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+
+  Handle<FixedArray> literals =
+      Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+  for (int i = 0; i < deoptimization_literals_.length(); i++) {
+    literals->set(i, *deoptimization_literals_[i]);
+  }
+  data->SetLiteralArray(*literals);
+
+  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+  // Populate the deoptimization entries.
+  for (int i = 0; i < length; i++) {
+    LEnvironment* env = deoptimizations_[i];
+    data->SetAstId(i, Smi::FromInt(env->ast_id()));
+    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+    data->SetArgumentsStackHeight(i,
+                                  Smi::FromInt(env->arguments_stack_height()));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = deoptimization_literals_.length();
+  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.Add(literal);
+  return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+  ASSERT(deoptimization_literals_.length() == 0);
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures =
+      chunk()->inlined_closures();
+
+  for (int i = 0, length = inlined_closures->length();
+       i < length;
+       i++) {
+    DefineDeoptimizationLiteral(inlined_closures->at(i));
+  }
+
+  inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               int deoptimization_index) {
+  const ZoneList<LOperand*>* operands = pointers->operands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+                                                    deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            int deoptimization_index) {
+  const ZoneList<LOperand*>* operands = pointers->operands();
+  Safepoint safepoint =
+      safepoints_.DefineSafepointWithRegisters(
+          masm(), arguments, deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister()) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
+    }
+  }
+  // Register cp always contains a pointer to the context.
+  safepoint.DefinePointerRegister(cp);
+}
+
+
+void LCodeGen::RecordPosition(int position) {
+  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  if (label->is_loop_header()) {
+    Comment(";;; B%d - LOOP entry", label->block_id());
+  } else {
+    Comment(";;; B%d", label->block_id());
+  }
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  LCodeGen::DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  // d0 must always be a scratch register.
+  DoubleRegister dbl_scratch = d0;
+  LUnallocated marker_operand(LUnallocated::NONE);
+
+  Register core_scratch = r9;
+  bool destroys_core_scratch = false;
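+  // Moves that the gap resolver routes through marker_operand (to break
+  // cycles) go through core_scratch for core values and stack slots, and
+  // through dbl_scratch for double values.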
+
+  LGapResolver resolver(move->move_operands(), &marker_operand);
+  const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
+  for (int i = moves->length() - 1; i >= 0; --i) {
+    LMoveOperands move = moves->at(i);
+    LOperand* from = move.from();
+    LOperand* to = move.to();
+    ASSERT(!from->IsDoubleRegister() ||
+           !ToDoubleRegister(from).is(dbl_scratch));
+    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
+    ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch));
+    ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch));
+    if (from == &marker_operand) {
+      if (to->IsRegister()) {
+        __ mov(ToRegister(to), core_scratch);
+        ASSERT(destroys_core_scratch);
+      } else if (to->IsStackSlot()) {
+        __ str(core_scratch, ToMemOperand(to));
+        ASSERT(destroys_core_scratch);
+      } else if (to->IsDoubleRegister()) {
+        __ vmov(ToDoubleRegister(to), dbl_scratch);
+      } else {
+        ASSERT(to->IsDoubleStackSlot());
+        // TODO(regis): Why is vstr not taking a MemOperand?
+        // __ vstr(dbl_scratch, ToMemOperand(to));
+        MemOperand to_operand = ToMemOperand(to);
+        __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
+      }
+    } else if (to == &marker_operand) {
+      if (from->IsRegister() || from->IsConstantOperand()) {
+        __ mov(core_scratch, ToOperand(from));
+        destroys_core_scratch = true;
+      } else if (from->IsStackSlot()) {
+        __ ldr(core_scratch, ToMemOperand(from));
+        destroys_core_scratch = true;
+      } else if (from->IsDoubleRegister()) {
+        __ vmov(dbl_scratch, ToDoubleRegister(from));
+      } else {
+        ASSERT(from->IsDoubleStackSlot());
+        // TODO(regis): Why is vldr not taking a MemOperand?
+        // __ vldr(dbl_scratch, ToMemOperand(from));
+        MemOperand from_operand = ToMemOperand(from);
+        __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
+      }
+    } else if (from->IsConstantOperand()) {
+      if (to->IsRegister()) {
+        __ mov(ToRegister(to), ToOperand(from));
+      } else {
+        ASSERT(to->IsStackSlot());
+        __ mov(ip, ToOperand(from));
+        __ str(ip, ToMemOperand(to));
+      }
+    } else if (from->IsRegister()) {
+      if (to->IsRegister()) {
+        __ mov(ToRegister(to), ToOperand(from));
+      } else {
+        ASSERT(to->IsStackSlot());
+        __ str(ToRegister(from), ToMemOperand(to));
+      }
+    } else if (to->IsRegister()) {
+      ASSERT(from->IsStackSlot());
+      __ ldr(ToRegister(to), ToMemOperand(from));
+    } else if (from->IsStackSlot()) {
+      ASSERT(to->IsStackSlot());
+      __ ldr(ip, ToMemOperand(from));
+      __ str(ip, ToMemOperand(to));
+    } else if (from->IsDoubleRegister()) {
+      if (to->IsDoubleRegister()) {
+        __ vmov(ToDoubleRegister(to), ToDoubleRegister(from));
+      } else {
+        ASSERT(to->IsDoubleStackSlot());
+        // TODO(regis): Why is vstr not taking a MemOperand?
+        // __ vstr(dbl_scratch, ToMemOperand(to));
+        MemOperand to_operand = ToMemOperand(to);
+        __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset());
+      }
+    } else if (to->IsDoubleRegister()) {
+      ASSERT(from->IsDoubleStackSlot());
+      // TODO(regis): Why is vldr not taking a MemOperand?
+      // __ vldr(ToDoubleRegister(to), ToMemOperand(from));
+      MemOperand from_operand = ToMemOperand(from);
+      __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset());
+    } else {
+      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
+      // TODO(regis): Why is vldr not taking a MemOperand?
+      // __ vldr(dbl_scratch, ToMemOperand(from));
+      MemOperand from_operand = ToMemOperand(from);
+      __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
+      // TODO(regis): Why is vstr not taking a MemOperand?
+      // __ vstr(dbl_scratch, ToMemOperand(to));
+      MemOperand to_operand = ToMemOperand(to);
+      __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
+    }
+  }
+
+  if (destroys_core_scratch) {
+    __ ldr(core_scratch, MemOperand(fp, -kPointerSize));
+  }
+
+  LInstruction* next = GetNextInstruction();
+  if (next != NULL && next->IsLazyBailout()) {
+    int pc = masm()->pc_offset();
+    safepoints_.SetPcAfterGap(pc);
+  }
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+
+  LInstruction* next = GetNextInstruction();
+  if (next != NULL && next->IsLazyBailout()) {
+    int pc = masm()->pc_offset();
+    safepoints_.SetPcAfterGap(pc);
+  }
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  Abort("DoCallStub unimplemented.");
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  Abort("DoModI unimplemented.");
+}
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+  Abort("DoDivI unimplemented.");
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register left = ToRegister(instr->left());
+  Register scratch = r9;
+  Register right = EmitLoadRegister(instr->right(), scratch);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      !instr->right()->IsConstantOperand()) {
+    __ orr(ToRegister(instr->temp()), left, right);
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    // scratch:left = left * right.
+    __ smull(left, scratch, left, right);
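+    // The product overflowed the int32 result iff the high word in scratch
+    // is not the sign extension of the low word in left.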
+    __ mov(ip, Operand(left, ASR, 31));
+    __ cmp(ip, Operand(scratch));
+    DeoptimizeIf(ne, instr->environment());
+  } else {
+    __ mul(left, left, right);
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Bail out if the result is supposed to be negative zero.
+    Label done;
+    __ tst(left, Operand(left));
+    __ b(ne, &done);
+    if (instr->right()->IsConstantOperand()) {
+      if (ToInteger32(LConstantOperand::cast(instr->right())) < 0) {
+        DeoptimizeIf(no_condition, instr->environment());
+      }
+    } else {
+      // Test the non-zero operand for negative sign.
+      __ cmp(ToRegister(instr->temp()), Operand(0));
+      DeoptimizeIf(mi, instr->environment());
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+  ASSERT(left->IsRegister());
+  Register result = ToRegister(left);
+  Register right_reg = EmitLoadRegister(right, ip);
+  switch (instr->op()) {
+    case Token::BIT_AND:
+      __ and_(result, ToRegister(left), Operand(right_reg));
+      break;
+    case Token::BIT_OR:
+      __ orr(result, ToRegister(left), Operand(right_reg));
+      break;
+    case Token::BIT_XOR:
+      __ eor(result, ToRegister(left), Operand(right_reg));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+  ASSERT(left->IsRegister());
+  Register result = ToRegister(left);
+  if (right->IsRegister()) {
+    // Mask the right operand.
+    __ and_(r9, ToRegister(right), Operand(0x1F));
+    switch (instr->op()) {
+      case Token::SAR:
+        __ mov(result, Operand(result, ASR, r9));
+        break;
+      case Token::SHR:
+        if (instr->can_deopt()) {
+          __ mov(result, Operand(result, LSR, r9), SetCC);
+          DeoptimizeIf(mi, instr->environment());
+        } else {
+          __ mov(result, Operand(result, LSR, r9));
+        }
+        break;
+      case Token::SHL:
+        __ mov(result, Operand(result, LSL, r9));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    int value = ToInteger32(LConstantOperand::cast(right));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ mov(result, Operand(result, ASR, shift_count));
+        }
+        break;
+      case Token::SHR:
+        if (shift_count == 0 && instr->can_deopt()) {
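+          // A zero shift count leaves the value unchanged; the unsigned
+          // result must still be a non-negative int32, so deoptimize if
+          // the sign bit is set.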
+          __ tst(result, Operand(0x80000000));
+          DeoptimizeIf(ne, instr->environment());
+        } else {
+          __ mov(result, Operand(result, LSR, shift_count));
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          __ mov(result, Operand(result, LSL, shift_count));
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = EmitLoadRegister(instr->right(), ip);
+  ASSERT(instr->left()->Equals(instr->result()));
+  __ sub(left, left, right, SetCC);
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
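+    // SetCC above makes the sub update the condition flags; vs is taken on
+    // signed overflow.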
+    DeoptimizeIf(vs, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  Abort("DoConstantD unimplemented.");
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoArrayLength(LArrayLength* instr) {
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->value()->IsLoadElements()) {
+    // We load the length directly from the elements array.
+    Register elements = ToRegister(instr->input());
+    __ ldr(result, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  } else {
+    // Check that the receiver really is an array.
+    Register array = ToRegister(instr->input());
+    Register temporary = ToRegister(instr->temporary());
+    __ CompareObjectType(array, temporary, temporary, JS_ARRAY_TYPE);
+    DeoptimizeIf(ne, instr->environment());
+
+    // Load length directly from the array.
+    __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
+  }
+  Abort("DoArrayLength untested.");
+}
+
+
+void LCodeGen::DoValueOf(LValueOf* instr) {
+  Abort("DoValueOf unimplemented.");
+}
+
+
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->Equals(instr->result()));
+  __ mvn(ToRegister(input), Operand(ToRegister(input)));
+  Abort("DoBitNotI untested.");
+}
+
+
+void LCodeGen::DoThrow(LThrow* instr) {
+  Register input_reg = EmitLoadRegister(instr->input(), ip);
+  __ push(input_reg);
+  CallRuntime(Runtime::kThrow, 1, instr);
+
+  if (FLAG_debug_code) {
+    __ stop("Unreachable code.");
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+
+  Register right_reg = EmitLoadRegister(right, ip);
+  __ add(ToRegister(left), ToRegister(left), Operand(right_reg), SetCC);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(vs, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DoubleRegister left = ToDoubleRegister(instr->left());
+  DoubleRegister right = ToDoubleRegister(instr->right());
+  switch (instr->op()) {
+    case Token::ADD:
+      __ vadd(left, left, right);
+      break;
+    case Token::SUB:
+      __ vsub(left, left, right);
+      break;
+    case Token::MUL:
+      __ vmul(left, left, right);
+      break;
+    case Token::DIV:
+      __ vdiv(left, left, right);
+      break;
+    case Token::MOD: {
+      Abort("DoArithmeticD unimplemented for MOD.");
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  ASSERT(ToRegister(instr->left()).is(r1));
+  ASSERT(ToRegister(instr->right()).is(r0));
+  ASSERT(ToRegister(instr->result()).is(r0));
+
+  // TODO(regis): Implement TypeRecordingBinaryOpStub and replace current
+  // GenericBinaryOpStub:
+  // TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, r1, r0);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+int LCodeGen::GetNextEmittedBlock(int block) {
+  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
+    LLabel* label = chunk_->GetLabel(i);
+    if (!label->HasReplacement()) return i;
+  }
+  return -1;
+}
+
+
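+// Emit a branch on condition cc to left_block, otherwise to right_block.
+// Branches to the next block to be emitted fall through instead.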
+void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
+  int next_block = GetNextEmittedBlock(current_block_);
+  right_block = chunk_->LookupDestination(right_block);
+  left_block = chunk_->LookupDestination(left_block);
+
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+  } else if (right_block == next_block) {
+    __ b(cc, chunk_->GetAssemblyLabel(left_block));
+  } else {
+    __ b(cc, chunk_->GetAssemblyLabel(left_block));
+    __ b(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Representation r = instr->hydrogen()->representation();
+  if (r.IsInteger32()) {
+    Register reg = ToRegister(instr->input());
+    __ cmp(reg, Operand(0));
+    EmitBranch(true_block, false_block, nz);
+  } else if (r.IsDouble()) {
+    DoubleRegister reg = ToDoubleRegister(instr->input());
+    __ vcmp(reg, 0.0);
+    __ vmrs(pc);  // Move the VFP condition flags to the core status register.
+    EmitBranch(true_block, false_block, ne);
+  } else {
+    ASSERT(r.IsTagged());
+    Register reg = ToRegister(instr->input());
+    if (instr->hydrogen()->type().IsBoolean()) {
+      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+      __ cmp(reg, ip);
+      EmitBranch(true_block, false_block, eq);
+    } else {
+      Label* true_label = chunk_->GetAssemblyLabel(true_block);
+      Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      __ cmp(reg, ip);
+      __ b(eq, false_label);
+      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+      __ cmp(reg, ip);
+      __ b(eq, true_label);
+      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+      __ cmp(reg, ip);
+      __ b(eq, false_label);
+      __ cmp(reg, Operand(0));
+      __ b(eq, false_label);
+      __ tst(reg, Operand(kSmiTagMask));
+      __ b(eq, true_label);
+
+      // Test for double values. Zero is false.
+      Label call_stub;
+      DoubleRegister dbl_scratch = d0;
+      Register core_scratch = r9;
+      ASSERT(!reg.is(core_scratch));
+      __ ldr(core_scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+      __ cmp(core_scratch, Operand(ip));
+      __ b(ne, &call_stub);
+      __ sub(ip, reg, Operand(kHeapObjectTag));
+      __ vldr(dbl_scratch, ip, HeapNumber::kValueOffset);
+      __ vcmp(dbl_scratch, 0.0);
+      __ vmrs(pc);  // Move vector status bits to normal status bits.
+      __ b(eq, false_label);
+      __ b(vs, false_label);  // NaN -> false.
+      __ b(true_label);
+
+      // The conversion stub doesn't cause garbage collections so it's
+      // safe to not record a safepoint after the call.
+      __ bind(&call_stub);
+      ToBooleanStub stub(reg);
+      RegList saved_regs = kJSCallerSaved | kCalleeSaved;
+      __ stm(db_w, sp, saved_regs);
+      __ CallStub(&stub);
+      __ cmp(reg, Operand(0));
+      __ ldm(ia_w, sp, saved_regs);
+      EmitBranch(true_block, false_block, nz);
+    }
+  }
+}
+
+
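+// Jumps unconditionally to the given block, unless it is the next block to
+// be emitted, in which case the jump is elided.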
+void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+  // TODO(srdjan): Perform stack overflow check if this goto needs it
+  // before jumping.
+  block = chunk_->LookupDestination(block);
+  int next_block = GetNextEmittedBlock(current_block_);
+  if (block != next_block) {
+    __ jmp(chunk_->GetAssemblyLabel(block));
+  }
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
+  UNIMPLEMENTED();
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  // TODO(srdjan): Implement deferred stack check.
+  EmitGoto(instr->block_id(), NULL);
+}
+
+
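+// Maps a comparison token to the ARM condition to branch on; the unsigned
+// variants (lo, hi, ls, hs) are selected when is_unsigned is true.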
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = no_condition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? lo : lt;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? hi : gt;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? ls : le;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? hs : ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
+  __ cmp(ToRegister(left), ToOperand(right));
+  Abort("EmitCmpI untested.");
+}
+
+
+void LCodeGen::DoCmpID(LCmpID* instr) {
+  Abort("DoCmpID unimplemented.");
+}
+
+
+void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+  Abort("DoCmpIDAndBranch unimplemented.");
+}
+
+
+void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+  Register result = ToRegister(instr->result());
+
+  __ cmp(left, Operand(right));
+  __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
+  Abort("DoCmpJSObjectEq untested.");
+}
+
+
+void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+  Abort("DoCmpJSObjectEqAndBranch unimplemented.");
+}
+
+
+void LCodeGen::DoIsNull(LIsNull* instr) {
+  Abort("DoIsNull unimplemented.");
+}
+
+
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+  Register reg = ToRegister(instr->input());
+
+  // TODO(fsc): If the expression is known to be a smi, then it's
+  // definitely not null. Jump to the false block.
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(reg, ip);
+  if (instr->is_strict()) {
+    EmitBranch(true_block, false_block, eq);
+  } else {
+    Label* true_label = chunk_->GetAssemblyLabel(true_block);
+    Label* false_label = chunk_->GetAssemblyLabel(false_block);
+    __ b(eq, true_label);
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ cmp(reg, ip);
+    __ b(eq, true_label);
+    __ tst(reg, Operand(kSmiTagMask));
+    __ b(eq, false_label);
+    // Check for undetectable objects by looking in the bit field in
+    // the map. The object has already been smi checked.
+    Register scratch = ToRegister(instr->temp());
+    __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
+    EmitBranch(true_block, false_block, ne);
+  }
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+                                 Register temp1,
+                                 Register temp2,
+                                 Label* is_not_object,
+                                 Label* is_object) {
+  Abort("EmitIsObject unimplemented.");
+  return ne;
+}
+
+
+void LCodeGen::DoIsObject(LIsObject* instr) {
+  Abort("DoIsObject unimplemented.");
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+  Abort("DoIsObjectAndBranch unimplemented.");
+}
+
+
+void LCodeGen::DoIsSmi(LIsSmi* instr) {
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  Register result = ToRegister(instr->result());
+  Register input_reg = EmitLoadRegister(instr->input(), ip);
+  __ tst(input_reg, Operand(kSmiTagMask));
+  __ LoadRoot(result, Heap::kTrueValueRootIndex);
+  Label done;
+  __ b(eq, &done);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Register input_reg = EmitLoadRegister(instr->input(), ip);
+  __ tst(input_reg, Operand(kSmiTagMask));
+  EmitBranch(true_block, false_block, eq);
+}
+
+
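+// LHasInstanceType tests whether an instance type lies in the interval
+// [from, to] supplied by the hydrogen instruction. The interval collapses
+// to a single compare against TestType(): eq for [T, T], ls (unsigned <=)
+// for [FIRST_TYPE, T] and hs (unsigned >=) for [T, LAST_TYPE].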
+InstanceType LHasInstanceType::TestType() {
+  InstanceType from = hydrogen()->from();
+  InstanceType to = hydrogen()->to();
+  if (from == FIRST_TYPE) return to;
+  ASSERT(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+Condition LHasInstanceType::BranchCondition() {
+  InstanceType from = hydrogen()->from();
+  InstanceType to = hydrogen()->to();
+  if (from == to) return eq;
+  if (to == LAST_TYPE) return hs;
+  if (from == FIRST_TYPE) return ls;
+  UNREACHABLE();
+  return eq;
+}
+
+
+void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
+  Abort("DoHasInstanceType unimplemented.");
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+  Register temp = ToRegister(instr->temp());
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  __ tst(input, Operand(kSmiTagMask));
+  __ b(eq, false_label);
+
+  __ CompareObjectType(input, temp, temp, instr->TestType());
+  EmitBranch(true_block, false_block, instr->BranchCondition());
+}
+
+
+void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
+  Abort("DoHasCachedArrayIndex unimplemented.");
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Abort("DoHasCachedArrayIndexAndBranch unimplemented.");
+}
+
+
+// Branches to a label or falls through with the answer in the z flag.  Trashes
+// the temp registers, but not the input.  Only input and temp2 may alias.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  Abort("EmitClassOfTest unimplemented.");
+}
+
+
+void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
+  Abort("DoClassOfTest unimplemented.");
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Abort("DoClassOfTestAndBranch unimplemented.");
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Abort("DoCmpMapAndBranch unimplemented.");
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  // We expect object and function in registers r1 and r0.
+  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
+  // The stub returns zero in r0 when the two objects are related by
+  // instanceof and a non-zero smi otherwise.
+  __ tst(r0, r0);
+  __ mov(r0, Operand(Factory::true_value()), LeaveCC, eq);
+  __ mov(r0, Operand(Factory::false_value()), LeaveCC, ne);
+}
+
+
+void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
+  Abort("DoInstanceOfAndBranch unimplemented.");
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
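+  // The compare stub computes GT and LTE with the operands reversed, so
+  // reverse the condition for those two tokens before testing the result.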
+  Condition condition = ComputeCompareCondition(op);
+  if (op == Token::GT || op == Token::LTE) {
+    condition = ReverseCondition(condition);
+  }
+  __ cmp(r0, Operand(0));
+  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex,
+      condition);
+  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex,
+      NegateCondition(condition));
+}
+
+
+void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
+  Abort("DoCmpTAndBranch unimplemented.");
+}
+
+
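+// Emits the return sequence: tear down the frame, then drop the receiver
+// and the parameters. A function with two parameters, for example, pops
+// (2 + 1) * kPointerSize == 12 bytes; the extra slot is the receiver.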
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in r0.
+    __ push(r0);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+  int32_t sp_delta = (ParameterCount() + 1) * kPointerSize;
+  __ mov(sp, fp);
+  __ ldm(ia_w, sp, fp.bit() | lr.bit());
+  __ add(sp, sp, Operand(sp_delta));
+  __ Jump(lr);
+}
+
+
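+// Loads a global variable through its JSGlobalPropertyCell. When the
+// hydrogen instruction requests a hole check, loading the hole value
+// (the cell's property was deleted) deoptimizes.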
+void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
+  __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+  if (instr->hydrogen()->check_hole_value()) {
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(result, ip);
+    DeoptimizeIf(eq, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+  Register value = ToRegister(instr->input());
+  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
+  __ str(value, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  Abort("DoLoadNamedField unimplemented.");
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(r0));
+  ASSERT(ToRegister(instr->result()).is(r0));
+
+  // Name is always in r2.
+  __ mov(r2, Operand(instr->name()));
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadElements(LLoadElements* instr) {
+  Abort("DoLoadElements unimplemented.");
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Abort("DoAccessArgumentsAt unimplemented.");
+}
+
+
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+  Abort("DoLoadKeyedFastElement unimplemented.");
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(r1));
+  ASSERT(ToRegister(instr->key()).is(r0));
+
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Abort("DoArgumentsElements unimplemented.");
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Abort("DoArgumentsLength unimplemented.");
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Abort("DoApplyArguments unimplemented.");
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->input();
+  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+    Abort("DoPushArgument not implemented for double type.");
+  } else {
+    Register argument_reg = EmitLoadRegister(argument, ip);
+    __ push(argument_reg);
+  }
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register result = ToRegister(instr->result());
+  __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+}
+
+
+void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+  Register result = ToRegister(instr->result());
+  __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int arity,
+                                 LInstruction* instr) {
+  // Change context if needed.
+  bool change_context =
+      (graph()->info()->closure()->context() != function->context()) ||
+      scope()->contains_with() ||
+      (scope()->num_heap_slots() > 0);
+  if (change_context) {
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+  }
+
+  // Set r0 to the number of arguments if adaptation is not needed. Assumes
+  // that r0 is available to write to at this point.
+  if (!function->NeedsArgumentsAdaption()) {
+    __ mov(r0, Operand(arity));
+  }
+
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+
+  // Invoke function.
+  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  __ Call(ip);
+
+  // Setup deoptimization.
+  RegisterLazyDeoptimization(instr);
+
+  // Restore context.
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+  Abort("DoCallConstantFunction unimplemented.");
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+  Abort("DoDeferredMathAbsTaggedHeapNumber unimplemented.");
+}
+
+
+void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+  Abort("DoMathAbs unimplemented.");
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+  Abort("DoMathFloor unimplemented.");
+}
+
+
+void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+  Abort("DoMathSqrt unimplemented.");
+}
+
+
+void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathAbs:
+      DoMathAbs(instr);
+      break;
+    case kMathFloor:
+      DoMathFloor(instr);
+      break;
+    case kMathSqrt:
+      DoMathSqrt(instr);
+      break;
+    default:
+      Abort("Unimplemented type of LUnaryMathOperation.");
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+  Abort("DoCallKeyed unimplemented.");
+}
+
+
+void LCodeGen::DoCallNamed(LCallNamed* instr) {
+  ASSERT(ToRegister(instr->result()).is(r0));
+
+  int arity = instr->arity();
+  Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+  __ mov(r2, Operand(instr->name()));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // Restore context register.
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  Abort("DoCallFunction unimplemented.");
+}
+
+
+void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+  Abort("DoCallGlobal unimplemented.");
+}
+
+
+void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(r0));
+  __ mov(r1, Operand(instr->target()));
+  CallKnownFunction(instr->target(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+  ASSERT(ToRegister(instr->input()).is(r1));
+  ASSERT(ToRegister(instr->result()).is(r0));
+
+  Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+  __ mov(r0, Operand(instr->arity()));
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Abort("DoStoreNamedField unimplemented.");
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(r1));
+  ASSERT(ToRegister(instr->value()).is(r0));
+
+  // Name is always in r2.
+  __ mov(r2, Operand(instr->name()));
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  Abort("DoBoundsCheck unimplemented.");
+}
+
+
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+  Abort("DoStoreKeyedFastElement unimplemented.");
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(r2));
+  ASSERT(ToRegister(instr->key()).is(r1));
+  ASSERT(ToRegister(instr->value()).is(r0));
+
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  Abort("DoInteger32ToDouble unimplemented.");
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI: public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+   private:
+    LNumberTagI* instr_;
+  };
+
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
+
+  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+  __ SmiTag(reg, SetCC);
+  __ b(vs, deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
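+// Deferred code for LNumberTagI: called when SmiTag overflowed. Untagging
+// and flipping the sign bit recovers the original value, e.g. for
+// 0x50000000 tagging yields 0xA0000000 (overflow), asr #1 yields
+// 0xD0000000, and eor 0x80000000 restores 0x50000000. The recovered value
+// is then boxed in a newly allocated heap number.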
+void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+  Label slow;
+  Register reg = ToRegister(instr->input());
+  DoubleRegister dbl_scratch = d0;
+  SwVfpRegister flt_scratch = s0;
+
+  // Preserve the value of all registers.
+  __ PushSafepointRegisters();
+
+  // There was overflow, so bits 30 and 31 of the original integer
+  // disagree. Try to allocate a heap number in new space and store
+  // the value in there. If that fails, call the runtime system.
+  Label done;
+  __ SmiUntag(reg);
+  __ eor(reg, reg, Operand(0x80000000));
+  __ vmov(flt_scratch, reg);
+  __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+  if (FLAG_inline_new) {
+    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
+    if (!reg.is(r5)) __ mov(reg, r5);
+    __ b(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+
+  // TODO(3095996): Put a valid pointer value in the stack slot where the result
+  // register is stored, as this register is in the pointer map, but contains an
+  // integer value.
+  __ mov(ip, Operand(0));
+  int reg_stack_index = __ SafepointRegisterStackIndex(reg.code());
+  __ str(ip, MemOperand(sp, reg_stack_index * kPointerSize));
+
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  if (!reg.is(r0)) __ mov(reg, r0);
+
+  // Done. Store the value in dbl_scratch into the value field of the
+  // allocated heap number.
+  __ bind(&done);
+  __ sub(ip, reg, Operand(kHeapObjectTag));
+  __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
+  __ str(reg, MemOperand(sp, reg_stack_index * kPointerSize));
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD: public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+   private:
+    LNumberTagD* instr_;
+  };
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->input());
+  Register reg = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+  Register scratch = r9;
+
+  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+  } else {
+    __ jmp(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ sub(ip, reg, Operand(kHeapObjectTag));
+  __ vstr(input_reg, ip, HeapNumber::kValueOffset);
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ mov(reg, Operand(0));
+
+  __ PushSafepointRegisters();
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  int reg_stack_index = __ SafepointRegisterStackIndex(reg.code());
+  __ str(r0, MemOperand(sp, reg_stack_index * kPointerSize));
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+  __ SmiTag(ToRegister(input));
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Abort("DoSmiUntag unimplemented.");
+}
+
+
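+// Converts the tagged value in input_reg to a double in result_reg: smis
+// are untagged and converted, heap numbers are loaded directly, undefined
+// becomes NaN, and any other value deoptimizes.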
+void LCodeGen::EmitNumberUntagD(Register input_reg,
+                                DoubleRegister result_reg,
+                                LEnvironment* env) {
+  Register core_scratch = r9;
+  ASSERT(!input_reg.is(core_scratch));
+  SwVfpRegister flt_scratch = s0;
+  ASSERT(!result_reg.is(d0));
+
+  Label load_smi, heap_number, done;
+
+  // Smi check.
+  __ tst(input_reg, Operand(kSmiTagMask));
+  __ b(eq, &load_smi);
+
+  // Heap number map check.
+  __ ldr(core_scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+  __ cmp(core_scratch, Operand(ip));
+  __ b(eq, &heap_number);
+
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(input_reg, Operand(ip));
+  DeoptimizeIf(ne, env);
+
+  // Convert undefined to NaN.
+  __ LoadRoot(ip, Heap::kNanValueRootIndex);
+  __ sub(ip, ip, Operand(kHeapObjectTag));
+  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+  __ jmp(&done);
+
+  // Heap number to double register conversion.
+  __ bind(&heap_number);
+  __ sub(ip, input_reg, Operand(kHeapObjectTag));
+  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+  __ jmp(&done);
+
+  // Smi to double register conversion.
+  __ bind(&load_smi);
+  __ SmiUntag(input_reg);  // Untag smi before converting to float.
+  __ vmov(flt_scratch, input_reg);
+  __ vcvt_f64_s32(result_reg, flt_scratch);
+  __ SmiTag(input_reg);  // Retag smi.
+  __ bind(&done);
+}
+
+
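+// DoTaggedToI handles smis inline; heap objects branch to this deferred
+// code, which converts heap numbers (and, for truncating conversions,
+// undefined) and deoptimizes on anything else.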
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+      : LDeferredCode(codegen), instr_(instr) { }
+  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+  LTaggedToI* instr_;
+};
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  Label done;
+  Register input_reg = ToRegister(instr->input());
+  Register core_scratch = r9;
+  ASSERT(!input_reg.is(core_scratch));
+  DoubleRegister dbl_scratch = d0;
+  SwVfpRegister flt_scratch = s0;
+  DoubleRegister dbl_tmp = ToDoubleRegister(instr->temp());
+
+  // Heap number map check.
+  __ ldr(core_scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+  __ cmp(core_scratch, Operand(ip));
+
+  if (instr->truncating()) {
+    Label heap_number;
+    __ b(eq, &heap_number);
+    // Check for undefined. Undefined is converted to zero for truncating
+    // conversions.
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ cmp(input_reg, Operand(ip));
+    DeoptimizeIf(ne, instr->environment());
+    __ mov(input_reg, Operand(0));
+    __ b(&done);
+
+    __ bind(&heap_number);
+    __ sub(ip, input_reg, Operand(kHeapObjectTag));
+    __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset);
+    __ vcmp(dbl_tmp, 0.0);  // Sets overflow bit if NaN.
+    __ vcvt_s32_f64(flt_scratch, dbl_tmp);
+    __ vmov(input_reg, flt_scratch);  // 32-bit result of conversion.
+    __ vmrs(pc);  // Move vector status bits to normal status bits.
+    // Overflow bit is set if dbl_tmp is NaN.
+    __ cmn(input_reg, Operand(1), vc);  // 0x7fffffff + 1 -> overflow.
+    __ cmp(input_reg, Operand(1), vc);  // 0x80000000 - 1 -> overflow.
+    DeoptimizeIf(vs, instr->environment());  // Saturation may have occurred.
+
+  } else {
+    // Deoptimize if we don't have a heap number.
+    DeoptimizeIf(ne, instr->environment());
+
+    __ sub(ip, input_reg, Operand(kHeapObjectTag));
+    __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset);
+    __ vcvt_s32_f64(flt_scratch, dbl_tmp);
+    __ vmov(input_reg, flt_scratch);  // 32-bit result of conversion.
+    // Non-truncating conversion means that we cannot lose bits, so we convert
+    // back to check; note that using non-overlapping s and d regs would be
+    // slightly faster.
+    __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+    __ vcmp(dbl_scratch, dbl_tmp);
+    __ vmrs(pc);  // Move vector status bits to normal status bits.
+    DeoptimizeIf(ne, instr->environment());  // Not equal or unordered.
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ tst(input_reg, Operand(input_reg));
+      __ b(ne, &done);
+      __ vmov(lr, ip, dbl_tmp);
+      __ tst(ip, Operand(1 << 31));  // Test sign bit.
+      DeoptimizeIf(ne, instr->environment());
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  ASSERT(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+
+  // Smi check.
+  __ tst(input_reg, Operand(kSmiTagMask));
+  __ b(ne, deferred->entry());
+
+  // Smi to int32 conversion.
+  __ SmiUntag(input_reg);  // Untag smi.
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  LOperand* result = instr->result();
+  ASSERT(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  DoubleRegister result_reg = ToDoubleRegister(result);
+
+  EmitNumberUntagD(input_reg, result_reg, instr->environment());
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  Abort("DoDoubleToI unimplemented.");
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  __ tst(ToRegister(input), Operand(kSmiTagMask));
+  DeoptimizeIf(instr->condition(), instr->environment());
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Abort("DoCheckInstanceType unimplemented.");
+}
+
+
+void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+  ASSERT(instr->input()->IsRegister());
+  Register reg = ToRegister(instr->input());
+  __ cmp(reg, Operand(instr->hydrogen()->target()));
+  DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  Register reg = ToRegister(input);
+  __ ldr(r9, FieldMemOperand(reg, HeapObject::kMapOffset));
+  __ cmp(r9, Operand(instr->hydrogen()->map()));
+  DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::LoadPrototype(Register result,
+                             Handle<JSObject> prototype) {
+  Abort("LoadPrototype unimplemented.");
+}
+
+
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  Abort("DoCheckPrototypeMaps unimplemented.");
+}
+
+
+void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+  Abort("DoArrayLiteral unimplemented.");
+}
+
+
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+  Abort("DoObjectLiteral unimplemented.");
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  Abort("DoRegExpLiteral unimplemented.");
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  Abort("DoFunctionLiteral unimplemented.");
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  Abort("DoTypeof unimplemented.");
+}
+
+
+void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
+  Abort("DoTypeofIs unimplemented.");
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition final_branch_condition = EmitTypeofIs(true_label,
+                                                  false_label,
+                                                  input,
+                                                  instr->type_literal());
+
+  EmitBranch(true_block, false_block, final_branch_condition);
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+                                 Label* false_label,
+                                 Register input,
+                                 Handle<String> type_name) {
+  Condition final_branch_condition = no_condition;
+  Register core_scratch = r9;
+  ASSERT(!input.is(core_scratch));
+  if (type_name->Equals(Heap::number_symbol())) {
+    __ tst(input, Operand(kSmiTagMask));
+    __ b(eq, true_label);
+    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+    __ cmp(input, Operand(ip));
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(Heap::string_symbol())) {
+    __ tst(input, Operand(kSmiTagMask));
+    __ b(eq, false_label);
+    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ tst(ip, Operand(1 << Map::kIsUndetectable));
+    __ b(ne, false_label);
+    __ CompareInstanceType(input, core_scratch, FIRST_NONSTRING_TYPE);
+    final_branch_condition = lo;
+
+  } else if (type_name->Equals(Heap::boolean_symbol())) {
+    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+    __ cmp(input, ip);
+    __ b(eq, true_label);
+    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+    __ cmp(input, ip);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(Heap::undefined_symbol())) {
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ cmp(input, ip);
+    __ b(eq, true_label);
+    __ tst(input, Operand(kSmiTagMask));
+    __ b(eq, false_label);
+    // Check for undetectable objects => true.
+    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ tst(ip, Operand(1 << Map::kIsUndetectable));
+    final_branch_condition = ne;
+
+  } else if (type_name->Equals(Heap::function_symbol())) {
+    __ tst(input, Operand(kSmiTagMask));
+    __ b(eq, false_label);
+    __ CompareObjectType(input, input, core_scratch, JS_FUNCTION_TYPE);
+    __ b(eq, true_label);
+    // Regular expressions => 'function' (they are callable).
+    __ CompareInstanceType(input, core_scratch, JS_REGEXP_TYPE);
+    final_branch_condition = eq;
+
+  } else if (type_name->Equals(Heap::object_symbol())) {
+    __ tst(input, Operand(kSmiTagMask));
+    __ b(eq, false_label);
+    __ LoadRoot(ip, Heap::kNullValueRootIndex);
+    __ cmp(input, ip);
+    __ b(eq, true_label);
+    // Regular expressions => 'function', not 'object'.
+    __ CompareObjectType(input, input, core_scratch, JS_REGEXP_TYPE);
+    __ b(eq, false_label);
+    // Check for undetectable objects => false.
+    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ tst(ip, Operand(1 << Map::kIsUndetectable));
+    __ b(ne, false_label);
+    // Check for JS objects => true.
+    __ CompareInstanceType(input, core_scratch, FIRST_JS_OBJECT_TYPE);
+    __ b(lo, false_label);
+    __ CompareInstanceType(input, core_scratch, LAST_JS_OBJECT_TYPE);
+    final_branch_condition = ls;
+
+  } else {
+    final_branch_condition = ne;
+    __ b(false_label);
+    // A dead branch instruction will be generated after this point.
+  }
+
+  return final_branch_condition;
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  // No code for lazy bailout instruction. Used to capture environment after a
+  // call for populating the safepoint data with deoptimization data.
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  DeoptimizeIf(no_condition, instr->environment());
+}
+
+
+void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
+  Abort("DoDeleteProperty unimplemented.");
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  // Perform stack overflow check.
+  Label ok;
+  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+  __ cmp(sp, Operand(ip));
+  __ b(hs, &ok);
+  StackCheckStub stub;
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&ok);
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  Abort("DoOsrEntry unimplemented.");
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
new file mode 100644
index 0000000..541a699
--- /dev/null
+++ b/src/arm/lithium-codegen-arm.h
@@ -0,0 +1,274 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_LITHIUM_CODEGEN_ARM_H_
+#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
+
+#include "arm/lithium-arm.h"
+
+#include "deoptimizer.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+
+class LCodeGen BASE_EMBEDDED {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : chunk_(chunk),
+        masm_(assembler),
+        info_(info),
+        current_block_(-1),
+        current_instruction_(-1),
+        instructions_(chunk->instructions()),
+        deoptimizations_(4),
+        deoptimization_literals_(8),
+        inlined_function_count_(0),
+        scope_(chunk->graph()->info()->scope()),
+        status_(UNUSED),
+        deferred_(8),
+        osr_pc_offset_(-1) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+  void DoDeferredNumberTagI(LNumberTagI* instr);
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+  void DoDeferredStackCheck(LGoto* instr);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  enum Status {
+    UNUSED,
+    GENERATING,
+    DONE,
+    ABORTED
+  };
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_generating() const { return status_ == GENERATING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  LChunk* chunk() const { return chunk_; }
+  Scope* scope() const { return scope_; }
+  HGraph* graph() const { return chunk_->graph(); }
+  MacroAssembler* masm() const { return masm_; }
+
+  int GetNextEmittedBlock(int block);
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int StackSlotCount() const { return chunk()->spill_slot_count(); }
+  int ParameterCount() const { return scope()->num_parameters(); }
+
+  void Abort(const char* format, ...);
+  void Comment(const char* format, ...);
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  bool GeneratePrologue();
+  bool GenerateBody();
+  bool GenerateDeferredCode();
+  bool GenerateSafepointTable();
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+  void CallRuntime(Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in r1.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int arity,
+                         LInstruction* instr);
+
+  void LoadPrototype(Register result, Handle<JSObject> prototype);
+
+  void RegisterLazyDeoptimization(LInstruction* instr);
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+  void DeoptimizeIf(Condition cc, LEnvironment* environment);
+
+  void AddToTranslation(Translation* translation,
+                        LOperand* op,
+                        bool is_tagged);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+  Register ToRegister(int index) const;
+  DoubleRegister ToDoubleRegister(int index) const;
+
+  // LOperand must be a register.
+  Register ToRegister(LOperand* op) const;
+
+  // LOperand is loaded into scratch, unless already a register.
+  Register EmitLoadRegister(LOperand* op, Register scratch);
+
+  // LOperand must be a double register.
+  DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+  // LOperand is loaded into dbl_scratch, unless already a double register.
+  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+                                        SwVfpRegister flt_scratch,
+                                        DoubleRegister dbl_scratch);
+
+  int ToInteger32(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op);
+  MemOperand ToMemOperand(LOperand* op) const;
+
+  // Specific math operations - used from DoUnaryMathOperation.
+  void DoMathAbs(LUnaryMathOperation* instr);
+  void DoMathFloor(LUnaryMathOperation* instr);
+  void DoMathSqrt(LUnaryMathOperation* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    int deoptimization_index);
+  void RecordPosition(int position);
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+  void EmitBranch(int left_block, int right_block, Condition cc);
+  void EmitCmpI(LOperand* left, LOperand* right);
+  void EmitNumberUntagD(Register input,
+                        DoubleRegister result,
+                        LEnvironment* env);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitTypeofIs(Label* true_label, Label* false_label,
+                         Register input, Handle<String> type_name);
+
+  // Emits optimized code for %_IsObject(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsObject(Register input,
+                         Register temp1,
+                         Register temp2,
+                         Label* is_not_object,
+                         Label* is_object);
+
+  LChunk* const chunk_;
+  MacroAssembler* const masm_;
+  CompilationInfo* const info_;
+
+  int current_block_;
+  int current_instruction_;
+  const ZoneList<LInstruction*>* instructions_;
+  ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<Handle<Object> > deoptimization_literals_;
+  int inlined_function_count_;
+  Scope* const scope_;
+  Status status_;
+  TranslationBuffer translations_;
+  ZoneList<LDeferredCode*> deferred_;
+  int osr_pc_offset_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
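+// Deferred code is emitted after the main body, out of line. A typical
+// subclass (cf. DeferredNumberTagI in lithium-codegen-arm.cc) stores the
+// instruction and calls back into the code generator from Generate():
+//
+//   class DeferredNumberTagI: public LDeferredCode {
+//    public:
+//     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+//         : LDeferredCode(codegen), instr_(instr) { }
+//     virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+//    private:
+//     LNumberTagI* instr_;
+//   };
+//
+// The fast path branches to entry() for the slow case and binds exit()
+// where the deferred code should resume.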
+class LDeferredCode: public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen), external_exit_(NULL) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() { }
+  virtual void Generate() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_LITHIUM_CODEGEN_ARM_H_
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 91a4607..4a13146 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -171,13 +171,6 @@
 }
 
 
-void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
-  LoadRoot(ip, Heap::kStackLimitRootIndex);
-  cmp(sp, Operand(ip));
-  b(lo, on_stack_overflow);
-}
-
-
 void MacroAssembler::Drop(int count, Condition cond) {
   if (count > 0) {
     add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
@@ -185,6 +178,12 @@
 }
 
 
+void MacroAssembler::Ret(int drop, Condition cond) {
+  Drop(drop, cond);
+  Ret(cond);
+}
+
+
 void MacroAssembler::Swap(Register reg1,
                           Register reg2,
                           Register scratch,
@@ -447,6 +446,34 @@
 }
 
 
+// Push and pop all registers that can hold pointers.
+void MacroAssembler::PushSafepointRegisters() {
+  // Safepoints expect a block of contiguous register values starting with r0:
+  ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
+  // Safepoints expect a block of kNumSafepointRegisters values on the
+  // stack, so adjust the stack for unsaved registers.
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  ASSERT(num_unsaved >= 0);
+  sub(sp, sp, Operand(num_unsaved * kPointerSize));
+  stm(db_w, sp, kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  ldm(ia_w, sp, kSafepointSavedRegisters);
+  add(sp, sp, Operand(num_unsaved * kPointerSize));
+}
+
+
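+// Returns the index, counted in pointers from sp, of the slot where the
+// register with the given code was saved by PushSafepointRegisters.
+// stm(db_w) stores the lowest-numbered register at the lowest address, so
+// the slot index equals the register code; for example r5's slot is
+//   MemOperand(sp, SafepointRegisterStackIndex(r5.code()) * kPointerSize)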
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+  // The registers are pushed starting with the highest encoding,
+  // which means that lowest encodings are closest to the stack pointer.
+  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
+  return reg_code;
+}
+
+
 void MacroAssembler::Ldrd(Register dst1, Register dst2,
                           const MemOperand& src, Condition cond) {
   ASSERT(src.rm().is(no_reg));
@@ -515,12 +542,8 @@
 }
 
 
-void MacroAssembler::EnterExitFrame() {
-  // Compute the argv pointer and keep it in a callee-saved register.
+void MacroAssembler::EnterExitFrame(bool save_doubles) {
   // r0 is argc.
-  add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
-  sub(r6, r6, Operand(kPointerSize));
-
   // Compute callee's stack pointer before making changes and save it as
   // ip register so that it is restored as sp register on exit, thereby
   // popping the args.
@@ -528,6 +551,9 @@
   // ip = sp + kPointerSize * #args;
   add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
 
+  // Compute the argv pointer and keep it in a callee-saved register.
+  sub(r6, ip, Operand(kPointerSize));
+
   // Prepare the stack to be aligned when calling into C. After this point there
   // are 5 pushes before the call into C, so the stack needs to be aligned after
   // 5 pushes.
@@ -558,6 +584,28 @@
   // Setup argc and the builtin function in callee-saved registers.
   mov(r4, Operand(r0));
   mov(r5, Operand(r1));
+
+  // Optionally save all double registers.
+  if (save_doubles) {
+    // TODO(regis): Use vstrm instruction.
+    // The stack alignment code above made sp unaligned, so add space for one
+    // more double register and use aligned addresses.
+    ASSERT(kDoubleSize == frame_alignment);
+    // Mark the frame as containing doubles by pushing a non-valid return
+    // address, i.e. 0.
+    ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
+    mov(ip, Operand(0));  // Marker and alignment word.
+    push(ip);
+    int space = DwVfpRegister::kNumRegisters * kDoubleSize + kPointerSize;
+    sub(sp, sp, Operand(space));
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      vstr(reg, sp, i * kDoubleSize + kPointerSize);
+    }
+    // Note that d0 will be accessible at fp - 2*kPointerSize -
+    // DwVfpRegister::kNumRegisters * kDoubleSize, since the code slot and the
+    // alignment word were pushed after the fp.
+  }
 }
 
 
@@ -592,7 +640,18 @@
 }
 
 
-void MacroAssembler::LeaveExitFrame() {
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+  // Optionally restore all double registers.
+  if (save_doubles) {
+    // TODO(regis): Use vldrm instruction.
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      // Register d15 is just below the marker.
+      const int offset = ExitFrameConstants::kMarkerOffset;
+      vldr(reg, fp, (i - DwVfpRegister::kNumRegisters) * kDoubleSize + offset);
+    }
+  }
+
   // Clear top frame.
   mov(r3, Operand(0, RelocInfo::NONE));
   mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@@ -756,7 +815,47 @@
   // Invoke the cached code.
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+    InvokeCode(r3, expected, actual, flag);
+  } else {
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+  }
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+                                          Register map,
+                                          Register scratch,
+                                          Label* fail) {
+  ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+  IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+                                            Register scratch,
+                                            Label* fail) {
+  ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
+  b(lt, fail);
+  cmp(scratch, Operand(LAST_JS_OBJECT_TYPE));
+  b(gt, fail);
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object,
+                                          Register scratch,
+                                          Label* fail) {
+  ASSERT(kNotStringTag != 0);
+
+  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  tst(scratch, Operand(kIsNotStringMask));
+  b(nz, fail);
 }
 
 
@@ -920,6 +1019,7 @@
   }
 
   ASSERT(!result.is(scratch1));
+  ASSERT(!result.is(scratch2));
   ASSERT(!scratch1.is(scratch2));
 
   // Make object size into bytes.
@@ -928,38 +1028,55 @@
   }
   ASSERT_EQ(0, object_size & kObjectAlignmentMask);
 
-  // Load address of new object into result and allocation top address into
-  // scratch1.
+  // Check relative positions of allocation top and limit addresses.
+  // The values must be adjacent in memory to allow the use of LDM.
+  // Also, assert that the registers are numbered such that the values
+  // are loaded in the correct order.
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address();
-  mov(scratch1, Operand(new_space_allocation_top));
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  intptr_t top   =
+      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+  intptr_t limit =
+      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+  ASSERT((limit - top) == kPointerSize);
+  ASSERT(result.code() < ip.code());
+
+  // Set up allocation top address and object size registers.
+  Register topaddr = scratch1;
+  Register obj_size_reg = scratch2;
+  mov(topaddr, Operand(new_space_allocation_top));
+  mov(obj_size_reg, Operand(object_size));
+
+  // This code stores a temporary value in ip. This is OK, as the code below
+  // does not need ip for implicit literal generation.
   if ((flags & RESULT_CONTAINS_TOP) == 0) {
-    ldr(result, MemOperand(scratch1));
-  } else if (FLAG_debug_code) {
-    // Assert that result actually contains top on entry. scratch2 is used
-    // immediately below so this use of scratch2 does not cause difference with
-    // respect to register content between debug and release mode.
-    ldr(scratch2, MemOperand(scratch1));
-    cmp(result, scratch2);
-    Check(eq, "Unexpected allocation top");
+    // Load allocation top into result and allocation limit into ip.
+    ldm(ia, topaddr, result.bit() | ip.bit());
+  } else {
+    if (FLAG_debug_code) {
+      // Assert that result actually contains top on entry. ip is used
+      // immediately below, so this extra use of ip does not make the
+      // register contents differ between debug and release builds.
+      ldr(ip, MemOperand(topaddr));
+      cmp(result, ip);
+      Check(eq, "Unexpected allocation top");
+    }
+    // Load allocation limit into ip. Result already contains allocation top.
+    ldr(ip, MemOperand(topaddr, limit - top));
   }
 
   // Calculate new top and bail out if new space is exhausted. Use result
   // to calculate the new top.
-  ExternalReference new_space_allocation_limit =
-      ExternalReference::new_space_allocation_limit_address();
-  mov(scratch2, Operand(new_space_allocation_limit));
-  ldr(scratch2, MemOperand(scratch2));
-  add(result, result, Operand(object_size));
-  cmp(result, Operand(scratch2));
+  add(scratch2, result, Operand(obj_size_reg));
+  cmp(scratch2, Operand(ip));
   b(hi, gc_required);
-  str(result, MemOperand(scratch1));
+  str(scratch2, MemOperand(topaddr));
 
-  // Tag and adjust back to start of new object.
+  // Tag object if requested.
   if ((flags & TAG_OBJECT) != 0) {
-    sub(result, result, Operand(object_size - kHeapObjectTag));
-  } else {
-    sub(result, result, Operand(object_size));
+    add(result, result, Operand(kHeapObjectTag));
   }
 }
 
@@ -981,53 +1098,69 @@
     return;
   }
 
+  // Assert that the register arguments are different and that none of
+  // them are ip. ip is used explicitly in the code generated below.
   ASSERT(!result.is(scratch1));
+  ASSERT(!result.is(scratch2));
   ASSERT(!scratch1.is(scratch2));
+  ASSERT(!result.is(ip));
+  ASSERT(!scratch1.is(ip));
+  ASSERT(!scratch2.is(ip));
 
-  // Load address of new object into result and allocation top address into
-  // scratch1.
+  // Check relative positions of allocation top and limit addresses.
+  // The values must be adjacent in memory to allow the use of LDM.
+  // Also, assert that the registers are numbered such that the values
+  // are loaded in the correct order.
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address();
-  mov(scratch1, Operand(new_space_allocation_top));
+  ExternalReference new_space_allocation_limit =
+      ExternalReference::new_space_allocation_limit_address();
+  intptr_t top =
+      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+  intptr_t limit =
+      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+  ASSERT((limit - top) == kPointerSize);
+  ASSERT(result.code() < ip.code());
+
+  // Set up allocation top address.
+  Register topaddr = scratch1;
+  mov(topaddr, Operand(new_space_allocation_top));
+
+  // This code stores a temporary value in ip. This is OK, as the code below
+  // does not need ip for implicit literal generation.
   if ((flags & RESULT_CONTAINS_TOP) == 0) {
-    ldr(result, MemOperand(scratch1));
-  } else if (FLAG_debug_code) {
-    // Assert that result actually contains top on entry. scratch2 is used
-    // immediately below so this use of scratch2 does not cause difference with
-    // respect to register content between debug and release mode.
-    ldr(scratch2, MemOperand(scratch1));
-    cmp(result, scratch2);
-    Check(eq, "Unexpected allocation top");
+    // Load allocation top into result and allocation limit into ip.
+    ldm(ia, topaddr, result.bit() | ip.bit());
+  } else {
+    if (FLAG_debug_code) {
+      // Assert that result actually contains top on entry. ip is used
+      // immediately below, so this extra use of ip does not make the
+      // register contents differ between debug and release builds.
+      ldr(ip, MemOperand(topaddr));
+      cmp(result, ip);
+      Check(eq, "Unexpected allocation top");
+    }
+    // Load allocation limit into ip. Result already contains allocation top.
+    ldr(ip, MemOperand(topaddr, limit - top));
   }
 
   // Calculate new top and bail out if new space is exhausted. Use result
-  // to calculate the new top. Object size is in words so a shift is required to
-  // get the number of bytes
-  ExternalReference new_space_allocation_limit =
-      ExternalReference::new_space_allocation_limit_address();
-  mov(scratch2, Operand(new_space_allocation_limit));
-  ldr(scratch2, MemOperand(scratch2));
+  // to calculate the new top. Object size may be in words so a shift is
+  // required to get the number of bytes.
   if ((flags & SIZE_IN_WORDS) != 0) {
-    add(result, result, Operand(object_size, LSL, kPointerSizeLog2));
+    add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2));
   } else {
-    add(result, result, Operand(object_size));
+    add(scratch2, result, Operand(object_size));
   }
-  cmp(result, Operand(scratch2));
+  cmp(scratch2, Operand(ip));
   b(hi, gc_required);
 
   // Update allocation top. scratch2 temporarily holds the new top.
   if (FLAG_debug_code) {
-    tst(result, Operand(kObjectAlignmentMask));
+    tst(scratch2, Operand(kObjectAlignmentMask));
     Check(eq, "Unaligned allocation in new space");
   }
-  str(result, MemOperand(scratch1));
-
-  // Adjust back to start of new object.
-  if ((flags & SIZE_IN_WORDS) != 0) {
-    sub(result, result, Operand(object_size, LSL, kPointerSizeLog2));
-  } else {
-    sub(result, result, Operand(object_size));
-  }
+  str(scratch2, MemOperand(topaddr));
 
   // Tag object if requested.
   if ((flags & TAG_OBJECT) != 0) {
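
Both AllocateInNewSpace paths above implement the same bump-pointer allocation: load the allocation top and limit (kept in adjacent words precisely so a single ldm can fetch both, per the ASSERT on limit - top), add the object size, branch to gc_required if the limit would be exceeded, and store back the new top. A minimal C++ sketch of that logic, with hypothetical names (AllocationInfo, Allocate) standing in for the generated code:

#include <cstddef>
#include <cstdint>

// Mirrors the adjacency checked by ASSERT((limit - top) == kPointerSize):
// top and limit are consecutive words, so one ldm loads both.
struct AllocationInfo {
  uintptr_t top;    // loaded into 'result'
  uintptr_t limit;  // loaded into 'ip'
};

const uintptr_t kHeapObjectTag = 1;  // matches V8's heap object tag

// Returns a tagged address, or 0 on the gc_required path.
uintptr_t Allocate(AllocationInfo* info, size_t object_size) {
  uintptr_t new_top = info->top + object_size;  // add(scratch2, result, ...)
  if (new_top > info->limit) return 0;          // cmp + b(hi, gc_required)
  uintptr_t result = info->top;
  info->top = new_top;                          // str(scratch2, MemOperand(topaddr))
  return result + kHeapObjectTag;               // TAG_OBJECT
}
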
@@ -1485,6 +1618,16 @@
 }
 
 
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+  Runtime::Function* function = Runtime::FunctionForId(id);
+  mov(r0, Operand(function->nargs));
+  mov(r1, Operand(ExternalReference(function)));
+  CEntryStub stub(1);
+  stub.SaveDoubles();
+  CallStub(&stub);
+}
+
+
 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                            int num_arguments) {
   mov(r0, Operand(num_arguments));
@@ -1747,18 +1890,6 @@
 }
 
 
-void MacroAssembler::AbortIfNotString(Register object) {
-  STATIC_ASSERT(kSmiTag == 0);
-  tst(object, Operand(kSmiTagMask));
-  Assert(ne, "Operand is not a string");
-  push(object);
-  ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
-  CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
-  pop(object);
-  Assert(lo, "Operand is not a string");
-}
-
-
 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
     Register first,
     Register second,
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 27d1671..97bbb2f 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -96,6 +96,7 @@
   // from the stack, clobbering only the sp register.
   void Drop(int count, Condition cond = al);
 
+  void Ret(int drop, Condition cond = al);
 
   // Swap two registers.  If the scratch register is omitted then a slightly
   // less efficient form using xor instead of mov is emitted.
@@ -224,6 +225,12 @@
     }
   }
 
+  // Push and pop the registers that can hold pointers, as defined by the
+  // RegList constant kSafepointSavedRegisters.
+  void PushSafepointRegisters();
+  void PopSafepointRegisters();
+  static int SafepointRegisterStackIndex(int reg_code);
+
   // Load two consecutive registers with two consecutive memory locations.
   void Ldrd(Register dst1,
             Register dst2,
@@ -237,11 +244,6 @@
             Condition cond = al);
 
   // ---------------------------------------------------------------------------
-  // Stack limit support
-
-  void StackLimitCheck(Label* on_stack_limit_hit);
-
-  // ---------------------------------------------------------------------------
   // Activation frames
 
   void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -254,10 +256,10 @@
   // Expects the number of arguments in register r0 and
   // the builtin function to call in register r1. Exits with argc in
   // r4, argv in r6, and the builtin function to call in r5.
-  void EnterExitFrame();
+  void EnterExitFrame(bool save_doubles);
 
   // Leave the current exit frame. Expects the return value in r0.
-  void LeaveExitFrame();
+  void LeaveExitFrame(bool save_doubles);
 
   // Get the actual activation frame alignment for target environment.
   static int ActivationFrameAlignment();
@@ -297,6 +299,18 @@
                       const ParameterCount& actual,
                       InvokeFlag flag);
 
+  void IsObjectJSObjectType(Register heap_object,
+                            Register map,
+                            Register scratch,
+                            Label* fail);
+
+  void IsInstanceJSObjectType(Register map,
+                              Register scratch,
+                              Label* fail);
+
+  void IsObjectJSStringType(Register object,
+                            Register scratch,
+                            Label* fail);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
@@ -575,6 +589,7 @@
 
   // Call a runtime routine.
   void CallRuntime(Runtime::Function* f, int num_arguments);
+  void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId fid, int num_arguments);
@@ -665,6 +680,14 @@
   // ---------------------------------------------------------------------------
   // Smi utilities
 
+  void SmiTag(Register reg, SBit s = LeaveCC) {
+    add(reg, reg, Operand(reg), s);
+  }
+
+  void SmiUntag(Register reg) {
+    mov(reg, Operand(reg, ASR, kSmiTagSize));
+  }
+
   // Jump if either of the registers contains a non-smi.
   void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
   // Jump if either of the registers contains a smi.
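
The SmiTag/SmiUntag helpers added above implement V8's smi encoding on ARM (kSmiTag == 0, kSmiTagSize == 1): a smi is the 31-bit integer shifted left by one, leaving the low bit clear. Tagging via add(reg, reg, reg) instead of a shift means, presumably, that a caller passing SetCC can detect overflow through the V flag. A minimal sketch of the arithmetic:

#include <cassert>
#include <cstdint>

int32_t SmiTag(int32_t value) {
  // add(reg, reg, Operand(reg)): reg + reg == reg << 1.
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}

int32_t SmiUntag(int32_t smi) {
  // mov(reg, Operand(reg, ASR, kSmiTagSize)): arithmetic shift right by 1.
  return smi >> 1;
}

int main() {
  assert(SmiUntag(SmiTag(-42)) == -42);
  assert((SmiTag(7) & 1) == 0);  // smis always have a clear low bit
  return 0;
}
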
@@ -673,9 +696,6 @@
   // Abort execution if argument is a smi. Used in debug code.
   void AbortIfSmi(Register object);
 
-  // Abort execution if argument is a string. Used in debug code.
-  void AbortIfNotString(Register object);
-
   // ---------------------------------------------------------------------------
   // String utilities
 
@@ -769,6 +789,17 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
+// Helper class for generating code or data associated with the code
+// right after a call instruction. As an example this can be used to
+// generate safepoint data after calls for crankshaft.
+class PostCallGenerator {
+ public:
+  PostCallGenerator() { }
+  virtual ~PostCallGenerator() { }
+  virtual void Generate() = 0;
+};
+
+
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index ab0cf60..143b839 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -74,6 +74,7 @@
   Simulator* sim_;
 
   int32_t GetRegisterValue(int regnum);
+  double GetVFPDoubleRegisterValue(int regnum);
   bool GetValue(const char* desc, int32_t* value);
   bool GetVFPSingleValue(const char* desc, float* value);
   bool GetVFPDoubleValue(const char* desc, double* value);
@@ -168,6 +169,11 @@
 }
 
 
+double Debugger::GetVFPDoubleRegisterValue(int regnum) {
+  return sim_->get_double_from_d_register(regnum);
+}
+
+
 bool Debugger::GetValue(const char* desc, int32_t* value) {
   int regnum = Registers::Number(desc);
   if (regnum != kNoRegister) {
@@ -309,6 +315,11 @@
               value = GetRegisterValue(i);
               PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value);
             }
+            for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
+              dvalue = GetVFPDoubleRegisterValue(i);
+              PrintF("%3s: %f\n",
+                  VFPRegisters::Name(i, true), dvalue);
+            }
           } else {
             if (GetValue(arg1, &value)) {
               PrintF("%s: 0x%08x %d \n", arg1, value, value);
@@ -837,6 +848,11 @@
 }
 
 
+bool Simulator::has_bad_pc() const {
+  return ((registers_[pc] == bad_lr) || (registers_[pc] == end_sim_pc));
+}
+
+
 // Raw access to the PC register without the special adjustment when reading.
 int32_t Simulator::get_pc() const {
   return registers_[pc];
@@ -989,9 +1005,7 @@
     intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
+  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
   UNIMPLEMENTED();
   return 0;
 #endif
@@ -1009,9 +1023,7 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
+  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
   UNIMPLEMENTED();
 #endif
 }
@@ -1026,9 +1038,7 @@
     uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
+  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
   UNIMPLEMENTED();
   return 0;
 #endif
@@ -1062,9 +1072,7 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
+  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
   UNIMPLEMENTED();
 #endif
 }
@@ -1081,9 +1089,7 @@
     *ptr = value;
     return;
   }
-  PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
-         addr,
-         reinterpret_cast<intptr_t>(instr));
+  PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
   UNIMPLEMENTED();
 #endif
 }
@@ -1520,7 +1526,8 @@
 typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
                                         int32_t arg1,
                                         int32_t arg2,
-                                        int32_t arg3);
+                                        int32_t arg3,
+                                        int32_t arg4);
 typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
                                          int32_t arg1,
                                          int32_t arg2,
@@ -1543,6 +1550,8 @@
       int32_t arg1 = get_register(r1);
       int32_t arg2 = get_register(r2);
       int32_t arg3 = get_register(r3);
+      int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
+      int32_t arg4 = *stack_pointer;
       // This is dodgy but it works because the C entry stubs are never moved.
       // See comment in codegen-arm.cc and bug 1242173.
       int32_t saved_lr = get_register(lr);
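
The new arg4 handling follows the ARM procedure call standard: the first four integer arguments are passed in r0-r3 and the fifth lands on the stack, which is why the simulator now reads it through the simulated sp. A minimal sketch of that convention (illustrative names only):

#include <cstdint>

// First four integer arguments travel in r0-r3; the rest sit on the stack
// starting at [sp], matching the get_register(sp) read above.
int32_t FetchArgument(int index, const int32_t regs[4], const int32_t* stack) {
  return (index < 4) ? regs[index] : stack[index - 4];
}
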
@@ -1571,19 +1580,20 @@
             reinterpret_cast<SimulatorRuntimeCall>(external);
         if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
           PrintF(
-              "Call to host function at %p with args %08x, %08x, %08x, %08x",
+              "Call to host function at %p args %08x, %08x, %08x, %08x, %0xc",
               FUNCTION_ADDR(target),
               arg0,
               arg1,
               arg2,
-              arg3);
+              arg3,
+              arg4);
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08x\n", get_register(sp));
           }
           PrintF("\n");
         }
         CHECK(stack_aligned);
-        int64_t result = target(arg0, arg1, arg2, arg3);
+        int64_t result = target(arg0, arg1, arg2, arg3, arg4);
         int32_t lo_res = static_cast<int32_t>(result);
         int32_t hi_res = static_cast<int32_t>(result >> 32);
         if (::v8::internal::FLAG_trace_sim) {
@@ -1918,9 +1928,12 @@
           set_register(lr, old_pc + Instr::kInstrSize);
           break;
         }
-        case BKPT:
-          v8::internal::OS::DebugBreak();
+        case BKPT: {
+          Debugger dbg(this);
+          PrintF("Simulator hit BKPT.\n");
+          dbg.Debug();
           break;
+        }
         default:
           UNIMPLEMENTED();
       }
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index c37b3f7..7bfe76a 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -186,6 +186,10 @@
   // ICache checking.
   static void FlushICache(void* start, size_t size);
 
+  // Returns true if pc register contains one of the 'special_values' defined
+  // below (bad_lr, end_sim_pc).
+  bool has_bad_pc() const;
+
  private:
   enum special_values {
     // Known bad pc value to ensure that the simulator does not execute
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 2117ce6..c2a9796 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -874,6 +874,34 @@
   return cell;
 }
 
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
+    MacroAssembler* masm,
+    JSObject* object,
+    JSObject* holder,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  JSObject* current = object;
+  while (current != holder) {
+    if (current->IsGlobalObject()) {
+      // Returns a cell or a failure.
+      MaybeObject* result = GenerateCheckPropertyCell(
+          masm,
+          GlobalObject::cast(current),
+          name,
+          scratch,
+          miss);
+      if (result->IsFailure()) return result;
+    }
+    ASSERT(current->IsJSObject());
+    current = JSObject::cast(current->GetPrototype());
+  }
+  return NULL;
+}
+
+
 
 #undef __
 #define __ ACCESS_MASM(masm())
@@ -911,18 +939,19 @@
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
+    ASSERT(current->GetPrototype()->IsJSObject());
     JSObject* prototype = JSObject::cast(current->GetPrototype());
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        MaybeObject* lookup_result = Heap::LookupSymbol(name);
-        if (lookup_result->IsFailure()) {
-          set_failure(Failure::cast(lookup_result));
+        MaybeObject* maybe_lookup_result = Heap::LookupSymbol(name);
+        Object* lookup_result = NULL;  // Initialization to please compiler.
+        if (!maybe_lookup_result->ToObject(&lookup_result)) {
+          set_failure(Failure::cast(maybe_lookup_result));
           return reg;
-        } else {
-          name = String::cast(lookup_result->ToObjectUnchecked());
         }
+        name = String::cast(lookup_result);
       }
       ASSERT(current->property_dictionary()->FindEntry(name) ==
              StringDictionary::kNotFound);
@@ -936,7 +965,7 @@
       __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
       reg = holder_reg;  // from now the object is in holder_reg
       __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-    } else {
+    } else if (Heap::InNewSpace(prototype)) {
       // Get the map of the current object.
       __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
       __ cmp(scratch1, Operand(Handle<Map>(current->map())));
@@ -956,14 +985,24 @@
       }
 
       reg = holder_reg;  // from now the object is in holder_reg
-      if (Heap::InNewSpace(prototype)) {
-        // The prototype is in new space; we cannot store a reference
-        // to it in the code. Load it from the map.
-        __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-      } else {
-        // The prototype is in old space; load it directly.
-        __ mov(reg, Operand(Handle<JSObject>(prototype)));
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      // Check the map of the current object.
+      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      __ cmp(scratch1, Operand(Handle<Map>(current->map())));
+      // Branch on the result of the map check.
+      __ b(ne, miss);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
       }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // from now the object is in holder_reg
+      __ mov(reg, Operand(Handle<JSObject>(prototype)));
     }
 
     if (save_at_depth == depth) {
@@ -982,32 +1021,22 @@
   // Log the check depth.
   LOG(IntEvent("check-maps-depth", depth + 1));
 
-  // Perform security check for access to the global object and return
-  // the holder register.
-  ASSERT(current == holder);
-  ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
-  if (current->IsJSGlobalProxy()) {
+  // Perform security check for access to the global object.
+  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+  if (holder->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
   }
 
   // If we've skipped any global objects, it's not enough to verify
   // that their maps haven't changed.  We also need to check that the
   // property cell for the property is still empty.
-  current = object;
-  while (current != holder) {
-    if (current->IsGlobalObject()) {
-      MaybeObject* cell = GenerateCheckPropertyCell(masm(),
-                                                    GlobalObject::cast(current),
-                                                    name,
-                                                    scratch1,
-                                                    miss);
-      if (cell->IsFailure()) {
-        set_failure(Failure::cast(cell));
-        return reg;
-      }
-    }
-    current = JSObject::cast(current->GetPrototype());
-  }
+  MaybeObject* result = GenerateCheckPropertyCells(masm(),
+                                                   object,
+                                                   holder,
+                                                   name,
+                                                   scratch1,
+                                                   miss);
+  if (result->IsFailure()) set_failure(Failure::cast(result));
 
   // Return the register containing the holder.
   return reg;
@@ -1652,7 +1681,7 @@
   __ Drop(argc + 1);
   __ Ret();
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_code_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1729,7 +1758,7 @@
   __ Drop(argc + 1);
   __ Ret();
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1804,7 +1833,7 @@
   __ Drop(argc + 1);
   __ Ret();
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_from_code_generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
@@ -1923,7 +1952,7 @@
   __ cmp(r7, Operand(HeapNumber::kMantissaBits));
   // If greater or equal, the argument is already round and in r0.
   __ b(&restore_fpscr_and_return, ge);
-  __ b(&wont_fit_smi);
+  __ b(&slow);
 
   __ bind(&no_vfp_exception);
   // Move the result back to general purpose register r0.
@@ -1951,10 +1980,10 @@
   __ Ret();
 
   __ bind(&wont_fit_smi);
+  __ bind(&slow);
   // Restore FPCSR and fall to slow case.
   __ vmsr(r3);
 
-  __ bind(&slow);
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
@@ -2083,8 +2112,8 @@
   //  -- lr    : return address
   // -----------------------------------
   SharedFunctionInfo* function_info = function->shared();
-  if (function_info->HasCustomCallGenerator()) {
-    const int id = function_info->custom_call_generator_id();
+  if (function_info->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function_info->builtin_function_id();
     MaybeObject* maybe_result = CompileCustomCall(
         id, object, holder, NULL, function, name);
     Object* result;
@@ -2294,8 +2323,8 @@
   // -----------------------------------
 
   SharedFunctionInfo* function_info = function->shared();
-  if (function_info->HasCustomCallGenerator()) {
-    const int id = function_info->custom_call_generator_id();
+  if (function_info->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function_info->builtin_function_id();
     MaybeObject* maybe_result = CompileCustomCall(
         id, object, holder, cell, function, name);
     Object* result;
@@ -2330,8 +2359,16 @@
   ASSERT(function->is_compiled());
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  __ InvokeCode(code, expected, arguments(),
-                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+    __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION);
+  } else {
+    __ InvokeCode(code, expected, arguments(),
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  }
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -2864,13 +2901,62 @@
 }
 
 
+MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver isn't a smi.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  // Check that the map matches.
+  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ cmp(r2, Operand(Handle<Map>(receiver->map())));
+  __ b(ne, &miss);
+
+  // Check that the key is a smi.
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(ne, &miss);
+
+  // Get the elements array.
+  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ AssertFastElements(r2);
+
+  // Check that the key is within bounds.
+  __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
+  __ cmp(r0, Operand(r3));
+  __ b(hs, &miss);
+
+  // Load the result and make sure it's not the hole.
+  __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ ldr(r4,
+         MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(r4, ip);
+  __ b(eq, &miss);
+  __ mov(r0, r4);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
 MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
                                                        int index,
                                                        Map* transition,
                                                        String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
-  //  -- r1    : key
+  //  -- r1    : name
   //  -- r2    : receiver
   //  -- lr    : return address
   // -----------------------------------
@@ -2902,6 +2988,76 @@
 }
 
 
+MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
+    JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : scratch
+  //  -- r4    : scratch (elements)
+  // -----------------------------------
+  Label miss;
+
+  Register value_reg = r0;
+  Register key_reg = r1;
+  Register receiver_reg = r2;
+  Register scratch = r3;
+  Register elements_reg = r4;
+
+  // Check that the receiver isn't a smi.
+  __ tst(receiver_reg, Operand(kSmiTagMask));
+  __ b(eq, &miss);
+
+  // Check that the map matches.
+  __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+  __ cmp(scratch, Operand(Handle<Map>(receiver->map())));
+  __ b(ne, &miss);
+
+  // Check that the key is a smi.
+  __ tst(key_reg, Operand(kSmiTagMask));
+  __ b(ne, &miss);
+
+  // Get the elements array and make sure it is a fast element array, not 'cow'.
+  __ ldr(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+  __ ldr(scratch, FieldMemOperand(elements_reg, HeapObject::kMapOffset));
+  __ cmp(scratch, Operand(Handle<Map>(Factory::fixed_array_map())));
+  __ b(ne, &miss);
+
+  // Check that the key is within bounds.
+  if (receiver->IsJSArray()) {
+    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+  } else {
+    __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  }
+  // Compare smis.
+  __ cmp(key_reg, scratch);
+  __ b(hs, &miss);
+
+  __ add(scratch,
+         elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ str(value_reg,
+         MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ RecordWrite(scratch,
+                 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
+                 receiver_reg, elements_reg);
+
+  // value_reg (r0) is preserved.
+  // Done.
+  __ Ret();
+
+  __ bind(&miss);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
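In both specialized stubs above, the bounds check is a single cmp followed by b(hs, &miss). Because hs is an unsigned comparison and both the key and the length are smi-tagged, one branch rejects out-of-range and negative indices alike: a negative smi has its sign bit set and therefore compares as a huge unsigned value. A minimal sketch of the idea (hypothetical helper name):

#include <cassert>
#include <cstdint>

// cmp(key, length); b(hs, &miss) -- taken when key >= length, unsigned.
bool KeyInBounds(int32_t key_smi, int32_t length_smi) {
  return static_cast<uint32_t>(key_smi) < static_cast<uint32_t>(length_smi);
}

int main() {
  const int32_t length = 10 << 1;         // smi-tagged length 10
  assert(KeyInBounds(3 << 1, length));    // key 3: in bounds
  assert(!KeyInBounds(10 << 1, length));  // key 10: out of range
  assert(!KeyInBounds(-2, length));       // smi-tagged -1: huge unsigned, rejected
  return 0;
}
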
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
   // ----------- S t a t e -------------
   //  -- r0    : argc
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 3266a16..45f4876 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -68,8 +68,6 @@
 
 void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
   if (Equals(expected)) return;
-  ASSERT((expected->tos_known_smi_map_ & tos_known_smi_map_) ==
-         expected->tos_known_smi_map_);
   ASSERT(expected->IsCompatibleWith(this));
   MergeTOSTo(expected->top_of_stack_state_, cond);
   ASSERT(register_allocation_map_ == expected->register_allocation_map_);
@@ -78,7 +76,7 @@
 
 void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) {
   if (Equals(expected)) return;
-  tos_known_smi_map_ &= expected->tos_known_smi_map_;
+  expected->tos_known_smi_map_ &= tos_known_smi_map_;
   MergeTOSTo(expected->top_of_stack_state_, cond);
   ASSERT(register_allocation_map_ == expected->register_allocation_map_);
 }
diff --git a/src/array.js b/src/array.js
index 5ecf5e3..0f1e969 100644
--- a/src/array.js
+++ b/src/array.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -148,6 +148,9 @@
         }
       }
     }
+    elements.length = elements_length;
+    var result = %_FastAsciiArrayJoin(elements, "");
+    if (!IS_UNDEFINED(result)) return result;
     return %StringBuilderConcat(elements, elements_length, '');
   } finally {
     // Make sure to pop the visited array no matter what happens.
@@ -156,9 +159,11 @@
 }
 
 
-function ConvertToString(e) {
-  if (e == null) return '';
-  else return ToString(e);
+function ConvertToString(x) {
+  if (IS_STRING(x)) return x;
+  if (IS_NUMBER(x)) return %_NumberToString(x);
+  if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
+  return (IS_NULL_OR_UNDEFINED(x)) ? '' : %ToString(%DefaultString(x));
 }
 
 
@@ -362,14 +367,13 @@
   if (IS_UNDEFINED(separator)) {
     separator = ',';
   } else if (!IS_STRING(separator)) {
-    separator = ToString(separator);
+    separator = NonStringToString(separator);
   }
 
   var result = %_FastAsciiArrayJoin(this, separator);
-  if (typeof result != "undefined") return result;
+  if (!IS_UNDEFINED(result)) return result;
 
-  var length = TO_UINT32(this.length);
-  return Join(this, length, separator, ConvertToString);
+  return Join(this, TO_UINT32(this.length), separator, ConvertToString);
 }
 
 
@@ -673,39 +677,76 @@
 
   function QuickSort(a, from, to) {
     // Insertion sort is faster for short arrays.
-    if (to - from <= 22) {
+    if (to - from <= 10) {
       InsertionSort(a, from, to);
       return;
     }
-    var pivot_index = $floor($random() * (to - from)) + from;
-    var pivot = a[pivot_index];
-    // Issue 95: Keep the pivot element out of the comparisons to avoid
-    // infinite recursion if comparefn(pivot, pivot) != 0.
-    %_SwapElements(a, from, pivot_index);
-    var low_end = from;   // Upper bound of the elements lower than pivot.
-    var high_start = to;  // Lower bound of the elements greater than pivot.
+    // Find a pivot as the median of the first, last and middle elements.
+    var v0 = a[from];
+    var v1 = a[to - 1];
+    var middle_index = from + ((to - from) >> 1);
+    var v2 = a[middle_index];
+    var c01 = %_CallFunction(global_receiver, v0, v1, comparefn);
+    if (c01 > 0) {
+      // v1 < v0, so swap them.
+      var tmp = v0;
+      v0 = v1;
+      v1 = tmp;
+    } // v0 <= v1.
+    var c02 = %_CallFunction(global_receiver, v0, v2, comparefn);
+    if (c02 >= 0) {
+      // v2 <= v0 <= v1.
+      var tmp = v0;
+      v0 = v2;
+      v2 = v1;
+      v1 = tmp;
+    } else {
+      // v0 <= v1 && v0 < v2
+      var c12 = %_CallFunction(global_receiver, v1, v2, comparefn);
+      if (c12 > 0) {
+        // v0 <= v2 < v1
+        var tmp = v1;
+        v1 = v2;
+        v2 = tmp;
+      }
+    }
+    // v0 <= v1 <= v2
+    a[from] = v0;
+    a[to - 1] = v2;
+    var pivot = v1;
+    var low_end = from + 1;   // Upper bound of elements lower than pivot.
+    var high_start = to - 1;  // Lower bound of elements greater than pivot.
+    a[middle_index] = a[low_end];
+    a[low_end] = pivot;
+
     // From low_end to i are elements equal to pivot.
     // From i to high_start are elements that haven't been compared yet.
-    for (var i = from + 1; i < high_start; ) {
+    partition: for (var i = low_end + 1; i < high_start; i++) {
       var element = a[i];
       var order = %_CallFunction(global_receiver, element, pivot, comparefn);
       if (order < 0) {
         %_SwapElements(a, i, low_end);
-        i++;
         low_end++;
       } else if (order > 0) {
-        high_start--;
+        do {
+          high_start--;
+          if (high_start == i) break partition;
+          var top_elem = a[high_start];
+          order = %_CallFunction(global_receiver, top_elem, pivot, comparefn);
+        } while (order > 0);
         %_SwapElements(a, i, high_start);
-      } else {  // order == 0
-        i++;
+        if (order < 0) {
+          %_SwapElements(a, i, low_end);
+          low_end++;
+        }
       }
     }
     QuickSort(a, from, low_end);
     QuickSort(a, high_start, to);
   }
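
The rewritten QuickSort replaces the random pivot with the median of the first, middle and last elements, established by the c01/c02/c12 compares above, and lowers the insertion-sort cutoff from 22 to 10. A compact C++ sketch of the same median-of-three selection, with cmp playing the role of comparefn:

#include <cassert>
#include <utility>

// Establishes v0 <= v1 <= v2 with at most three comparisons, as in the
// c01/c02/c12 sequence above; the median ends up in v1 and becomes the pivot.
template <typename T, typename Compare>
T MedianOfThree(T v0, T v1, T v2, Compare cmp) {
  if (cmp(v0, v1) > 0) std::swap(v0, v1);  // now v0 <= v1
  if (cmp(v0, v2) >= 0) {                  // v2 <= v0 <= v1: rotate down
    T tmp = v0; v0 = v2; v2 = v1; v1 = tmp;
  } else if (cmp(v1, v2) > 0) {            // v0 <= v2 < v1: swap the top two
    std::swap(v1, v2);
  }
  return v1;                               // v0 <= v1 <= v2
}

int main() {
  auto cmp = [](int a, int b) { return a - b; };
  assert(MedianOfThree(3, 1, 2, cmp) == 2);
  assert(MedianOfThree(1, 2, 3, cmp) == 2);
  assert(MedianOfThree(2, 3, 1, cmp) == 2);
  return 0;
}
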
 
-  // Copies elements in the range 0..length from obj's prototype chain
-  // to obj itself, if obj has holes. Returns one more than the maximal index
+  // Copy elements in the range 0..length from obj's prototype chain
+  // to obj itself, if obj has holes. Return one more than the maximal index
   // of a prototype property.
   function CopyFromPrototype(obj, length) {
     var max = 0;
diff --git a/src/assembler.cc b/src/assembler.cc
index 7493673..eeb8412 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -35,10 +35,12 @@
 #include "v8.h"
 
 #include "arguments.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "ic-inl.h"
 #include "factory.h"
 #include "runtime.h"
+#include "runtime-profiler.h"
 #include "serialize.h"
 #include "stub-cache.h"
 #include "regexp-stack.h"
@@ -62,6 +64,11 @@
 namespace internal {
 
 
+const double DoubleConstant::min_int = kMinInt;
+const double DoubleConstant::one_half = 0.5;
+const double DoubleConstant::negative_infinity = -V8_INFINITY;
+
+
 // -----------------------------------------------------------------------------
 // Implementation of Label
 
@@ -210,7 +217,7 @@
 #endif
   Counters::reloc_info_count.Increment();
   ASSERT(rinfo->pc() - last_pc_ >= 0);
-  ASSERT(RelocInfo::NUMBER_OF_MODES < kMaxRelocModes);
+  ASSERT(RelocInfo::NUMBER_OF_MODES <= kMaxRelocModes);
   // Use unsigned delta-encoding for pc.
   uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
   RelocInfo::Mode rmode = rinfo->rmode();
@@ -350,12 +357,8 @@
       Advance();
       // Check if we want source positions.
       if (mode_mask_ & RelocInfo::kPositionMask) {
-        // Check if we want this type of source position.
-        if (SetMode(DebugInfoModeFromTag(GetPositionTypeTag()))) {
-          // Finally read the data before returning.
-          ReadTaggedData();
-          return;
-        }
+        ReadTaggedData();
+        if (SetMode(DebugInfoModeFromTag(GetPositionTypeTag()))) return;
       }
     } else {
       ASSERT(tag == kDefaultTag);
@@ -390,7 +393,7 @@
 RelocIterator::RelocIterator(Code* code, int mode_mask) {
   rinfo_.pc_ = code->instruction_start();
   rinfo_.data_ = 0;
-  // relocation info is read backwards
+  // Relocation info is read backwards.
   pos_ = code->relocation_start() + code->relocation_size();
   end_ = code->relocation_start();
   done_ = false;
@@ -403,7 +406,7 @@
 RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
   rinfo_.pc_ = desc.buffer;
   rinfo_.data_ = 0;
-  // relocation info is read backwards
+  // Relocation info is read backwards.
   pos_ = desc.buffer + desc.buffer_size;
   end_ = pos_ - desc.reloc_size;
   done_ = false;
@@ -435,6 +438,8 @@
       return "debug break";
     case RelocInfo::CODE_TARGET:
       return "code target";
+    case RelocInfo::GLOBAL_PROPERTY_CELL:
+      return "global property cell";
     case RelocInfo::RUNTIME_ENTRY:
       return "runtime entry";
     case RelocInfo::JS_RETURN:
@@ -462,27 +467,35 @@
 }
 
 
-void RelocInfo::Print() {
-  PrintF("%p  %s", pc_, RelocModeName(rmode_));
+void RelocInfo::Print(FILE* out) {
+  PrintF(out, "%p  %s", pc_, RelocModeName(rmode_));
   if (IsComment(rmode_)) {
-    PrintF("  (%s)", reinterpret_cast<char*>(data_));
+    PrintF(out, "  (%s)", reinterpret_cast<char*>(data_));
   } else if (rmode_ == EMBEDDED_OBJECT) {
-    PrintF("  (");
-    target_object()->ShortPrint();
-    PrintF(")");
+    PrintF(out, "  (");
+    target_object()->ShortPrint(out);
+    PrintF(out, ")");
   } else if (rmode_ == EXTERNAL_REFERENCE) {
     ExternalReferenceEncoder ref_encoder;
-    PrintF(" (%s)  (%p)",
+    PrintF(out, " (%s)  (%p)",
            ref_encoder.NameOfAddress(*target_reference_address()),
            *target_reference_address());
   } else if (IsCodeTarget(rmode_)) {
     Code* code = Code::GetCodeFromTargetAddress(target_address());
-    PrintF(" (%s)  (%p)", Code::Kind2String(code->kind()), target_address());
+    PrintF(out, " (%s)  (%p)", Code::Kind2String(code->kind()),
+           target_address());
   } else if (IsPosition(rmode_)) {
-    PrintF("  (%" V8_PTR_PREFIX "d)", data());
+    PrintF(out, "  (%" V8_PTR_PREFIX "d)", data());
+  } else if (rmode_ == RelocInfo::RUNTIME_ENTRY) {
+    // Deoptimization bailouts are stored as runtime entries.
+    int id = Deoptimizer::GetDeoptimizationId(
+        target_address(), Deoptimizer::EAGER);
+    if (id != Deoptimizer::kNotDeoptimizationEntry) {
+      PrintF(out, "  (deoptimization bailout %d)", id);
+    }
   }
 
-  PrintF("\n");
+  PrintF(out, "\n");
 }
 #endif  // ENABLE_DISASSEMBLER
 
@@ -493,6 +506,9 @@
     case EMBEDDED_OBJECT:
       Object::VerifyPointer(target_object());
       break;
+    case GLOBAL_PROPERTY_CELL:
+      Object::VerifyPointer(target_cell());
+      break;
     case DEBUG_BREAK:
 #ifndef ENABLE_DEBUGGER_SUPPORT
       UNREACHABLE();
@@ -599,6 +615,23 @@
 }
 
 
+ExternalReference ExternalReference::new_deoptimizer_function() {
+  return ExternalReference(
+      Redirect(FUNCTION_ADDR(Deoptimizer::New)));
+}
+
+
+ExternalReference ExternalReference::compute_output_frames_function() {
+  return ExternalReference(
+      Redirect(FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
+}
+
+
+ExternalReference ExternalReference::global_contexts_list() {
+  return ExternalReference(Heap::global_contexts_list_address());
+}
+
+
 ExternalReference ExternalReference::keyed_lookup_cache_keys() {
   return ExternalReference(KeyedLookupCache::keys_address());
 }
@@ -679,6 +712,24 @@
 }
 
 
+ExternalReference ExternalReference::address_of_min_int() {
+  return ExternalReference(reinterpret_cast<void*>(
+      const_cast<double*>(&DoubleConstant::min_int)));
+}
+
+
+ExternalReference ExternalReference::address_of_one_half() {
+  return ExternalReference(reinterpret_cast<void*>(
+      const_cast<double*>(&DoubleConstant::one_half)));
+}
+
+
+ExternalReference ExternalReference::address_of_negative_infinity() {
+  return ExternalReference(reinterpret_cast<void*>(
+      const_cast<double*>(&DoubleConstant::negative_infinity)));
+}
+
+
 #ifndef V8_INTERPRETED_REGEXP
 
 ExternalReference ExternalReference::re_check_stack_guard_state() {
@@ -750,6 +801,51 @@
 }
 
 
+// Helper function to compute x^y, where y is known to be an
+// integer. Uses binary decomposition to limit the number of
+// multiplications; see the discussion in "Hacker's Delight" by Henry
+// S. Warren, Jr., figure 11-6, page 213.
+double power_double_int(double x, int y) {
+  double m = (y < 0) ? 1 / x : x;
+  unsigned n = (y < 0) ? -y : y;
+  double p = 1;
+  while (n != 0) {
+    if ((n & 1) != 0) p *= m;
+    m *= m;
+    if ((n & 2) != 0) p *= m;
+    m *= m;
+    n >>= 2;
+  }
+  return p;
+}
+
+
+double power_double_double(double x, double y) {
+  int y_int = static_cast<int>(y);
+  if (y == y_int) {
+    return power_double_int(x, y_int);  // Returns 1.0 for exponent 0.
+  }
+  if (!isinf(x)) {
+    if (y == 0.5) return sqrt(x);
+    if (y == -0.5) return 1.0 / sqrt(x);
+  }
+  if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
+    return OS::nan_value();
+  }
+  return pow(x, y);
+}
+
+
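As a concrete trace of the binary decomposition in power_double_int: with y = 13 = 0b1101 the loop consumes two exponent bits per iteration, so p accumulates x^1 * x^4 * x^8 = x^13 in seven multiplications (squarings included) rather than twelve. A self-contained check of that claim, reusing the same algorithm:

#include <cassert>

// Same algorithm as power_double_int above, reproduced for the worked example.
double PowInt(double x, int y) {
  double m = (y < 0) ? 1 / x : x;
  unsigned n = (y < 0) ? -y : y;
  double p = 1;
  while (n != 0) {
    if ((n & 1) != 0) p *= m;
    m *= m;
    if ((n & 2) != 0) p *= m;
    m *= m;
    n >>= 2;
  }
  return p;
}

int main() {
  assert(PowInt(3.0, 13) == 1594323.0);  // 3^13, exact in a double
  assert(PowInt(2.0, -3) == 0.125);      // negative exponents via 1/x
  assert(PowInt(5.0, 0) == 1.0);         // empty product
  return 0;
}
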
+ExternalReference ExternalReference::power_double_double_function() {
+  return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double)));
+}
+
+
+ExternalReference ExternalReference::power_double_int_function() {
+  return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int)));
+}
+
+
 static int native_compare_doubles(double y, double x) {
   if (x == y) return EQUAL;
   return x < y ? LESS : GREATER;
@@ -805,19 +901,17 @@
 #endif
 
 
-void PositionsRecorder::RecordPosition(int pos,
-                                       PositionRecordingType recording_type) {
+void PositionsRecorder::RecordPosition(int pos) {
   ASSERT(pos != RelocInfo::kNoPosition);
   ASSERT(pos >= 0);
-  current_position_ = pos;
-  current_position_recording_type_ = recording_type;
+  state_.current_position = pos;
 }
 
 
 void PositionsRecorder::RecordStatementPosition(int pos) {
   ASSERT(pos != RelocInfo::kNoPosition);
   ASSERT(pos >= 0);
-  current_statement_position_ = pos;
+  state_.current_statement_position = pos;
 }
 
 
@@ -826,31 +920,26 @@
 
   // Write the statement position if it is different from what was written last
   // time.
-  if (current_statement_position_ != written_statement_position_) {
+  if (state_.current_statement_position != state_.written_statement_position) {
     EnsureSpace ensure_space(assembler_);
     assembler_->RecordRelocInfo(RelocInfo::STATEMENT_POSITION,
-                                current_statement_position_);
-    written_statement_position_ = current_statement_position_;
+                                state_.current_statement_position);
+    state_.written_statement_position = state_.current_statement_position;
     written = true;
   }
 
   // Write the position if it is different from what was written last time and
-  // also different from the written statement position or was forced.
-  if (current_position_ != written_position_ &&
-      (current_position_ != current_statement_position_ || !written) &&
-      (current_position_ != written_statement_position_
-       || current_position_recording_type_ == FORCED_POSITION)) {
+  // also different from the written statement position.
+  if (state_.current_position != state_.written_position &&
+      state_.current_position != state_.written_statement_position) {
     EnsureSpace ensure_space(assembler_);
-    assembler_->RecordRelocInfo(RelocInfo::POSITION, current_position_);
-    written_position_ = current_position_;
+    assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position);
+    state_.written_position = state_.current_position;
     written = true;
   }
 
-  current_position_recording_type_ = NORMAL_POSITION;
-
   // Return whether something was written.
   return written;
 }
 
-
 } }  // namespace v8::internal
diff --git a/src/assembler.h b/src/assembler.h
index 09159fe..b68ad38 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -38,13 +38,23 @@
 #include "runtime.h"
 #include "top.h"
 #include "token.h"
-#include "objects.h"
 
 namespace v8 {
 namespace internal {
 
 
 // -----------------------------------------------------------------------------
+// Common double constants.
+
+class DoubleConstant: public AllStatic {
+ public:
+  static const double min_int;
+  static const double one_half;
+  static const double negative_infinity;
+};
+
+
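These constants live in ordinary static memory because, on the targets V8 generates code for, an arbitrary 64-bit double cannot be encoded as an instruction immediate; the emitted code instead loads the value through the address wrapped by the ExternalReference accessors below. A minimal sketch of the pattern (names illustrative):

// The generated code receives &kOneHalf and performs a memory load
// (e.g. vldr on ARM) to materialize the constant.
static const double kOneHalf = 0.5;

const double* AddressOfOneHalf() {
  return &kOneHalf;
}
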
+// -----------------------------------------------------------------------------
 // Labels represent pc locations; they are typically jump or call targets.
 // After declaration, a label can be freely used to denote known or (yet)
 // unknown pc location. Assembler::bind() is used to bind a label to the
@@ -174,6 +184,8 @@
     CODE_TARGET,  // Code target which is not any of the above.
     EMBEDDED_OBJECT,
 
+    GLOBAL_PROPERTY_CELL,
+
     // Everything after runtime_entry (inclusive) is not GC'ed.
     RUNTIME_ENTRY,
     JS_RETURN,  // Marks start of the ExitJSFrame code.
@@ -254,6 +266,10 @@
   INLINE(Handle<Object> target_object_handle(Assembler* origin));
   INLINE(Object** target_object_address());
   INLINE(void set_target_object(Object* target));
+  INLINE(JSGlobalPropertyCell* target_cell());
+  INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
+  INLINE(void set_target_cell(JSGlobalPropertyCell* cell));
+
 
   // Read the address of the word containing the target_address in an
   // instruction stream.  What this means exactly is architecture-independent.
@@ -306,7 +322,7 @@
 #ifdef ENABLE_DISASSEMBLER
   // Printing
   static const char* RelocModeName(Mode rmode);
-  void Print();
+  void Print(FILE* out);
 #endif  // ENABLE_DISASSEMBLER
 #ifdef DEBUG
   // Debugging
@@ -419,7 +435,7 @@
   // If the given mode is wanted, set it in rinfo_ and return true.
   // Else return false. Used for efficiently skipping unwanted modes.
   bool SetMode(RelocInfo::Mode mode) {
-    return (mode_mask_ & 1 << mode) ? (rinfo_.rmode_ = mode, true) : false;
+    return (mode_mask_ & (1 << mode)) ? (rinfo_.rmode_ = mode, true) : false;
   }
 
   byte* pos_;
@@ -484,6 +500,11 @@
   static ExternalReference transcendental_cache_array_address();
   static ExternalReference delete_handle_scope_extensions();
 
+  // Deoptimization support.
+  static ExternalReference new_deoptimizer_function();
+  static ExternalReference compute_output_frames_function();
+  static ExternalReference global_contexts_list();
+
   // Static data in the keyed lookup cache.
   static ExternalReference keyed_lookup_cache_keys();
   static ExternalReference keyed_lookup_cache_field_offsets();
@@ -519,6 +540,8 @@
 
   static ExternalReference double_fp_operation(Token::Value operation);
   static ExternalReference compare_doubles();
+  static ExternalReference power_double_double_function();
+  static ExternalReference power_double_int_function();
 
   static ExternalReference handle_scope_next_address();
   static ExternalReference handle_scope_limit_address();
@@ -526,6 +549,11 @@
 
   static ExternalReference scheduled_exception_address();
 
+  // Static variables containing common double constants.
+  static ExternalReference address_of_min_int();
+  static ExternalReference address_of_one_half();
+  static ExternalReference address_of_negative_infinity();
+
   Address address() const {return reinterpret_cast<Address>(address_);}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -587,23 +615,27 @@
 // -----------------------------------------------------------------------------
 // Position recording support
 
-enum PositionRecordingType { FORCED_POSITION, NORMAL_POSITION };
+struct PositionState {
+  PositionState() : current_position(RelocInfo::kNoPosition),
+                    written_position(RelocInfo::kNoPosition),
+                    current_statement_position(RelocInfo::kNoPosition),
+                    written_statement_position(RelocInfo::kNoPosition) {}
+
+  int current_position;
+  int written_position;
+
+  int current_statement_position;
+  int written_statement_position;
+};
+
 
 class PositionsRecorder BASE_EMBEDDED {
  public:
   explicit PositionsRecorder(Assembler* assembler)
-      : assembler_(assembler),
-        current_position_(RelocInfo::kNoPosition),
-        current_position_recording_type_(NORMAL_POSITION),
-        written_position_(RelocInfo::kNoPosition),
-        current_statement_position_(RelocInfo::kNoPosition),
-        written_statement_position_(RelocInfo::kNoPosition) { }
+      : assembler_(assembler) {}
 
-  // Set current position to pos. If recording_type is FORCED_POSITION then
-  // WriteRecordedPositions will write this position even if it is equal to
-  // statement position previously written for another pc.
-  void RecordPosition(int pos,
-                      PositionRecordingType recording_type = NORMAL_POSITION);
+  // Set current position to pos.
+  void RecordPosition(int pos);
 
   // Set current statement position to pos.
   void RecordStatementPosition(int pos);
@@ -611,37 +643,37 @@
   // Write recorded positions to relocation information.
   bool WriteRecordedPositions();
 
-  int current_position() const { return current_position_; }
+  int current_position() const { return state_.current_position; }
 
-  int current_statement_position() const { return current_statement_position_; }
+  int current_statement_position() const {
+    return state_.current_statement_position;
+  }
 
  private:
   Assembler* assembler_;
+  PositionState state_;
 
-  int current_position_;
-  PositionRecordingType current_position_recording_type_;
-  int written_position_;
+  friend class PreservePositionScope;
 
-  int current_statement_position_;
-  int written_statement_position_;
+  DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
 };
 
 
-class PreserveStatementPositionScope BASE_EMBEDDED {
+class PreservePositionScope BASE_EMBEDDED {
  public:
-  explicit PreserveStatementPositionScope(PositionsRecorder* positions_recorder)
+  explicit PreservePositionScope(PositionsRecorder* positions_recorder)
       : positions_recorder_(positions_recorder),
-        statement_position_(positions_recorder->current_statement_position()) {}
+        saved_state_(positions_recorder->state_) {}
 
-  ~PreserveStatementPositionScope() {
-    if (statement_position_ != RelocInfo::kNoPosition) {
-      positions_recorder_->RecordStatementPosition(statement_position_);
-    }
+  ~PreservePositionScope() {
+    positions_recorder_->state_ = saved_state_;
   }
 
  private:
   PositionsRecorder* positions_recorder_;
-  int statement_position_;
+  const PositionState saved_state_;
+
+  DISALLOW_COPY_AND_ASSIGN(PreservePositionScope);
 };
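
PreservePositionScope generalizes the old statement-only scope: it snapshots the whole PositionState on construction and unconditionally restores it in the destructor, so any positions recorded inside the scope are discarded at exit. A minimal sketch of this RAII save/restore shape, with illustrative types:

struct RecorderState {
  int current_position = -1;
  int written_position = -1;
};

class ScopedStateSaver {
 public:
  explicit ScopedStateSaver(RecorderState* state)
      : state_(state), saved_(*state) {}
  ~ScopedStateSaver() { *state_ = saved_; }  // restore on every exit path

 private:
  RecorderState* const state_;
  const RecorderState saved_;
};
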
 
 
@@ -682,6 +714,10 @@
   return num_bits_set;
 }
 
+// Computes pow(x, y) with the special cases in the spec for Math.pow.
+double power_double_int(double x, int y);
+double power_double_double(double x, double y);
+
 } }  // namespace v8::internal
 
 #endif  // V8_ASSEMBLER_H_
diff --git a/src/ast-inl.h b/src/ast-inl.h
index f0a25c1..eb81c3a 100644
--- a/src/ast-inl.h
+++ b/src/ast-inl.h
@@ -25,18 +25,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifndef V8_AST_INL_H_
+#define V8_AST_INL_H_
+
 #include "v8.h"
 
 #include "ast.h"
+#include "jump-target-inl.h"
 
 namespace v8 {
 namespace internal {
 
-BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
-    : labels_(labels), type_(type) {
-  ASSERT(labels == NULL || labels->length() > 0);
-}
-
 
 SwitchStatement::SwitchStatement(ZoneStringList* labels)
     : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
@@ -44,13 +43,6 @@
 }
 
 
-IterationStatement::IterationStatement(ZoneStringList* labels)
-    : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
-      body_(NULL),
-      continue_target_(JumpTarget::BIDIRECTIONAL) {
-}
-
-
 Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
     : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
       statements_(capacity),
@@ -58,23 +50,58 @@
 }
 
 
+BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
+    : labels_(labels),
+      type_(type),
+      entry_id_(GetNextId()),
+      exit_id_(GetNextId()) {
+  ASSERT(labels == NULL || labels->length() > 0);
+}
+
+
+IterationStatement::IterationStatement(ZoneStringList* labels)
+    : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
+      body_(NULL),
+      continue_target_(JumpTarget::BIDIRECTIONAL),
+      osr_entry_id_(GetNextId()) {
+}
+
+
+DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
+    : IterationStatement(labels),
+      cond_(NULL),
+      condition_position_(-1),
+      continue_id_(GetNextId()),
+      back_edge_id_(GetNextId()) {
+}
+
+
+WhileStatement::WhileStatement(ZoneStringList* labels)
+    : IterationStatement(labels),
+      cond_(NULL),
+      may_have_function_literal_(true),
+      body_id_(GetNextId()) {
+}
+
+
 ForStatement::ForStatement(ZoneStringList* labels)
     : IterationStatement(labels),
       init_(NULL),
       cond_(NULL),
       next_(NULL),
       may_have_function_literal_(true),
-      loop_variable_(NULL) {
+      loop_variable_(NULL),
+      continue_id_(GetNextId()),
+      body_id_(GetNextId()) {
 }
 
 
 ForInStatement::ForInStatement(ZoneStringList* labels)
-    : IterationStatement(labels), each_(NULL), enumerable_(NULL) {
+    : IterationStatement(labels), each_(NULL), enumerable_(NULL),
+      assignment_id_(GetNextId()) {
 }
 
 
-DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
-    : IterationStatement(labels), cond_(NULL), condition_position_(-1) {
-}
-
 } }  // namespace v8::internal
+
+#endif  // V8_AST_INL_H_
diff --git a/src/ast.cc b/src/ast.cc
index bb445c4..895ab67 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -28,16 +28,17 @@
 #include "v8.h"
 
 #include "ast.h"
+#include "jump-target-inl.h"
 #include "parser.h"
 #include "scopes.h"
 #include "string-stream.h"
-#include "ast-inl.h"
-#include "jump-target-inl.h"
+#include "stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
-
+unsigned AstNode::current_id_ = 0;
+unsigned AstNode::count_ = 0;
 VariableProxySentinel VariableProxySentinel::this_proxy_(true);
 VariableProxySentinel VariableProxySentinel::identifier_proxy_(false);
 ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
@@ -48,6 +49,8 @@
 // ----------------------------------------------------------------------------
 // All the Accept member functions for each syntax tree node type.
 
+void Slot::Accept(AstVisitor* v) { v->VisitSlot(this); }
+
 #define DECL_ACCEPT(type)                                       \
   void type::Accept(AstVisitor* v) { v->Visit##type(this); }
 AST_NODE_LIST(DECL_ACCEPT)
@@ -115,6 +118,30 @@
 }
 
 
+Assignment::Assignment(Token::Value op,
+                       Expression* target,
+                       Expression* value,
+                       int pos)
+    : op_(op),
+      target_(target),
+      value_(value),
+      pos_(pos),
+      binary_operation_(NULL),
+      compound_load_id_(kNoNumber),
+      assignment_id_(GetNextId()),
+      block_start_(false),
+      block_end_(false),
+      is_monomorphic_(false),
+      receiver_types_(NULL) {
+  ASSERT(Token::IsAssignmentOp(op));
+  if (is_compound()) {
+    binary_operation_ =
+        new BinaryOperation(binary_op(), target, value, pos + 1);
+    compound_load_id_ = GetNextId();
+  }
+}
+
+
 Token::Value Assignment::binary_op() const {
   switch (op_) {
     case Token::ASSIGN_BIT_OR: return Token::BIT_OR;
@@ -139,6 +166,12 @@
 }
 
 
+bool FunctionLiteral::AllowOptimize() {
+  // We can't deal with heap-allocated locals.
+  return scope()->num_heap_slots() == 0;
+}
+
+
 ObjectLiteral::Property::Property(Literal* key, Expression* value) {
   emit_store_ = true;
   key_ = key;
@@ -373,6 +406,267 @@
 
 
 // ----------------------------------------------------------------------------
+// Inlining support
+
+bool Block::IsInlineable() const {
+  const int count = statements_.length();
+  for (int i = 0; i < count; ++i) {
+    if (!statements_[i]->IsInlineable()) return false;
+  }
+  return true;
+}
+
+
+bool ExpressionStatement::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+bool IfStatement::IsInlineable() const {
+  return condition()->IsInlineable() && then_statement()->IsInlineable() &&
+      else_statement()->IsInlineable();
+}
+
+
+bool ReturnStatement::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+bool Conditional::IsInlineable() const {
+  return condition()->IsInlineable() && then_expression()->IsInlineable() &&
+      else_expression()->IsInlineable();
+}
+
+
+bool VariableProxy::IsInlineable() const {
+  return var()->is_global() || var()->IsStackAllocated();
+}
+
+
+bool Assignment::IsInlineable() const {
+  return target()->IsInlineable() && value()->IsInlineable();
+}
+
+
+bool Property::IsInlineable() const {
+  return obj()->IsInlineable() && key()->IsInlineable();
+}
+
+
+bool Call::IsInlineable() const {
+  if (!expression()->IsInlineable()) return false;
+  const int count = arguments()->length();
+  for (int i = 0; i < count; ++i) {
+    if (!arguments()->at(i)->IsInlineable()) return false;
+  }
+  return true;
+}
+
+
+bool CallNew::IsInlineable() const {
+  if (!expression()->IsInlineable()) return false;
+  const int count = arguments()->length();
+  for (int i = 0; i < count; ++i) {
+    if (!arguments()->at(i)->IsInlineable()) return false;
+  }
+  return true;
+}
+
+
+bool CallRuntime::IsInlineable() const {
+  const int count = arguments()->length();
+  for (int i = 0; i < count; ++i) {
+    if (!arguments()->at(i)->IsInlineable()) return false;
+  }
+  return true;
+}
+
+
+bool UnaryOperation::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+bool BinaryOperation::IsInlineable() const {
+  return left()->IsInlineable() && right()->IsInlineable();
+}
+
+
+bool CompareOperation::IsInlineable() const {
+  return left()->IsInlineable() && right()->IsInlineable();
+}
+
+
+bool CompareToNull::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+bool CountOperation::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+// ----------------------------------------------------------------------------
+// Recording of type feedback
+
+void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  // Record type feedback from the oracle in the AST.
+  is_monomorphic_ = oracle->LoadIsMonomorphic(this);
+  if (key()->IsPropertyName()) {
+    if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_ArrayLength)) {
+      is_array_length_ = true;
+    } else {
+      Literal* lit_key = key()->AsLiteral();
+      ASSERT(lit_key != NULL && lit_key->handle()->IsString());
+      Handle<String> name = Handle<String>::cast(lit_key->handle());
+      ZoneMapList* types = oracle->LoadReceiverTypes(this, name);
+      receiver_types_ = types;
+    }
+  } else if (is_monomorphic_) {
+    monomorphic_receiver_type_ = oracle->LoadMonomorphicReceiverType(this);
+  }
+}
+
+
+void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  Property* prop = target()->AsProperty();
+  ASSERT(prop != NULL);
+  is_monomorphic_ = oracle->StoreIsMonomorphic(this);
+  if (prop->key()->IsPropertyName()) {
+    Literal* lit_key = prop->key()->AsLiteral();
+    ASSERT(lit_key != NULL && lit_key->handle()->IsString());
+    Handle<String> name = Handle<String>::cast(lit_key->handle());
+    ZoneMapList* types = oracle->StoreReceiverTypes(this, name);
+    receiver_types_ = types;
+  } else if (is_monomorphic_) {
+    // Record receiver type for monomorphic keyed stores.
+    monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
+  }
+}
+
+
+void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  TypeInfo info = oracle->SwitchType(this);
+  if (info.IsSmi()) {
+    compare_type_ = SMI_ONLY;
+  } else if (info.IsNonPrimitive()) {
+    compare_type_ = OBJECT_ONLY;
+  } else {
+    ASSERT(compare_type_ == NONE);
+  }
+}
+
+
+static bool CallWithoutIC(Handle<JSFunction> target, int arity) {
+  SharedFunctionInfo* info = target->shared();
+  if (target->NeedsArgumentsAdaption()) {
+    // If the number of formal parameters of the target function
+    // does not match the number of arguments we're passing, we
+    // don't want to deal with it.
+    return info->formal_parameter_count() == arity;
+  } else {
+    // If the target doesn't need arguments adaptation, we can call
+    // it directly, but we avoid doing so if it has a custom call
+    // generator, because that is likely to generate better code.
+    return !info->HasBuiltinFunctionId() ||
+        !CallStubCompiler::HasCustomCallGenerator(info->builtin_function_id());
+  }
+}
+
+
+bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
+  holder_ = Handle<JSObject>::null();
+  while (true) {
+    LookupResult lookup;
+    type->LookupInDescriptors(NULL, *name, &lookup);
+    // If the function wasn't found directly in the map, we start
+    // looking upwards through the prototype chain.
+    if (!lookup.IsFound() && type->prototype()->IsJSObject()) {
+      holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
+      type = Handle<Map>(holder()->map());
+    } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
+      target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
+      return CallWithoutIC(target_, arguments()->length());
+    } else {
+      return false;
+    }
+  }
+}
+
+
+bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
+                               Handle<String> name) {
+  target_ = Handle<JSFunction>::null();
+  cell_ = Handle<JSGlobalPropertyCell>::null();
+  LookupResult lookup;
+  global->Lookup(*name, &lookup);
+  if (lookup.IsProperty() && lookup.type() == NORMAL) {
+    cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(&lookup));
+    if (cell_->value()->IsJSFunction()) {
+      Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
+      // If the function is in new space we assume it's more likely to
+      // change and thus prefer the general IC code.
+      if (!Heap::InNewSpace(*candidate)
+          && CallWithoutIC(candidate, arguments()->length())) {
+        target_ = candidate;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
+
+void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  Property* property = expression()->AsProperty();
+  ASSERT(property != NULL);
+  // Specialize for the receiver types seen at runtime.
+  Literal* key = property->key()->AsLiteral();
+  ASSERT(key != NULL && key->handle()->IsString());
+  Handle<String> name = Handle<String>::cast(key->handle());
+  receiver_types_ = oracle->CallReceiverTypes(this, name);
+#ifdef DEBUG
+  if (FLAG_enable_slow_asserts) {
+    if (receiver_types_ != NULL) {
+      int length = receiver_types_->length();
+      for (int i = 0; i < length; i++) {
+        Handle<Map> map = receiver_types_->at(i);
+        ASSERT(!map.is_null() && *map != NULL);
+      }
+    }
+  }
+#endif
+  if (receiver_types_ != NULL && receiver_types_->length() > 0) {
+    Handle<Map> type = receiver_types_->at(0);
+    is_monomorphic_ = oracle->CallIsMonomorphic(this);
+    if (is_monomorphic_) is_monomorphic_ = ComputeTarget(type, name);
+  }
+}
+
+
+void BinaryOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  TypeInfo left = oracle->BinaryType(this, TypeFeedbackOracle::LEFT);
+  TypeInfo right = oracle->BinaryType(this, TypeFeedbackOracle::RIGHT);
+  is_smi_only_ = left.IsSmi() && right.IsSmi();
+}
+
+
+void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  TypeInfo left = oracle->CompareType(this, TypeFeedbackOracle::LEFT);
+  TypeInfo right = oracle->CompareType(this, TypeFeedbackOracle::RIGHT);
+  if (left.IsSmi() && right.IsSmi()) {
+    compare_type_ = SMI_ONLY;
+  } else if (left.IsNonPrimitive() && right.IsNonPrimitive()) {
+    compare_type_ = OBJECT_ONLY;
+  } else {
+    ASSERT(compare_type_ == NONE);
+  }
+}
+
+
+// ----------------------------------------------------------------------------
 // Implementation of AstVisitor
 
 bool AstVisitor::CheckStackOverflow() {
@@ -742,15 +1036,12 @@
 }
 
 
-WhileStatement::WhileStatement(ZoneStringList* labels)
-    : IterationStatement(labels),
-      cond_(NULL),
-      may_have_function_literal_(true) {
-}
-
-
-CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
-    : label_(label), statements_(statements) {
-}
+CaseClause::CaseClause(Expression* label,
+                       ZoneList<Statement*>* statements,
+                       int pos)
+    : label_(label),
+      statements_(statements),
+      position_(pos),
+      compare_type_(NONE) {}
 
 } }  // namespace v8::internal
diff --git a/src/ast.h b/src/ast.h
index 0846dbc..ed447e3 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -75,7 +75,6 @@
   V(FunctionLiteral)                            \
   V(SharedFunctionInfoLiteral)                  \
   V(Conditional)                                \
-  V(Slot)                                       \
   V(VariableProxy)                              \
   V(Literal)                                    \
   V(RegExpLiteral)                              \
@@ -102,10 +101,11 @@
   EXPRESSION_NODE_LIST(V)
 
 // Forward declarations
-class TargetCollector;
-class MaterializedLiteral;
-class DefinitionInfo;
 class BitVector;
+class DefinitionInfo;
+class MaterializedLiteral;
+class TargetCollector;
+class TypeFeedbackOracle;
 
 #define DEF_FORWARD_DECLARATION(type) class type;
 AST_NODE_LIST(DEF_FORWARD_DECLARATION)
@@ -133,6 +133,10 @@
   };
 #undef DECLARE_TYPE_ENUM
 
+  static const int kNoNumber = -1;
+
+  AstNode() : id_(GetNextId()) { count_++; }
+
   virtual ~AstNode() { }
 
   virtual void Accept(AstVisitor* v) = 0;
@@ -150,6 +154,27 @@
   virtual BreakableStatement* AsBreakableStatement() { return NULL; }
   virtual IterationStatement* AsIterationStatement() { return NULL; }
   virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
+  virtual Slot* AsSlot() { return NULL; }
+
+  // True if the node is simple enough for us to inline calls containing it.
+  virtual bool IsInlineable() const { return false; }
+
+  static int Count() { return count_; }
+  static void ResetIds() { current_id_ = 0; }
+  unsigned id() const { return id_; }
+
+ protected:
+  static unsigned GetNextId() { return current_id_++; }
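+  // Reserve a block of n consecutive AST ids and return the first of them.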
+  static unsigned ReserveIdRange(int n) {
+    unsigned tmp = current_id_;
+    current_id_ += n;
+    return tmp;
+  }
+
+ private:
+  static unsigned current_id_;
+  static unsigned count_;
+  unsigned id_;
 };
 
 
@@ -174,6 +199,18 @@
 
 class Expression: public AstNode {
  public:
+  enum Context {
+    // Not assigned a context yet, or else will not be visited during
+    // code generation.
+    kUninitialized,
+    // Evaluated for its side effects.
+    kEffect,
+    // Evaluated for its value (and side effects).
+    kValue,
+    // Evaluated for control flow (and side effects).
+    kTest
+  };
+
   Expression() : bitfields_(0) {}
 
   virtual Expression* AsExpression()  { return this; }
@@ -181,6 +218,10 @@
   virtual bool IsTrivial() { return false; }
   virtual bool IsValidLeftHandSide() { return false; }
 
+  // Helpers for ToBoolean conversion.
+  virtual bool ToBooleanIsTrue() { return false; }
+  virtual bool ToBooleanIsFalse() { return false; }
+
   // Symbols that cannot be parsed as array indices are considered property
   // names.  We do not treat symbols that can be array indexes as property
   // names because [] for string objects is handled only by keyed ICs.
@@ -198,6 +239,24 @@
   // True iff the expression is a literal represented as a smi.
   virtual bool IsSmiLiteral() { return false; }
 
+  // Type feedback information for assignments and properties.
+  virtual bool IsMonomorphic() {
+    UNREACHABLE();
+    return false;
+  }
+  virtual bool IsArrayLength() {
+    UNREACHABLE();
+    return false;
+  }
+  virtual ZoneMapList* GetReceiverTypes() {
+    UNREACHABLE();
+    return NULL;
+  }
+  virtual Handle<Map> GetMonomorphicReceiverType() {
+    UNREACHABLE();
+    return Handle<Map>();
+  }
+
   // Static type information for this expression.
   StaticType* type() { return &type_; }
 
@@ -301,6 +360,10 @@
   // Testers.
   bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
 
+  // Bailout support.
+  int EntryId() const { return entry_id_; }
+  int ExitId() const { return exit_id_; }
+
  protected:
   inline BreakableStatement(ZoneStringList* labels, Type type);
 
@@ -308,6 +371,8 @@
   ZoneStringList* labels_;
   Type type_;
   BreakTarget break_target_;
+  int entry_id_;
+  int exit_id_;
 };
 
 
@@ -327,6 +392,8 @@
     return statements_[0]->StatementAsCountOperation();
   }
 
+  virtual bool IsInlineable() const;
+
   void AddStatement(Statement* statement) { statements_.Add(statement); }
 
   ZoneList<Statement*>* statements() { return &statements_; }
@@ -368,7 +435,10 @@
   virtual IterationStatement* AsIterationStatement() { return this; }
 
   Statement* body() const { return body_; }
-  void set_body(Statement* stmt) { body_ = stmt; }
+
+  // Bailout support.
+  int OsrEntryId() const { return osr_entry_id_; }
+  virtual int ContinueId() const = 0;
 
   // Code generation
   BreakTarget* continue_target()  { return &continue_target_; }
@@ -383,6 +453,7 @@
  private:
   Statement* body_;
   BreakTarget continue_target_;
+  int osr_entry_id_;
 };
 
 
@@ -404,15 +475,21 @@
   int condition_position() { return condition_position_; }
   void set_condition_position(int pos) { condition_position_ = pos; }
 
+  // Bailout support.
+  virtual int ContinueId() const { return continue_id_; }
+  int BackEdgeId() const { return back_edge_id_; }
+
  private:
   Expression* cond_;
   int condition_position_;
+  int continue_id_;
+  int back_edge_id_;
 };
 
 
 class WhileStatement: public IterationStatement {
  public:
-  explicit WhileStatement(ZoneStringList* labels);
+  explicit inline WhileStatement(ZoneStringList* labels);
 
   DECLARE_NODE_TYPE(WhileStatement)
 
@@ -429,10 +506,15 @@
     may_have_function_literal_ = value;
   }
 
+  // Bailout support.
+  virtual int ContinueId() const { return EntryId(); }
+  int BodyId() const { return body_id_; }
+
  private:
   Expression* cond_;
   // True if there is a function literal subexpression in the condition.
   bool may_have_function_literal_;
+  int body_id_;
 };
 
 
@@ -453,11 +535,8 @@
   }
 
   Statement* init() const { return init_; }
-  void set_init(Statement* stmt) { init_ = stmt; }
   Expression* cond() const { return cond_; }
-  void set_cond(Expression* expr) { cond_ = expr; }
   Statement* next() const { return next_; }
-  void set_next(Statement* stmt) { next_ = stmt; }
 
   bool may_have_function_literal() const {
     return may_have_function_literal_;
@@ -466,6 +545,10 @@
     may_have_function_literal_ = value;
   }
 
+  // Bailout support.
+  virtual int ContinueId() const { return continue_id_; }
+  int BodyId() const { return body_id_; }
+
   bool is_fast_smi_loop() { return loop_variable_ != NULL; }
   Variable* loop_variable() { return loop_variable_; }
   void set_loop_variable(Variable* var) { loop_variable_ = var; }
@@ -477,6 +560,8 @@
   // True if there is a function literal subexpression in the condition.
   bool may_have_function_literal_;
   Variable* loop_variable_;
+  int continue_id_;
+  int body_id_;
 };
 
 
@@ -495,9 +580,14 @@
   Expression* each() const { return each_; }
   Expression* enumerable() const { return enumerable_; }
 
+  // Bailout support.
+  int AssignmentId() const { return assignment_id_; }
+  virtual int ContinueId() const { return EntryId(); }
+
  private:
   Expression* each_;
   Expression* enumerable_;
+  int assignment_id_;
 };
 
 
@@ -508,11 +598,13 @@
 
   DECLARE_NODE_TYPE(ExpressionStatement)
 
+  virtual bool IsInlineable() const;
+
   virtual Assignment* StatementAsSimpleAssignment();
   virtual CountOperation* StatementAsCountOperation();
 
   void set_expression(Expression* e) { expression_ = e; }
-  Expression* expression() { return expression_; }
+  Expression* expression() const { return expression_; }
 
  private:
   Expression* expression_;
@@ -554,7 +646,8 @@
 
   DECLARE_NODE_TYPE(ReturnStatement)
 
-  Expression* expression() { return expression_; }
+  Expression* expression() const { return expression_; }
+  virtual bool IsInlineable() const;
 
  private:
   Expression* expression_;
@@ -588,7 +681,7 @@
 
 class CaseClause: public ZoneObject {
  public:
-  CaseClause(Expression* label, ZoneList<Statement*>* statements);
+  CaseClause(Expression* label, ZoneList<Statement*>* statements, int pos);
 
   bool is_default() const { return label_ == NULL; }
   Expression* label() const {
@@ -598,10 +691,21 @@
   JumpTarget* body_target() { return &body_target_; }
   ZoneList<Statement*>* statements() const { return statements_; }
 
+  int position() { return position_; }
+  void set_position(int pos) { position_ = pos; }
+
+  // Type feedback information.
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
+  bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
+
  private:
   Expression* label_;
   JumpTarget body_target_;
   ZoneList<Statement*>* statements_;
+  int position_;
+  enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
+  CompareTypeFeedback compare_type_;
 };
 
 
@@ -637,23 +741,31 @@
               Statement* else_statement)
       : condition_(condition),
         then_statement_(then_statement),
-        else_statement_(else_statement) { }
+        else_statement_(else_statement),
+        then_id_(GetNextId()),
+        else_id_(GetNextId()) {
+  }
 
   DECLARE_NODE_TYPE(IfStatement)
 
+  virtual bool IsInlineable() const;
+
   bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
   bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
 
   Expression* condition() const { return condition_; }
   Statement* then_statement() const { return then_statement_; }
-  void set_then_statement(Statement* stmt) { then_statement_ = stmt; }
   Statement* else_statement() const { return else_statement_; }
-  void set_else_statement(Statement* stmt) { else_statement_ = stmt; }
+
+  int ThenId() const { return then_id_; }
+  int ElseId() const { return else_id_; }
 
  private:
   Expression* condition_;
   Statement* then_statement_;
   Statement* else_statement_;
+  int then_id_;
+  int else_id_;
 };
 
 
@@ -744,6 +856,8 @@
 class EmptyStatement: public Statement {
  public:
   DECLARE_NODE_TYPE(EmptyStatement)
+
+  virtual bool IsInlineable() const { return true; }
 };
 
 
@@ -754,6 +868,7 @@
   DECLARE_NODE_TYPE(Literal)
 
   virtual bool IsTrivial() { return true; }
+  virtual bool IsInlineable() const { return true; }
   virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
 
   // Check if this literal is identical to the other literal.
@@ -769,6 +884,14 @@
     return false;
   }
 
+  Handle<String> AsPropertyName() {
+    ASSERT(IsPropertyName());
+    return Handle<String>::cast(handle_);
+  }
+
+  virtual bool ToBooleanIsTrue() { return handle_->ToBoolean()->IsTrue(); }
+  virtual bool ToBooleanIsFalse() { return handle_->ToBoolean()->IsFalse(); }
+
   // Identity testers.
   bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
   bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
@@ -906,16 +1029,21 @@
                int depth)
       : MaterializedLiteral(literal_index, is_simple, depth),
         constant_elements_(constant_elements),
-        values_(values) {}
+        values_(values),
+        first_element_id_(ReserveIdRange(values->length())) {}
 
   DECLARE_NODE_TYPE(ArrayLiteral)
 
   Handle<FixedArray> constant_elements() const { return constant_elements_; }
   ZoneList<Expression*>* values() const { return values_; }
 
+  // Return an AST id for an element that is used in simulate instructions.
+  int GetIdForElement(int i) { return first_element_id_ + i; }
+
  private:
   Handle<FixedArray> constant_elements_;
   ZoneList<Expression*>* values_;
+  int first_element_id_;
 };
 
 
@@ -967,6 +1095,8 @@
     return is_this_ || is_trivial_;
   }
 
+  virtual bool IsInlineable() const;
+
   bool IsVariable(Handle<String> n) {
     return !is_this() && name().is_identical_to(n);
   }
@@ -1044,7 +1174,9 @@
     ASSERT(var != NULL);
   }
 
-  DECLARE_NODE_TYPE(Slot)
+  virtual void Accept(AstVisitor* v);
+
+  virtual Slot* AsSlot() { return this; }
 
   bool IsStackAllocated() { return type_ == PARAMETER || type_ == LOCAL; }
 
@@ -1069,17 +1201,41 @@
   // of the resolved Reference.
   enum Type { NORMAL, SYNTHETIC };
   Property(Expression* obj, Expression* key, int pos, Type type = NORMAL)
-      : obj_(obj), key_(key), pos_(pos), type_(type) { }
+      : obj_(obj),
+        key_(key),
+        pos_(pos),
+        type_(type),
+        is_monomorphic_(false),
+        receiver_types_(NULL),
+        is_array_length_(false),
+        is_arguments_access_(false) { }
 
   DECLARE_NODE_TYPE(Property)
 
   virtual bool IsValidLeftHandSide() { return true; }
+  virtual bool IsInlineable() const;
 
   Expression* obj() const { return obj_; }
   Expression* key() const { return key_; }
   int position() const { return pos_; }
   bool is_synthetic() const { return type_ == SYNTHETIC; }
 
+  // Marks that this is actually an argument rewritten to a keyed property
+  // accessing the argument through the arguments shadow object.
+  void set_is_arguments_access(bool is_arguments_access) {
+    is_arguments_access_ = is_arguments_access;
+  }
+  bool is_arguments_access() const { return is_arguments_access_; }
+
+  // Type feedback information.
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  virtual bool IsMonomorphic() { return is_monomorphic_; }
+  virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
+  virtual bool IsArrayLength() { return is_array_length_; }
+  virtual Handle<Map> GetMonomorphicReceiverType() {
+    return monomorphic_receiver_type_;
+  }
+
+  // Returns a singleton property access on 'this'.  Used
   // during preparsing.
   static Property* this_property() { return &this_property_; }
@@ -1090,6 +1246,12 @@
   int pos_;
   Type type_;
 
+  bool is_monomorphic_;
+  ZoneMapList* receiver_types_;
+  bool is_array_length_;
+  bool is_arguments_access_;
+  Handle<Map> monomorphic_receiver_type_;
+
   // Dummy property used during preparsing.
   static Property this_property_;
 };
@@ -1098,21 +1260,55 @@
 class Call: public Expression {
  public:
   Call(Expression* expression, ZoneList<Expression*>* arguments, int pos)
-      : expression_(expression), arguments_(arguments), pos_(pos) { }
+      : expression_(expression),
+        arguments_(arguments),
+        pos_(pos),
+        is_monomorphic_(false),
+        receiver_types_(NULL),
+        return_id_(GetNextId()) {
+  }
 
   DECLARE_NODE_TYPE(Call)
 
+  virtual bool IsInlineable() const;
+
   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
   int position() { return pos_; }
 
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
+  virtual bool IsMonomorphic() { return is_monomorphic_; }
+  Handle<JSFunction> target() { return target_; }
+  Handle<JSObject> holder() { return holder_; }
+  Handle<JSGlobalPropertyCell> cell() { return cell_; }
+
+  bool ComputeTarget(Handle<Map> type, Handle<String> name);
+  bool ComputeGlobalTarget(Handle<GlobalObject> global, Handle<String> name);
+
+  // Bailout support.
+  int ReturnId() const { return return_id_; }
+
   static Call* sentinel() { return &sentinel_; }
 
+#ifdef DEBUG
+  // Used to assert that the FullCodeGenerator records the return site.
+  bool return_is_recorded_;
+#endif
+
  private:
   Expression* expression_;
   ZoneList<Expression*>* arguments_;
   int pos_;
 
+  bool is_monomorphic_;
+  ZoneMapList* receiver_types_;
+  Handle<JSFunction> target_;
+  Handle<JSObject> holder_;
+  Handle<JSGlobalPropertyCell> cell_;
+
+  int return_id_;
+
   static Call sentinel_;
 };
 
@@ -1124,6 +1320,8 @@
 
   DECLARE_NODE_TYPE(CallNew)
 
+  virtual bool IsInlineable() const;
+
   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
   int position() { return pos_; }
@@ -1148,6 +1346,8 @@
 
   DECLARE_NODE_TYPE(CallRuntime)
 
+  virtual bool IsInlineable() const;
+
   Handle<String> name() const { return name_; }
   Runtime::Function* function() const { return function_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -1169,6 +1369,8 @@
 
   DECLARE_NODE_TYPE(UnaryOperation)
 
+  virtual bool IsInlineable() const;
+
   virtual bool ResultOverwriteAllowed();
 
   Token::Value op() const { return op_; }
@@ -1186,8 +1388,11 @@
                   Expression* left,
                   Expression* right,
                   int pos)
-      : op_(op), left_(left), right_(right), pos_(pos) {
+      : op_(op), left_(left), right_(right), pos_(pos), is_smi_only_(false) {
     ASSERT(Token::IsBinaryOp(op));
+    right_id_ = (op == Token::AND || op == Token::OR)
+        ? GetNextId()
+        : AstNode::kNoNumber;
   }
 
   // Create the binary operation corresponding to a compound assignment.
@@ -1195,6 +1400,8 @@
 
   DECLARE_NODE_TYPE(BinaryOperation)
 
+  virtual bool IsInlineable() const;
+
   virtual bool ResultOverwriteAllowed();
 
   Token::Value op() const { return op_; }
@@ -1202,11 +1409,22 @@
   Expression* right() const { return right_; }
   int position() const { return pos_; }
 
+  // Type feedback information.
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  bool IsSmiOnly() const { return is_smi_only_; }
+
+  // Bailout support.
+  int RightId() const { return right_id_; }
+
  private:
   Token::Value op_;
   Expression* left_;
   Expression* right_;
   int pos_;
+  bool is_smi_only_;
+  // The short-circuit logical operations have an AST ID for their
+  // right-hand subexpression.
+  int right_id_;
 };
 
 
@@ -1233,7 +1451,9 @@
 class CountOperation: public Expression {
  public:
   CountOperation(bool is_prefix, IncrementOperation* increment, int pos)
-      : is_prefix_(is_prefix), increment_(increment), pos_(pos) { }
+      : is_prefix_(is_prefix), increment_(increment), pos_(pos),
+        assignment_id_(GetNextId()) {
+  }
 
   DECLARE_NODE_TYPE(CountOperation)
 
@@ -1251,10 +1471,16 @@
 
   virtual void MarkAsStatement() { is_prefix_ = true; }
 
+  virtual bool IsInlineable() const;
+
+  // Bailout support.
+  int AssignmentId() const { return assignment_id_; }
+
  private:
   bool is_prefix_;
   IncrementOperation* increment_;
   int pos_;
+  int assignment_id_;
 };
 
 
@@ -1264,7 +1490,7 @@
                    Expression* left,
                    Expression* right,
                    int pos)
-      : op_(op), left_(left), right_(right), pos_(pos) {
+      : op_(op), left_(left), right_(right), pos_(pos), compare_type_(NONE) {
     ASSERT(Token::IsCompareOp(op));
   }
 
@@ -1275,11 +1501,21 @@
   Expression* right() const { return right_; }
   int position() const { return pos_; }
 
+  virtual bool IsInlineable() const;
+
+  // Type feedback information.
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
+  bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
+
  private:
   Token::Value op_;
   Expression* left_;
   Expression* right_;
   int pos_;
+
+  enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
+  CompareTypeFeedback compare_type_;
 };
 
 
@@ -1290,6 +1526,8 @@
 
   DECLARE_NODE_TYPE(CompareToNull)
 
+  virtual bool IsInlineable() const;
+
   bool is_strict() const { return is_strict_; }
   Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
   Expression* expression() const { return expression_; }
@@ -1311,16 +1549,24 @@
         then_expression_(then_expression),
         else_expression_(else_expression),
         then_expression_position_(then_expression_position),
-        else_expression_position_(else_expression_position) { }
+        else_expression_position_(else_expression_position),
+        then_id_(GetNextId()),
+        else_id_(GetNextId()) {
+  }
 
   DECLARE_NODE_TYPE(Conditional)
 
+  virtual bool IsInlineable() const;
+
   Expression* condition() const { return condition_; }
   Expression* then_expression() const { return then_expression_; }
   Expression* else_expression() const { return else_expression_; }
 
-  int then_expression_position() { return then_expression_position_; }
-  int else_expression_position() { return else_expression_position_; }
+  int then_expression_position() const { return then_expression_position_; }
+  int else_expression_position() const { return else_expression_position_; }
+
+  int ThenId() const { return then_id_; }
+  int ElseId() const { return else_id_; }
 
  private:
   Expression* condition_;
@@ -1328,19 +1574,19 @@
   Expression* else_expression_;
   int then_expression_position_;
   int else_expression_position_;
+  int then_id_;
+  int else_id_;
 };
 
 
 class Assignment: public Expression {
  public:
-  Assignment(Token::Value op, Expression* target, Expression* value, int pos)
-      : op_(op), target_(target), value_(value), pos_(pos),
-        block_start_(false), block_end_(false) {
-    ASSERT(Token::IsAssignmentOp(op));
-  }
+  Assignment(Token::Value op, Expression* target, Expression* value, int pos);
 
   DECLARE_NODE_TYPE(Assignment)
 
+  virtual bool IsInlineable() const;
+
   Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
 
   Token::Value binary_op() const;
@@ -1349,6 +1595,8 @@
   Expression* target() const { return target_; }
   Expression* value() const { return value_; }
   int position() { return pos_; }
+  BinaryOperation* binary_operation() const { return binary_operation_; }
+
   // This check relies on the definition order of token in token.h.
   bool is_compound() const { return op() > Token::ASSIGN; }
 
@@ -1361,13 +1609,33 @@
   void mark_block_start() { block_start_ = true; }
   void mark_block_end() { block_end_ = true; }
 
+  // Type feedback information.
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  virtual bool IsMonomorphic() { return is_monomorphic_; }
+  virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
+  virtual Handle<Map> GetMonomorphicReceiverType() {
+    return monomorphic_receiver_type_;
+  }
+
+  // Bailout support.
+  int CompoundLoadId() const { return compound_load_id_; }
+  int AssignmentId() const { return assignment_id_; }
+
  private:
   Token::Value op_;
   Expression* target_;
   Expression* value_;
   int pos_;
+  BinaryOperation* binary_operation_;
+  int compound_load_id_;
+  int assignment_id_;
+
   bool block_start_;
   bool block_end_;
+
+  bool is_monomorphic_;
+  ZoneMapList* receiver_types_;
+  Handle<Map> monomorphic_receiver_type_;
 };
 
 
@@ -1417,11 +1685,7 @@
         function_token_position_(RelocInfo::kNoPosition),
         inferred_name_(Heap::empty_string()),
         try_full_codegen_(false),
-        pretenure_(false) {
-#ifdef DEBUG
-    already_compiled_ = false;
-#endif
-  }
+        pretenure_(false) { }
 
   DECLARE_NODE_TYPE(FunctionLiteral)
 
@@ -1446,6 +1710,7 @@
   int num_parameters() { return num_parameters_; }
 
   bool AllowsLazyCompilation();
+  bool AllowOptimize();
 
   Handle<String> debug_name() const {
     if (name_->length() > 0) return name_;
@@ -1463,13 +1728,6 @@
   bool pretenure() { return pretenure_; }
   void set_pretenure(bool value) { pretenure_ = value; }
 
-#ifdef DEBUG
-  void mark_as_compiled() {
-    ASSERT(!already_compiled_);
-    already_compiled_ = true;
-  }
-#endif
-
  private:
   Handle<String> name_;
   Scope* scope_;
@@ -1487,9 +1745,6 @@
   Handle<String> inferred_name_;
   bool try_full_codegen_;
   bool pretenure_;
-#ifdef DEBUG
-  bool already_compiled_;
-#endif
 };
 
 
@@ -1894,8 +2149,12 @@
   // node, calling SetStackOverflow will make sure that the visitor
   // bails out without visiting more nodes.
   void SetStackOverflow() { stack_overflow_ = true; }
+  void ClearStackOverflow() { stack_overflow_ = false; }
 
-  // Individual nodes
+  // Nodes not appearing in the AST, including slots.
+  virtual void VisitSlot(Slot* node) { UNREACHABLE(); }
+
+  // Individual AST nodes.
 #define DEF_VISIT(type)                         \
   virtual void Visit##type(type* node) = 0;
   AST_NODE_LIST(DEF_VISIT)
diff --git a/src/atomicops.h b/src/atomicops.h
new file mode 100644
index 0000000..72a0d0f
--- /dev/null
+++ b/src/atomicops.h
@@ -0,0 +1,165 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The routines exported by this module are subtle.  If you use them, even if
+// you get the code right, it will depend on careful reasoning about atomicity
+// and memory ordering; it will be less readable, and harder to maintain.  If
+// you plan to use these routines, you should have a good reason, such as solid
+// evidence that performance would otherwise suffer, or there being no
+// alternative.  You should assume only properties explicitly guaranteed by the
+// specifications in this file.  You are almost certainly _not_ writing code
+// just for the x86; if you assume x86 semantics, x86 hardware bugs and
+// implementations on other architectures will cause your code to break.  If you
+// do not know what you are doing, avoid these routines, and use a Mutex.
+//
+// It is incorrect to make direct assignments to/from an atomic variable.
+// You should use one of the Load or Store routines.  The NoBarrier
+// versions are provided when no barriers are needed:
+//   NoBarrier_Store()
+//   NoBarrier_Load()
+// Although there is currently no compiler enforcement, you are encouraged
+// to use them.
+//
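+// For illustration only, a minimal usage sketch built from the routines
+// declared below (the variable name `g_flag` is hypothetical, not part of
+// this module):
+//
+//   Atomic32 g_flag = 0;
+//   NoBarrier_Store(&g_flag, 1);               // instead of: g_flag = 1;
+//   Atomic32 value = NoBarrier_Load(&g_flag);  // instead of: value = g_flag;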
+
+#ifndef V8_ATOMICOPS_H_
+#define V8_ATOMICOPS_H_
+
+#include "../include/v8.h"
+#include "globals.h"
+
+namespace v8 {
+namespace internal {
+
+typedef int32_t Atomic32;
+#ifdef V8_HOST_ARCH_64_BIT
+// We need to be able to go between Atomic64 and AtomicWord implicitly.  This
+// means Atomic64 and AtomicWord should be the same type on 64-bit.
+#if defined(__APPLE__)
+// MacOS is an exception to the implicit conversion rule above,
+// because it uses long for intptr_t.
+typedef int64_t Atomic64;
+#else
+typedef intptr_t Atomic64;
+#endif
+#endif
+
+// Use AtomicWord for a machine-sized pointer.  It will use the Atomic32 or
+// Atomic64 routines below, depending on your architecture.
+typedef intptr_t AtomicWord;
+
+// Atomically execute:
+//      result = *ptr;
+//      if (*ptr == old_value)
+//        *ptr = new_value;
+//      return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr".
+//
+// This routine implies no memory barriers.
+Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                  Atomic32 old_value,
+                                  Atomic32 new_value);
+
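+// For illustration only, a hedged sketch of a typical compare-and-swap
+// increment loop built on the routine above (the variable name `counter`
+// is hypothetical):
+//
+//   Atomic32 old_value;
+//   do {
+//     old_value = NoBarrier_Load(&counter);
+//   } while (NoBarrier_CompareAndSwap(&counter, old_value,
+//                                     old_value + 1) != old_value);
+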
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
+
+Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                 Atomic32 increment);
+
+// The following lower-level operations are typically useful only to people
+// implementing higher-level synchronization operations like spinlocks,
+// mutexes, and condition-variables.  They combine CompareAndSwap(), a load, or
+// a store with appropriate memory-ordering instructions.  "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+
+void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
+void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+
+Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+Atomic32 Acquire_Load(volatile const Atomic32* ptr);
+Atomic32 Release_Load(volatile const Atomic32* ptr);
+
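+// A hedged message-passing sketch under the acquire/release semantics
+// described above (the names `g_payload` and `g_ready` are illustrative
+// only):
+//
+//   // Producer:
+//   g_payload = ComputePayload();  // plain store
+//   Release_Store(&g_ready, 1);    // cannot move before the payload store
+//
+//   // Consumer:
+//   if (Acquire_Load(&g_ready)) {  // later reads cannot move above this
+//     Use(g_payload);              // guaranteed to see the payload
+//   }
+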
+// 64-bit atomic operations (only available on 64-bit processors).
+#ifdef V8_HOST_ARCH_64_BIT
+Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                  Atomic64 old_value,
+                                  Atomic64 new_value);
+Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+
+Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
+void Release_Store(volatile Atomic64* ptr, Atomic64 value);
+Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+Atomic64 Acquire_Load(volatile const Atomic64* ptr);
+Atomic64 Release_Load(volatile const Atomic64* ptr);
+#endif  // V8_HOST_ARCH_64_BIT
+
+} }  // namespace v8::internal
+
+// Include our platform specific implementation.
+#if defined(_MSC_VER) && \
+  (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
+#include "atomicops_internals_x86_msvc.h"
+#elif defined(__APPLE__) && \
+  (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
+#include "atomicops_internals_x86_macosx.h"
+#elif defined(__GNUC__) && \
+  (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
+#include "atomicops_internals_x86_gcc.h"
+#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
+#include "atomicops_internals_arm_gcc.h"
+#else
+#error "Atomic operations are not supported on your platform"
+#endif
+
+#endif  // V8_ATOMICOPS_H_
diff --git a/src/atomicops_internals_arm_gcc.h b/src/atomicops_internals_arm_gcc.h
new file mode 100644
index 0000000..6c30256
--- /dev/null
+++ b/src/atomicops_internals_arm_gcc.h
@@ -0,0 +1,145 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
+
+namespace v8 {
+namespace internal {
+
+// 0xffff0fc0 is the hard coded address of a function provided by
+// the kernel which implements an atomic compare-exchange. On older
+// ARM architecture revisions (pre-v6) this may be implemented using
+// a syscall. This address is stable, and in active use (hard coded)
+// by at least glibc-2.7 and the Android C library.
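+// The helper returns 0 if the store succeeded (*ptr matched old_value) and
+// nonzero otherwise, which is how the loops below interpret its result.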
+typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
+                                           Atomic32 new_value,
+                                           volatile Atomic32* ptr);
+LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
+    (LinuxKernelCmpxchgFunc) 0xffff0fc0;
+
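+// 0xffff0fa0 is the hard coded address of the kernel-provided memory
+// barrier helper, exposed in the same way as the cmpxchg helper above.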
+typedef void (*LinuxKernelMemoryBarrierFunc)(void);
+LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
+    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
+
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value = *ptr;
+  do {
+    if (!pLinuxKernelCmpxchg(old_value, new_value,
+                             const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (pLinuxKernelCmpxchg(old_value, new_value,
+                               const_cast<Atomic32*>(ptr)));
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  for (;;) {
+    // Atomic exchange the old value with an incremented one.
+    Atomic32 old_value = *ptr;
+    Atomic32 new_value = old_value + increment;
+    if (pLinuxKernelCmpxchg(old_value, new_value,
+                            const_cast<Atomic32*>(ptr)) == 0) {
+      // The exchange took place as expected.
+      return new_value;
+    }
+    // Otherwise, *ptr changed mid-loop and we need to retry.
+  }
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() {
+  pLinuxKernelMemoryBarrier();
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/src/atomicops_internals_x86_gcc.cc b/src/atomicops_internals_x86_gcc.cc
new file mode 100644
index 0000000..a572564
--- /dev/null
+++ b/src/atomicops_internals_x86_gcc.cc
@@ -0,0 +1,126 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This module gets enough CPU information to optimize the
+// atomicops module on x86.
+
+#include <string.h>
+
+#include "atomicops.h"
+
+// This file only makes sense with atomicops_internals_x86_gcc.h -- it
+// depends on structs that are defined in that file.  If atomicops.h
+// doesn't sub-include that file, then we aren't needed, and shouldn't
+// try to do anything.
+#ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+// Inline cpuid instruction.  In PIC compilations, %ebx contains the address
+// of the global offset table.  To avoid breaking such executables, this code
+// must preserve that register's value across cpuid instructions.
+#if defined(__i386__)
+#define cpuid(a, b, c, d, inp) \
+  asm("mov %%ebx, %%edi\n"     \
+      "cpuid\n"                \
+      "xchg %%edi, %%ebx\n"    \
+      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#elif defined(__x86_64__)
+#define cpuid(a, b, c, d, inp) \
+  asm("mov %%rbx, %%rdi\n"     \
+      "cpuid\n"                \
+      "xchg %%rdi, %%rbx\n"    \
+      : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#endif
+
+#if defined(cpuid)        // initialize the struct only on x86
+
+// Set the flags so that code runs correctly and conservatively even if we
+// haven't been initialized yet: at that point the process is probably still
+// single-threaded, so these default values should be safe.
+struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
+  false,          // bug can't exist before process spawns multiple threads
+  false,          // no SSE2
+};
+
+// Initialize the AtomicOps_Internalx86CPUFeatures struct.
+static void AtomicOps_Internalx86CPUFeaturesInit() {
+  uint32_t eax;
+  uint32_t ebx;
+  uint32_t ecx;
+  uint32_t edx;
+
+  // Get vendor string (issue CPUID with eax = 0)
+  cpuid(eax, ebx, ecx, edx, 0);
+  char vendor[13];
+  memcpy(vendor, &ebx, 4);
+  memcpy(vendor + 4, &edx, 4);
+  memcpy(vendor + 8, &ecx, 4);
+  vendor[12] = 0;
+
+  // get feature flags in ecx/edx, and family/model in eax
+  cpuid(eax, ebx, ecx, edx, 1);
+
+  int family = (eax >> 8) & 0xf;        // family and model fields
+  int model = (eax >> 4) & 0xf;
+  if (family == 0xf) {                  // use extended family and model fields
+    family += (eax >> 20) & 0xff;
+    model += ((eax >> 16) & 0xf) << 4;
+  }
+
+  // Opteron Rev E has a bug in which on very rare occasions a locked
+  // instruction doesn't act as a read-acquire barrier if followed by a
+  // non-locked read-modify-write instruction.  Rev F has this bug in
+  // pre-release versions, but not in versions released to customers,
+  // so we test only for Rev E, which is family 15, model 32..63 inclusive.
+  if (strcmp(vendor, "AuthenticAMD") == 0 &&       // AMD
+      family == 15 &&
+      32 <= model && model <= 63) {
+    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
+  } else {
+    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
+  }
+
+  // edx bit 26 is SSE2, which tells us whether we can use mfence.
+  AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
+}
+
+namespace {
+
+class AtomicOpsx86Initializer {
+ public:
+  AtomicOpsx86Initializer() {
+    AtomicOps_Internalx86CPUFeaturesInit();
+  }
+};
+
+// A global to get us initialized on startup via static initialization :/
+AtomicOpsx86Initializer g_initer;
+
+}  // namespace
+
+#endif  // if x86
+
+#endif  // ifdef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/src/atomicops_internals_x86_gcc.h b/src/atomicops_internals_x86_gcc.h
new file mode 100644
index 0000000..3f17fa0
--- /dev/null
+++ b/src/atomicops_internals_x86_gcc.h
@@ -0,0 +1,287 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
+#define V8_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+// This struct is not part of the public API of this module; clients may not
+// use it.
+// Features of this x86.  Values may not be correct before main() is run,
+// but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
+                             // after acquire compare-and-swap.
+  bool has_sse2;             // Processor has SSE2.
+};
+extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
+
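+// A compiler-only barrier: it prevents the compiler from reordering memory
+// accesses across this point, but emits no instructions and does not
+// constrain the CPU.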
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+namespace v8 {
+namespace internal {
+
+// 32-bit low-level operations on any platform.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev;
+  __asm__ __volatile__("lock; cmpxchgl %1,%2"
+                       : "=a" (prev)
+                       : "q" (new_value), "m" (*ptr), "0" (old_value)
+                       : "memory");
+  return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
+                       : "=r" (new_value)
+                       : "m" (*ptr), "0" (new_value)
+                       : "memory");
+  return new_value;  // Now it's the previous value.
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 temp = increment;
+  __asm__ __volatile__("lock; xaddl %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now holds the old value of *ptr
+  return temp + increment;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  Atomic32 temp = increment;
+  __asm__ __volatile__("lock; xaddl %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now holds the old value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return temp + increment;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+#if defined(__x86_64__)
+
+// 64-bit implementations of memory barrier can be simpler, because
+// "mfence" is guaranteed to exist.
+inline void MemoryBarrier() {
+  __asm__ __volatile__("mfence" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+#else
+
+inline void MemoryBarrier() {
+  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+    __asm__ __volatile__("mfence" : : : "memory");
+  } else {  // mfence is faster but not present on PIII
+    Atomic32 x = 0;
+    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
+  }
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+    *ptr = value;
+    __asm__ __volatile__("mfence" : : : "memory");
+  } else {
+    NoBarrier_AtomicExchange(ptr, value);  // acts as a barrier on PIII
+  }
+}
+#endif
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ATOMICOPS_COMPILER_BARRIER();
+  *ptr = value;  // An x86 store acts as a release barrier.
+  // See comments in Atomic64 version of Release_Store(), below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
+  // See comments in Atomic64 version of Release_Store(), below.
+  ATOMICOPS_COMPILER_BARRIER();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#if defined(__x86_64__)
+
+// 64-bit low-level operations on 64-bit platform.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev;
+  __asm__ __volatile__("lock; cmpxchgq %1,%2"
+                       : "=a" (prev)
+                       : "q" (new_value), "m" (*ptr), "0" (old_value)
+                       : "memory");
+  return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
+                       : "=r" (new_value)
+                       : "m" (*ptr), "0" (new_value)
+                       : "memory");
+  return new_value;  // Now it's the previous value.
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 temp = increment;
+  __asm__ __volatile__("lock; xaddq %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now contains the previous value of *ptr
+  return temp + increment;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  Atomic64 temp = increment;
+  __asm__ __volatile__("lock; xaddq %0,%1"
+                       : "+r" (temp), "+m" (*ptr)
+                       : : "memory");
+  // temp now contains the previous value of *ptr
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return temp + increment;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ATOMICOPS_COMPILER_BARRIER();
+
+  *ptr = value;  // An x86 store acts as a release barrier
+                 // for current AMD/Intel chips as of Jan 2008.
+                 // See also Acquire_Load(), below.
+
+  // When new chips come out, check:
+  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chapter 7: Multiple-processor management,
+  //  Section 7.2, Memory Ordering.
+  // Last seen at:
+  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
+  //
+  // x86 stores/loads fail to act as barriers for a few instructions (clflush
+  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
+  // not generated by the compiler, and are rare.  Users of these instructions
+  // need to know about cache behaviour in any case since all of these involve
+  // either flushing cache lines or non-temporal cache hints.
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
+                          // for current AMD/Intel chips as of Jan 2008.
+                          // See also Release_Store(), above.
+  ATOMICOPS_COMPILER_BARRIER();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+    __asm__ __volatile__("lfence" : : : "memory");
+  }
+  return x;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+#endif  // defined(__x86_64__)
+
+} }  // namespace v8::internal
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif  // V8_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/src/atomicops_internals_x86_macosx.h b/src/atomicops_internals_x86_macosx.h
new file mode 100644
index 0000000..2bac006
--- /dev/null
+++ b/src/atomicops_internals_x86_macosx.h
@@ -0,0 +1,301 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+
+#include <libkern/OSAtomic.h>
+
+namespace v8 {
+namespace internal {
+
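+// OSAtomicCompareAndSwap32() only reports success or failure, so the CAS
+// wrappers below loop to recover the previous value: on failure they
+// re-read *ptr and retry while it still equals old_value (covering
+// spurious failures), which lets them return the observed previous value
+// just like the GCC x86 implementation does.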
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap32(old_value, new_value,
+                                 const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  do {
+    old_value = *ptr;
+  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
+                                     const_cast<Atomic32*>(ptr)));
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+                                          Atomic32 increment) {
+  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+                                        Atomic32 increment) {
+  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline void MemoryBarrier() {
+  OSMemoryBarrier();
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
+                                        const_cast<Atomic32*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
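+// As noted for the 64-bit variant below, the libkern interface does not
+// distinguish between acquire and release barriers, so the release CAS
+// simply reuses the barrier version.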
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#ifdef __LP64__
+
+// 64-bit implementation on 64-bit platform
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap64(old_value, new_value,
+                                 const_cast<Atomic64*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+                                         Atomic64 new_value) {
+  Atomic64 old_value;
+  do {
+    old_value = *ptr;
+  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
+                                     const_cast<Atomic64*>(ptr)));
+  return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+                                          Atomic64 increment) {
+  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+                                        Atomic64 increment) {
+  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 prev_value;
+  do {
+    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
+                                        const_cast<Atomic64*>(ptr))) {
+      return old_value;
+    }
+    prev_value = *ptr;
+  } while (prev_value == old_value);
+  return prev_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  // The lib kern interface does not distinguish between
+  // Acquire and Release memory barriers; they are equivalent.
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#endif  // defined(__LP64__)
+
+// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
+// different types on the Mac, even when they are the same size.  We need to
+// explicitly cast from AtomicWord to Atomic32/64 to implement the AtomicWord
+// interface.
+#ifdef __LP64__
+#define AtomicWordCastType Atomic64
+#else
+#define AtomicWordCastType Atomic32
+#endif
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+                                           AtomicWord old_value,
+                                           AtomicWord new_value) {
+  return NoBarrier_CompareAndSwap(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
+      old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+                                           AtomicWord new_value) {
+  return NoBarrier_AtomicExchange(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                            AtomicWord increment) {
+  return NoBarrier_AtomicIncrement(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                          AtomicWord increment) {
+  return Barrier_AtomicIncrement(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return v8::internal::Acquire_CompareAndSwap(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
+      old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return v8::internal::Release_CompareAndSwap(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
+      old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+  NoBarrier_Store(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return v8::internal::Acquire_Store(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return v8::internal::Release_Store(
+      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+  return NoBarrier_Load(
+      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+  return v8::internal::Acquire_Load(
+      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+  return v8::internal::Release_Load(
+      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
+}
+
+#undef AtomicWordCastType
+
+} }  // namespace v8::internal
+
+#endif  // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
diff --git a/src/atomicops_internals_x86_msvc.h b/src/atomicops_internals_x86_msvc.h
new file mode 100644
index 0000000..fcf6a65
--- /dev/null
+++ b/src/atomicops_internals_x86_msvc.h
@@ -0,0 +1,203 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+#include "checks.h"
+#include "win32-headers.h"
+
+namespace v8 {
+namespace internal {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  LONG result = InterlockedCompareExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value),
+      static_cast<LONG>(old_value));
+  return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  LONG result = InterlockedExchange(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(new_value));
+  return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return InterlockedExchangeAdd(
+      reinterpret_cast<volatile LONG*>(ptr),
+      static_cast<LONG>(increment)) + increment;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
+#error "We require at least vs2005 for MemoryBarrier"
+#endif
+inline void MemoryBarrier() {
+  // We use MemoryBarrier from WinNT.h
+  ::MemoryBarrier();
+}
+
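+// The Interlocked* intrinsics used above imply a full memory barrier on
+// x86/x64, so both the acquire and the release CAS flavors can map directly
+// onto the no-barrier version.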
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  NoBarrier_AtomicExchange(ptr, value);
+              // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
+  // See comments in Atomic64 version of Release_Store() below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
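+// An ordinary x86 load already has acquire semantics, and the volatile
+// qualifier keeps the MSVC compiler from reordering around the read (a
+// property of volatile under MSVC that this implementation appears to rely
+// on), so no explicit barrier is needed here.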
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+#if defined(_WIN64)
+
+// 64-bit low-level operations on 64-bit platform.
+
+STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  PVOID result = InterlockedCompareExchangePointer(
+    reinterpret_cast<volatile PVOID*>(ptr),
+    reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  PVOID result = InterlockedExchangePointer(
+    reinterpret_cast<volatile PVOID*>(ptr),
+    reinterpret_cast<PVOID>(new_value));
+  return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return InterlockedExchangeAdd64(
+      reinterpret_cast<volatile LONGLONG*>(ptr),
+      static_cast<LONGLONG>(increment)) + increment;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  NoBarrier_AtomicExchange(ptr, value);
+              // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
+
+  // When new chips come out, check:
+  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chapter 7: Multiple-processor management,
+  //  Section 7.2, Memory Ordering.
+  // Last seen at:
+  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+
+#endif  // defined(_WIN64)
+
+} }  // namespace v8::internal
+
+#endif  // V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index f60a975..cae1a9a 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -38,7 +38,6 @@
 #include "natives.h"
 #include "objects-visiting.h"
 #include "snapshot.h"
-#include "stub-cache.h"
 #include "extensions/externalize-string-extension.h"
 #include "extensions/gc-extension.h"
 
@@ -234,7 +233,7 @@
   // Used for creating a context from scratch.
   void InstallNativeFunctions();
   bool InstallNatives();
-  void InstallCustomCallGenerators();
+  void InstallBuiltinFunctionIds();
   void InstallJSFunctionResultCaches();
   void InitializeNormalizedMapCaches();
   // Used both for deserialized and from-scratch contexts to add the extensions
@@ -500,6 +499,24 @@
 }
 
 
+static void AddToWeakGlobalContextList(Context* context) {
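+  // Chain this context into the heap's weak list of global contexts,
+  // linked through the NEXT_CONTEXT_LINK slot, so the heap can later walk
+  // all live global contexts.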
+  ASSERT(context->IsGlobalContext());
+#ifdef DEBUG
+  { // NOLINT
+    ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
+    // Check that context is not in the list yet.
+    for (Object* current = Heap::global_contexts_list();
+         !current->IsUndefined();
+         current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) {
+      ASSERT(current != context);
+    }
+  }
+#endif
+  context->set(Context::NEXT_CONTEXT_LINK, Heap::global_contexts_list());
+  Heap::set_global_contexts_list(context);
+}
+
+
 void Genesis::CreateRoots() {
   // Allocate the global context FixedArray first and then patch the
   // closure and extension object later (we need the empty function
@@ -508,6 +525,7 @@
   global_context_ =
       Handle<Context>::cast(
           GlobalHandles::Create(*Factory::NewGlobalContext()));
+  AddToWeakGlobalContextList(*global_context_);
   Top::set_context(*global_context());
 
   // Allocate the message listeners object.
@@ -1251,7 +1269,7 @@
   global_context()->set_string_function_prototype_map(
       HeapObject::cast(string_function->initial_map()->prototype())->map());
 
-  InstallCustomCallGenerators();
+  InstallBuiltinFunctionIds();
 
   // Install Function.prototype.call and apply.
   { Handle<String> key = Factory::function_class_symbol();
@@ -1350,7 +1368,7 @@
 }
 
 
-static Handle<JSObject> ResolveCustomCallGeneratorHolder(
+static Handle<JSObject> ResolveBuiltinIdHolder(
     Handle<Context> global_context,
     const char* holder_expr) {
   Handle<GlobalObject> global(global_context->global());
@@ -1368,9 +1386,9 @@
 }
 
 
-static void InstallCustomCallGenerator(Handle<JSObject> holder,
-                                       const char* function_name,
-                                       int id) {
+static void InstallBuiltinFunctionId(Handle<JSObject> holder,
+                                     const char* function_name,
+                                     BuiltinFunctionId id) {
   Handle<String> name = Factory::LookupAsciiSymbol(function_name);
   Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
   Handle<JSFunction> function(JSFunction::cast(function_object));
@@ -1378,17 +1396,17 @@
 }
 
 
-void Genesis::InstallCustomCallGenerators() {
+void Genesis::InstallBuiltinFunctionIds() {
   HandleScope scope;
-#define INSTALL_CALL_GENERATOR(holder_expr, fun_name, name)     \
-  {                                                             \
-    Handle<JSObject> holder = ResolveCustomCallGeneratorHolder( \
-        global_context(), #holder_expr);                        \
-    const int id = CallStubCompiler::k##name##CallGenerator;    \
-    InstallCustomCallGenerator(holder, #fun_name, id);          \
+#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
+  {                                                     \
+    Handle<JSObject> holder = ResolveBuiltinIdHolder(   \
+        global_context(), #holder_expr);                \
+    BuiltinFunctionId id = k##name;                     \
+    InstallBuiltinFunctionId(holder, #fun_name, id);    \
   }
-  CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR)
-#undef INSTALL_CALL_GENERATOR
+  FUNCTIONS_WITH_ID_LIST(INSTALL_BUILTIN_ID)
+#undef INSTALL_BUILTIN_ID
 }
 
 
@@ -1596,7 +1614,7 @@
         = Handle<SharedFunctionInfo>(function->shared());
     if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
     // Set the code object on the function object.
-    function->set_code(function->shared()->code());
+    function->ReplaceCode(function->shared()->code());
     builtins->set_javascript_builtin_code(id, shared->code());
   }
   return true;
@@ -1784,6 +1802,7 @@
   if (!new_context.is_null()) {
     global_context_ =
       Handle<Context>::cast(GlobalHandles::Create(*new_context));
+    AddToWeakGlobalContextList(*global_context_);
     Top::set_context(*global_context_);
     i::Counters::contexts_created_by_snapshot.Increment();
     result_ = global_context_;
@@ -1819,11 +1838,6 @@
     i::Counters::contexts_created_from_scratch.Increment();
   }
 
-  // Add this context to the weak list of global contexts.
-  (*global_context_)->set(Context::NEXT_CONTEXT_LINK,
-                          Heap::global_contexts_list());
-  Heap::set_global_contexts_list(*global_context_);
-
   result_ = global_context_;
 }
 
diff --git a/src/builtins.cc b/src/builtins.cc
index e88ef6f..21381f1 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -32,6 +32,7 @@
 #include "bootstrapper.h"
 #include "builtins.h"
 #include "ic-inl.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -514,10 +515,10 @@
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
         EnsureJSArrayWithWritableFastElements(receiver);
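+    // A NULL result means EnsureJSArrayWithWritableFastElements() could not
+    // produce writable fast elements for this receiver; fall back to the
+    // generic JavaScript builtin.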
+    if (maybe_elms_obj == NULL) return CallJsBuiltin("ArrayShift", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
   }
-  if (elms_obj == NULL ||
-      !IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
+  if (!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
     return CallJsBuiltin("ArrayShift", args);
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
@@ -556,10 +557,10 @@
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
         EnsureJSArrayWithWritableFastElements(receiver);
+    if (maybe_elms_obj == NULL) return CallJsBuiltin("ArrayUnshift", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
   }
-  if (elms_obj == NULL ||
-      !IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
+  if (!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
     return CallJsBuiltin("ArrayUnshift", args);
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
@@ -610,21 +611,46 @@
 
 BUILTIN(ArraySlice) {
   Object* receiver = *args.receiver();
-  Object* elms_obj;
+  FixedArray* elms;
+  int len = -1;
   { MaybeObject* maybe_elms_obj =
         EnsureJSArrayWithWritableFastElements(receiver);
-    if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
-  }
-  if (elms_obj == NULL ||
-      !IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
-    return CallJsBuiltin("ArraySlice", args);
-  }
-  FixedArray* elms = FixedArray::cast(elms_obj);
-  JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastElements());
+    Object* elms_obj;
+    if (maybe_elms_obj != NULL && maybe_elms_obj->ToObject(&elms_obj)) {
+      if (!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
+        return CallJsBuiltin("ArraySlice", args);
+      }
+      elms = FixedArray::cast(elms_obj);
+      JSArray* array = JSArray::cast(receiver);
+      ASSERT(array->HasFastElements());
 
-  int len = Smi::cast(array->length())->value();
+      len = Smi::cast(array->length())->value();
+    } else {
+      // Array.slice(arguments, ...) is quite a common idiom (notably more
+      // than 50% of invocations in Web apps).  Handle it in C++ as well.
+      Map* arguments_map =
+          Top::context()->global_context()->arguments_boilerplate()->map();
 
+      bool is_arguments_object_with_fast_elements =
+          receiver->IsJSObject()
+          && JSObject::cast(receiver)->map() == arguments_map
+          && JSObject::cast(receiver)->HasFastElements();
+      if (!is_arguments_object_with_fast_elements) {
+        return CallJsBuiltin("ArraySlice", args);
+      }
+      elms = FixedArray::cast(JSObject::cast(receiver)->elements());
+      len = elms->length();
+#ifdef DEBUG
+      // By construction the arguments object should have no holes; check it.
+      if (FLAG_enable_slow_asserts) {
+        for (int i = 0; i < len; i++) {
+          ASSERT(elms->get(i) != Heap::the_hole_value());
+        }
+      }
+#endif
+    }
+  }
+  ASSERT(len >= 0);
   int n_arguments = args.length() - 1;
 
   // Note carefully chosen defaults---if argument is missing,
@@ -692,10 +718,10 @@
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
         EnsureJSArrayWithWritableFastElements(receiver);
+    if (maybe_elms_obj == NULL) return CallJsBuiltin("ArraySplice", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
   }
-  if (elms_obj == NULL ||
-      !IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
+  if (!IsJSArrayFastElementMovingAllowed(JSArray::cast(receiver))) {
     return CallJsBuiltin("ArraySplice", args);
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
@@ -1031,9 +1057,7 @@
     {
       // Leaving JavaScript.
       VMState state(EXTERNAL);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-      state.set_external_callback(v8::ToCData<Address>(callback_obj));
-#endif
+      ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
       value = callback(new_args);
     }
     if (value.IsEmpty()) {
@@ -1103,9 +1127,7 @@
   {
     // Leaving JavaScript.
     VMState state(EXTERNAL);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-    state.set_external_callback(v8::ToCData<Address>(callback_obj));
-#endif
+    ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
     v8::InvocationCallback callback =
         v8::ToCData<v8::InvocationCallback>(callback_obj);
 
@@ -1169,9 +1191,7 @@
     {
       // Leaving JavaScript.
       VMState state(EXTERNAL);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-      state.set_external_callback(v8::ToCData<Address>(callback_obj));
-#endif
+      ExternalCallbackScope call_scope(v8::ToCData<Address>(callback_obj));
       value = callback(new_args);
     }
     if (value.IsEmpty()) {
@@ -1332,6 +1352,11 @@
 }
 
 
+static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
+  StoreIC::GenerateGlobalProxy(masm);
+}
+
+
 static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
   KeyedStoreIC::GenerateGeneric(masm);
 }
@@ -1581,4 +1606,5 @@
   return NULL;
 }
 
+
 } }  // namespace v8::internal
diff --git a/src/builtins.h b/src/builtins.h
index b5e8c4e..d2b4be2 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -71,6 +71,10 @@
   V(JSEntryTrampoline,          BUILTIN, UNINITIALIZED)                   \
   V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED)                   \
   V(LazyCompile,                BUILTIN, UNINITIALIZED)                   \
+  V(LazyRecompile,              BUILTIN, UNINITIALIZED)                   \
+  V(NotifyDeoptimized,          BUILTIN, UNINITIALIZED)                   \
+  V(NotifyLazyDeoptimized,      BUILTIN, UNINITIALIZED)                   \
+  V(NotifyOSR,                  BUILTIN, UNINITIALIZED)                   \
                                                                           \
   V(LoadIC_Miss,                BUILTIN, UNINITIALIZED)                   \
   V(KeyedLoadIC_Miss,           BUILTIN, UNINITIALIZED)                   \
@@ -102,6 +106,7 @@
   V(StoreIC_ArrayLength,        STORE_IC, MONOMORPHIC)                    \
   V(StoreIC_Normal,             STORE_IC, MONOMORPHIC)                    \
   V(StoreIC_Megamorphic,        STORE_IC, MEGAMORPHIC)                    \
+  V(StoreIC_GlobalProxy,        STORE_IC, MEGAMORPHIC)                    \
                                                                           \
   V(KeyedStoreIC_Initialize,    KEYED_STORE_IC, UNINITIALIZED)            \
   V(KeyedStoreIC_Generic,       KEYED_STORE_IC, MEGAMORPHIC)              \
@@ -120,7 +125,9 @@
   V(ArrayCode,                  BUILTIN, UNINITIALIZED)                   \
   V(ArrayConstructCode,         BUILTIN, UNINITIALIZED)                   \
                                                                           \
-  V(StringConstructCode,        BUILTIN, UNINITIALIZED)
+  V(StringConstructCode,        BUILTIN, UNINITIALIZED)                   \
+                                                                          \
+  V(OnStackReplacement,         BUILTIN, UNINITIALIZED)
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -256,6 +263,10 @@
   static void Generate_JSEntryTrampoline(MacroAssembler* masm);
   static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
   static void Generate_LazyCompile(MacroAssembler* masm);
+  static void Generate_LazyRecompile(MacroAssembler* masm);
+  static void Generate_NotifyDeoptimized(MacroAssembler* masm);
+  static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
+  static void Generate_NotifyOSR(MacroAssembler* masm);
   static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
 
   static void Generate_FunctionCall(MacroAssembler* masm);
@@ -265,6 +276,8 @@
   static void Generate_ArrayConstructCode(MacroAssembler* masm);
 
   static void Generate_StringConstructCode(MacroAssembler* masm);
+
+  static void Generate_OnStackReplacement(MacroAssembler* masm);
 };
 
 } }  // namespace v8::internal
diff --git a/src/checks.h b/src/checks.h
index d49f97f..2bb94bb 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -30,6 +30,7 @@
 
 #include <string.h>
 
+#include "../include/v8stdint.h"
 extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
 
 // The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
@@ -231,6 +232,8 @@
 
 #define CHECK_GT(a, b) CHECK((a) > (b))
 #define CHECK_GE(a, b) CHECK((a) >= (b))
+#define CHECK_LT(a, b) CHECK((a) < (b))
+#define CHECK_LE(a, b) CHECK((a) <= (b))
 
 
 // This is inspired by the static assertion facility in boost.  This
@@ -281,7 +284,7 @@
 // safely enabled in release mode. Moreover, the ((void) 0) expression
 // obeys different syntax rules than typedef's, e.g. it can't appear
 // inside class declaration, this leads to inconsistency between debug
-// and release compilation modes behaviour.
+// and release compilation modes behavior.
 #define STATIC_ASSERT(test)  STATIC_CHECK(test)
 
 #define ASSERT_NOT_NULL(p)  ASSERT_NE(NULL, p)
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 8b9198f..1b0d8b0 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -103,6 +103,7 @@
         GetICState());
     Handle<Code> new_object = Factory::NewCode(desc, flags, masm.CodeObject());
     RecordCodeGeneration(*new_object, &masm);
+    FinishCode(*new_object);
 
     // Update the dictionary and the root in Heap.
     Handle<NumberDictionary> dict =
@@ -142,6 +143,7 @@
     }
     code = Code::cast(new_object);
     RecordCodeGeneration(code, &masm);
+    FinishCode(code);
 
     // Try to update the code cache but do not fail if unable.
     MaybeObject* maybe_new_object =
@@ -170,4 +172,29 @@
 }
 
 
+int ICCompareStub::MinorKey() {
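+  // Pack the compare token (as an offset from Token::EQ, in OpField's three
+  // bits) together with the CompareIC state (in StateField's five bits);
+  // see the BitField declarations in code-stubs.h.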
+  return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
+}
+
+
+void ICCompareStub::Generate(MacroAssembler* masm) {
+  switch (state_) {
+    case CompareIC::UNINITIALIZED:
+      GenerateMiss(masm);
+      break;
+    case CompareIC::SMIS:
+      GenerateSmis(masm);
+      break;
+    case CompareIC::HEAP_NUMBERS:
+      GenerateHeapNumbers(masm);
+      break;
+    case CompareIC::OBJECTS:
+      GenerateObjects(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/code-stubs.h b/src/code-stubs.h
index b156647..b7804b7 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -29,7 +29,6 @@
 #define V8_CODE_STUBS_H_
 
 #include "globals.h"
-#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -39,11 +38,16 @@
 #define CODE_STUB_LIST_ALL_PLATFORMS(V)  \
   V(CallFunction)                        \
   V(GenericBinaryOp)                     \
+  V(TypeRecordingBinaryOp)               \
   V(StringAdd)                           \
+  V(StringCharAt)                        \
   V(SubString)                           \
   V(StringCompare)                       \
   V(SmiOp)                               \
   V(Compare)                             \
+  V(CompareIC)                           \
+  V(MathPow)                             \
+  V(TranscendentalCache)                 \
   V(RecordWrite)                         \
   V(ConvertToDouble)                     \
   V(WriteInt32ToHeapNumber)              \
@@ -52,7 +56,6 @@
   V(FastNewClosure)                      \
   V(FastNewContext)                      \
   V(FastCloneShallowArray)               \
-  V(TranscendentalCache)                 \
   V(GenericUnaryOp)                      \
   V(RevertToNumber)                      \
   V(ToBoolean)                           \
@@ -60,6 +63,7 @@
   V(CounterOp)                           \
   V(ArgumentsAccess)                     \
   V(RegExpExec)                          \
+  V(RegExpConstructResult)               \
   V(NumberToString)                      \
   V(CEntry)                              \
   V(JSEntry)                             \
@@ -125,7 +129,7 @@
   virtual ~CodeStub() {}
 
  protected:
-  static const int kMajorBits = 5;
+  static const int kMajorBits = 6;
   static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
 
  private:
@@ -143,6 +147,9 @@
   // initially generated.
   void RecordCodeGeneration(Code* code, MacroAssembler* masm);
 
+  // Finish the code object after it has been generated.
+  virtual void FinishCode(Code* code) { }
+
   // Returns information for computing the number key.
   virtual Major MajorKey() = 0;
   virtual int MinorKey() = 0;
@@ -216,11 +223,11 @@
 namespace internal {
 
 
-// RuntimeCallHelper implementation used in IC stubs: enters/leaves a
+// RuntimeCallHelper implementation used in stubs: enters/leaves a
 // newly created internal frame before/after the runtime call.
-class ICRuntimeCallHelper : public RuntimeCallHelper {
+class StubRuntimeCallHelper : public RuntimeCallHelper {
  public:
-  ICRuntimeCallHelper() {}
+  StubRuntimeCallHelper() {}
 
   virtual void BeforeCall(MacroAssembler* masm) const;
 
@@ -318,13 +325,24 @@
 
 class InstanceofStub: public CodeStub {
  public:
-  InstanceofStub() { }
+  enum Flags {
+    kNoFlags = 0,
+    kArgsInRegisters = 1 << 0
+  };
+
+  explicit InstanceofStub(Flags flags) : flags_(flags) { }
 
   void Generate(MacroAssembler* masm);
 
  private:
   Major MajorKey() { return Instanceof; }
-  int MinorKey() { return 0; }
+  int MinorKey() { return args_in_registers() ? 1 : 0; }
+
+  bool args_in_registers() {
+    return (flags_ & kArgsInRegisters) != 0;
+  }
+
+  Flags flags_;
 };
 
 
@@ -376,9 +394,61 @@
 };
 
 
-enum NaNInformation {
-  kBothCouldBeNaN,
-  kCantBothBeNaN
+class MathPowStub: public CodeStub {
+ public:
+  MathPowStub() {}
+  virtual void Generate(MacroAssembler* masm);
+
+ private:
+  virtual CodeStub::Major MajorKey() { return MathPow; }
+  virtual int MinorKey() { return 0; }
+
+  const char* GetName() { return "MathPowStub"; }
+};
+
+
+class StringCharAtStub: public CodeStub {
+ public:
+  StringCharAtStub() {}
+
+ private:
+  Major MajorKey() { return StringCharAt; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
+class ICCompareStub: public CodeStub {
+ public:
+  ICCompareStub(Token::Value op, CompareIC::State state)
+      : op_(op), state_(state) {
+    ASSERT(Token::IsCompareOp(op));
+  }
+
+  virtual void Generate(MacroAssembler* masm);
+
+ private:
+  class OpField: public BitField<int, 0, 3> { };
+  class StateField: public BitField<int, 3, 5> { };
+
+  virtual void FinishCode(Code* code) { code->set_compare_state(state_); }
+
+  virtual CodeStub::Major MajorKey() { return CompareIC; }
+  virtual int MinorKey();
+
+  virtual int GetCodeKind() { return Code::COMPARE_IC; }
+
+  void GenerateSmis(MacroAssembler* masm);
+  void GenerateHeapNumbers(MacroAssembler* masm);
+  void GenerateObjects(MacroAssembler* masm);
+  void GenerateMiss(MacroAssembler* masm);
+
+  bool strict() const { return op_ == Token::EQ_STRICT; }
+  Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
+
+  Token::Value op_;
+  CompareIC::State state_;
 };
 
 
@@ -391,6 +461,12 @@
 };
 
 
+enum NaNInformation {
+  kBothCouldBeNaN,
+  kCantBothBeNaN
+};
+
+
 class CompareStub: public CodeStub {
  public:
   CompareStub(Condition cc,
@@ -398,7 +474,7 @@
               CompareFlags flags,
               Register lhs,
               Register rhs) :
       cc_(cc),
       strict_(strict),
       never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
       include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
@@ -440,6 +516,7 @@
 
   // Register holding the left hand side of the comparison if the stub gives
   // a choice, no_reg otherwise.
+
   Register lhs_;
   // Register holding the right hand side of the comparison if the stub gives
   // a choice, no_reg otherwise.
@@ -457,6 +534,11 @@
 
   int MinorKey();
 
+  virtual int GetCodeKind() { return Code::COMPARE_IC; }
+  virtual void FinishCode(Code* code) {
+    code->set_compare_state(CompareIC::GENERIC);
+  }
+
   // Branch to the label if the given object isn't a symbol.
   void BranchIfNonSymbol(MacroAssembler* masm,
                          Label* label,
@@ -490,9 +572,11 @@
 
 class CEntryStub : public CodeStub {
  public:
-  explicit CEntryStub(int result_size) : result_size_(result_size) { }
+  explicit CEntryStub(int result_size)
+      : result_size_(result_size), save_doubles_(false) { }
 
   void Generate(MacroAssembler* masm);
+  void SaveDoubles() { save_doubles_ = true; }
 
  private:
   void GenerateCore(MacroAssembler* masm,
@@ -508,10 +592,9 @@
 
   // Number of pointers/values returned.
   const int result_size_;
+  bool save_doubles_;
 
   Major MajorKey() { return CEntry; }
-  // Minor key must differ if different result_size_ values means different
-  // code is generated.
   int MinorKey();
 
   const char* GetName() { return "CEntryStub"; }
@@ -597,6 +680,26 @@
 };
 
 
+class RegExpConstructResultStub: public CodeStub {
+ public:
+  RegExpConstructResultStub() { }
+
+ private:
+  Major MajorKey() { return RegExpConstructResult; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "RegExpConstructResultStub"; }
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RegExpConstructResultStub\n");
+  }
+#endif
+};
+
+
 class CallFunctionStub: public CodeStub {
  public:
   CallFunctionStub(int argc, InLoopFlag in_loop, CallFunctionFlags flags)
diff --git a/src/codegen.cc b/src/codegen.cc
index fb8c5cd..da479e8 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -139,6 +139,16 @@
     print_source = FLAG_print_source;
     print_ast = FLAG_print_ast;
     print_json_ast = FLAG_print_json_ast;
+    Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
+    if (print_source && !filter.is_empty()) {
+      print_source = info->function()->name()->IsEqualTo(filter);
+    }
+    if (print_ast && !filter.is_empty()) {
+      print_ast = info->function()->name()->IsEqualTo(filter);
+    }
+    if (print_json_ast && !filter.is_empty()) {
+      print_json_ast = info->function()->name()->IsEqualTo(filter);
+    }
     ftype = "user-defined";
   }
 
@@ -174,14 +184,24 @@
   masm->GetCode(&desc);
   Handle<Code> code = Factory::NewCode(desc, flags, masm->CodeObject());
 
+  if (!code.is_null()) {
+    Counters::total_compiled_code_size.Increment(code->instruction_size());
+  }
+  return code;
+}
+
+
+void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
 #ifdef ENABLE_DISASSEMBLER
   bool print_code = Bootstrapper::IsActive()
       ? FLAG_print_builtin_code
-      : FLAG_print_code;
-  if (print_code) {
+      : (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
+  Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
+  FunctionLiteral* function = info->function();
+  bool match = filter.is_empty() || function->debug_name()->IsEqualTo(filter);
+  if (print_code && match) {
     // Print the source code if available.
     Handle<Script> script = info->script();
-    FunctionLiteral* function = info->function();
     if (!script->IsUndefined() && !script->source()->IsUndefined()) {
       PrintF("--- Raw source ---\n");
       StringInputBuffer stream(String::cast(script->source()));
@@ -195,26 +215,35 @@
       }
       PrintF("\n\n");
     }
-    PrintF("--- Code ---\n");
-    code->Disassemble(*function->name()->ToCString());
+    if (info->IsOptimizing()) {
+      if (FLAG_print_unopt_code) {
+        PrintF("--- Unoptimized code ---\n");
+        info->closure()->shared()->code()->Disassemble(
+            *function->debug_name()->ToCString());
+      }
+      PrintF("--- Optimized code ---\n");
+    } else {
+      PrintF("--- Code ---\n");
+    }
+    code->Disassemble(*function->debug_name()->ToCString());
   }
 #endif  // ENABLE_DISASSEMBLER
-
-  if (!code.is_null()) {
-    Counters::total_compiled_code_size.Increment(code->instruction_size());
-  }
-  return code;
 }
 
 
 // Generate the code.  Compile the AST and assemble all the pieces into a
 // Code object.
 bool CodeGenerator::MakeCode(CompilationInfo* info) {
+  // When using Crankshaft the classic backend should never be used.
+  ASSERT(!V8::UseCrankshaft());
   Handle<Script> script = info->script();
   if (!script->IsUndefined() && !script->source()->IsUndefined()) {
     int len = String::cast(script->source())->length();
     Counters::total_old_codegen_source_size.Increment(len);
   }
+  if (FLAG_trace_codegen) {
+    PrintF("Classic Compiler - ");
+  }
   MakeCodePrologue(info);
   // Generate code.
   const int kInitialBufferSize = 4 * KB;
@@ -230,6 +259,9 @@
   InLoopFlag in_loop = info->is_in_loop() ? IN_LOOP : NOT_IN_LOOP;
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
   Handle<Code> code = MakeCodeEpilogue(cgen.masm(), flags, info);
+  // There is no stack check table in code generated by the classic backend.
+  code->SetNoStackCheckTable();
+  CodeGenerator::PrintCode(code, info);
   info->SetCode(code);  // May be an empty handle.
   return !code.is_null();
 }
@@ -441,10 +473,11 @@
 
 int CEntryStub::MinorKey() {
   ASSERT(result_size_ == 1 || result_size_ == 2);
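+  // Bit 0 of the minor key records save_doubles_; on Win64, bit 1 also
+  // records whether one or two values are returned.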
+  int result = save_doubles_ ? 1 : 0;
 #ifdef _WIN64
-  return result_size_ == 1 ? 0 : 1;
+  return result | ((result_size_ == 1) ? 0 : 2);
 #else
-  return 0;
+  return result;
 #endif
 }
 
diff --git a/src/codegen.h b/src/codegen.h
index 66300d6..23b36f0 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -68,6 +68,9 @@
 //   CodeForDoWhileConditionPosition
 //   CodeForSourcePosition
 
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
 #if V8_TARGET_ARCH_IA32
 #include "ia32/codegen-ia32.h"
 #elif V8_TARGET_ARCH_X64
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 6e4e4bf..38438cb 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -86,6 +86,9 @@
   // Clear this sub-cache evicting all its content.
   void Clear();
 
+  // Remove given shared function info from sub-cache.
+  void Remove(Handle<SharedFunctionInfo> function_info);
+
   // Number of generations in this sub-cache.
   inline int generations() { return generations_; }
 
@@ -249,6 +252,18 @@
 }
 
 
+void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
+  // Probe the script generation tables. Make sure not to leak handles
+  // into the caller's handle scope.
+  { HandleScope scope;
+    for (int generation = 0; generation < generations(); generation++) {
+      Handle<CompilationCacheTable> table = GetTable(generation);
+      table->Remove(*function_info);
+    }
+  }
+}
+
+
 // We only re-use a cached function for some script source code if the
 // script originates from the same place. This is to avoid issues
 // when reporting errors, etc.
@@ -467,6 +482,15 @@
 }
 
 
+void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
+  if (!IsEnabled()) return;
+
+  eval_global.Remove(function_info);
+  eval_contextual.Remove(function_info);
+  script.Remove(function_info);
+}
+
+
 Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
                                                           Handle<Object> name,
                                                           int line_offset,
@@ -545,6 +569,45 @@
 }
 
 
+static bool SourceHashCompare(void* key1, void* key2) {
+  return key1 == key2;
+}
+
+
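+// The set of functions to optimize eagerly is keyed by the function's
+// source hash; the hash value itself doubles as the key pointer, which is
+// why SourceHashCompare() can simply compare keys for identity.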
+static HashMap* EagerOptimizingSet() {
+  static HashMap map(&SourceHashCompare);
+  return &map;
+}
+
+
+bool CompilationCache::ShouldOptimizeEagerly(Handle<JSFunction> function) {
+  if (FLAG_opt_eagerly) return true;
+  uint32_t hash = function->SourceHash();
+  void* key = reinterpret_cast<void*>(hash);
+  return EagerOptimizingSet()->Lookup(key, hash, false) != NULL;
+}
+
+
+void CompilationCache::MarkForEagerOptimizing(Handle<JSFunction> function) {
+  uint32_t hash = function->SourceHash();
+  void* key = reinterpret_cast<void*>(hash);
+  EagerOptimizingSet()->Lookup(key, hash, true);
+}
+
+
+void CompilationCache::MarkForLazyOptimizing(Handle<JSFunction> function) {
+  uint32_t hash = function->SourceHash();
+  void* key = reinterpret_cast<void*>(hash);
+  EagerOptimizingSet()->Remove(key, hash);
+}
+
+
+void CompilationCache::ResetEagerOptimizingData() {
+  HashMap* set = EagerOptimizingSet();
+  if (set->occupancy() > 0) set->Clear();
+}
+
+
 void CompilationCache::Clear() {
   for (int i = 0; i < kSubCacheCount; i++) {
     subcaches[i]->Clear();
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 22ecff8..37e21be 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -76,9 +76,20 @@
                         JSRegExp::Flags flags,
                         Handle<FixedArray> data);
 
+  // Support for eager optimization tracking.
+  static bool ShouldOptimizeEagerly(Handle<JSFunction> function);
+  static void MarkForEagerOptimizing(Handle<JSFunction> function);
+  static void MarkForLazyOptimizing(Handle<JSFunction> function);
+
+  // Reset the eager optimization tracking data.
+  static void ResetEagerOptimizingData();
+
   // Clear the cache - also used to initialize the cache at startup.
   static void Clear();
 
+  // Remove given shared function info from all caches.
+  static void Remove(Handle<SharedFunctionInfo> function_info);
+
   // GC support.
   static void Iterate(ObjectVisitor* v);
   static void IterateFunctions(ObjectVisitor* v);
diff --git a/src/compiler.cc b/src/compiler.cc
index 29bbbc7..e4864e4 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -35,12 +35,16 @@
 #include "data-flow.h"
 #include "debug.h"
 #include "full-codegen.h"
+#include "hydrogen.h"
+#include "lithium-allocator.h"
 #include "liveedit.h"
 #include "oprofile-agent.h"
 #include "parser.h"
 #include "rewriter.h"
+#include "runtime-profiler.h"
 #include "scopeinfo.h"
 #include "scopes.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -52,7 +56,10 @@
       scope_(NULL),
       script_(script),
       extension_(NULL),
-      pre_parse_data_(NULL) {
+      pre_parse_data_(NULL),
+      supports_deoptimization_(false),
+      osr_ast_id_(AstNode::kNoNumber) {
+  Initialize(NONOPT);
 }
 
 
@@ -63,7 +70,10 @@
       shared_info_(shared_info),
       script_(Handle<Script>(Script::cast(shared_info->script()))),
       extension_(NULL),
-      pre_parse_data_(NULL) {
+      pre_parse_data_(NULL),
+      supports_deoptimization_(false),
+      osr_ast_id_(AstNode::kNoNumber) {
+  Initialize(BASE);
 }
 
 
@@ -75,31 +85,213 @@
       shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
       extension_(NULL),
-      pre_parse_data_(NULL) {
+      pre_parse_data_(NULL),
+      supports_deoptimization_(false),
+      osr_ast_id_(AstNode::kNoNumber) {
+  Initialize(BASE);
 }
 
 
-// For normal operation the syntax checker is used to determine whether to
-// use the full compiler for top level code or not. However if the flag
-// --always-full-compiler is specified or debugging is active the full
-// compiler will be used for all code.
+// Determine whether to use the full compiler for all code. If the flag
+// --always-full-compiler is specified this is the case. For the virtual-frame
+// based compiler the full compiler is also used whenever a debugger is
+// connected, as code from the full compiler supports more precise break
+// points. With the Crankshaft adaptive compiler, debugging optimized code is
+// not possible at all. However, Crankshaft supports recompilation of
+// functions, so the full compiler need not be used merely because a debugger
+// is attached, but only once break points have actually been set.
 static bool AlwaysFullCompiler() {
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
+  if (V8::UseCrankshaft()) {
+    return FLAG_always_full_compiler || Debug::has_break_points();
+  } else {
+    return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
+  }
 #else
   return FLAG_always_full_compiler;
 #endif
 }
 
 
+static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
+  int opt_count = function->shared()->opt_count();
+  function->shared()->set_opt_count(opt_count + 1);
+  double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+  if (FLAG_trace_opt) {
+    PrintF("[optimizing: ");
+    function->PrintName();
+    PrintF(" / %" V8PRIxPTR, reinterpret_cast<intptr_t>(*function));
+    PrintF(" - took %0.3f ms]\n", ms);
+  }
+  if (FLAG_trace_opt_stats) {
+    static double compilation_time = 0.0;
+    static int compiled_functions = 0;
+    static int code_size = 0;
+
+    compilation_time += ms;
+    compiled_functions++;
+    code_size += function->shared()->SourceSize();
+    PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
+           compiled_functions,
+           code_size,
+           compilation_time);
+  }
+}
+
+
+static void AbortAndDisable(CompilationInfo* info) {
+  // Disable optimization for the shared function info and mark the
+  // code as non-optimizable. The marker on the shared function info
+  // is there because we flush non-optimized code, thereby losing the
+  // non-optimizable information for the code. When the code is
+  // regenerated and set on the shared function info it is marked as
+  // non-optimizable if optimization is disabled for the shared
+  // function info.
+  Handle<SharedFunctionInfo> shared = info->shared_info();
+  shared->set_optimization_disabled(true);
+  Handle<Code> code = Handle<Code>(shared->code());
+  ASSERT(code->kind() == Code::FUNCTION);
+  code->set_optimizable(false);
+  info->SetCode(code);
+  if (FLAG_trace_opt) {
+    PrintF("[disabled optimization for: ");
+    info->closure()->PrintName();
+    PrintF(" / %" V8PRIxPTR "]\n",
+           reinterpret_cast<intptr_t>(*info->closure()));
+  }
+}
+
+
+static bool MakeCrankshaftCode(CompilationInfo* info) {
+  // Test if we can optimize this function when asked to. We can only
+  // do this after the scopes are computed.
+  if (!info->AllowOptimize()) info->DisableOptimization();
+
+  // In case we are not optimizing simply return the code from
+  // the full code generator.
+  if (!info->IsOptimizing()) {
+    return FullCodeGenerator::MakeCode(info);
+  }
+
+  // We should never arrive here if there is no code object on the
+  // shared function object.
+  Handle<Code> code(info->shared_info()->code());
+  ASSERT(code->kind() == Code::FUNCTION);
+
+  // Fall back to using the full code generator if it's not possible
+  // to use the Hydrogen-based optimizing compiler. We already have
+  // generated code for this from the shared function object.
+  if (AlwaysFullCompiler() || !FLAG_use_hydrogen) {
+    info->SetCode(code);
+    return true;
+  }
+
+  // Limit the number of times we recompile a function with
+  // the optimizing compiler.
+  const int kMaxOptCount = FLAG_deopt_every_n_times == 0 ? 10 : 1000;
+  if (info->shared_info()->opt_count() > kMaxOptCount) {
+    AbortAndDisable(info);
+    // True indicates the compilation pipeline is still going, not
+    // necessarily that we optimized the code.
+    return true;
+  }
+
+  // Due to an encoding limit on LUnallocated operands in the Lithium
+  // language, we cannot optimize functions with too many formal parameters
+  // or perform on-stack replacement for functions with too many
+  // stack-allocated local variables.
+  //
+  // The encoding is as a signed value, with parameters using the negative
+  // indices and locals the non-negative ones.
+  const int limit = LUnallocated::kMaxFixedIndices / 2;
+  Scope* scope = info->scope();
+  if (scope->num_parameters() > limit || scope->num_stack_slots() > limit) {
+    AbortAndDisable(info);
+    // True indicates the compilation pipeline is still going, not
+    // necessarily that we optimized the code.
+    return true;
+  }
+
+  // Take --hydrogen-filter into account.
+  Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
+  Handle<String> name = info->function()->debug_name();
+  bool match = filter.is_empty() || name->IsEqualTo(filter);
+  if (!match) {
+    info->SetCode(code);
+    return true;
+  }
+
+  // Recompile the unoptimized version of the code if the current version
+  // doesn't have deoptimization support. Alternatively, we may decide to
+  // run the full code generator to get a baseline for the compile-time
+  // performance of the hydrogen-based compiler.
+  int64_t start = OS::Ticks();
+  bool should_recompile = !info->shared_info()->has_deoptimization_support();
+  if (should_recompile || FLAG_time_hydrogen) {
+    HPhase phase(HPhase::kFullCodeGen);
+    CompilationInfo unoptimized(info->shared_info());
+    // Note that we use the same AST that we will use for generating the
+    // optimized code.
+    unoptimized.SetFunction(info->function());
+    unoptimized.SetScope(info->scope());
+    if (should_recompile) unoptimized.EnableDeoptimizationSupport();
+    bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
+    if (should_recompile) {
+      if (!succeeded) return false;
+      Handle<SharedFunctionInfo> shared = info->shared_info();
+      shared->EnableDeoptimizationSupport(*unoptimized.code());
+      // The existing unoptimized code was replaced with the new one.
+      Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
+          Handle<String>(shared->DebugName()),
+          shared->start_position(),
+          &unoptimized);
+    }
+  }
+
+  // Check that the unoptimized, shared code is ready for
+  // optimizations.  When using the always_opt flag we disregard the
+  // optimizable marker in the code object and optimize anyway. This
+  // is safe as long as the unoptimized code has deoptimization
+  // support.
+  ASSERT(FLAG_always_opt || info->shared_info()->code()->optimizable());
+  ASSERT(info->shared_info()->has_deoptimization_support());
+
+  if (FLAG_trace_hydrogen) {
+    PrintF("-----------------------------------------------------------\n");
+    PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
+    HTracer::Instance()->TraceCompilation(info->function());
+  }
+
+  TypeFeedbackOracle oracle(Handle<Code>(info->shared_info()->code()));
+  HGraphBuilder builder(&oracle);
+  HPhase phase(HPhase::kTotal);
+  HGraph* graph = builder.CreateGraph(info);
+  if (graph != NULL && FLAG_build_lithium) {
+    Handle<Code> code = graph->Compile();
+    if (!code.is_null()) {
+      info->SetCode(code);
+      FinishOptimization(info->closure(), start);
+      return true;
+    }
+  }
+
+  // Compilation with the Hydrogen compiler failed. Keep using the
+  // shared code but mark it as unoptimizable.
+  AbortAndDisable(info);
+  // True indicates the compilation pipeline is still going, not necessarily
+  // that we optimized the code.
+  return true;
+}
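
The parameter/local limit checked above follows directly from the encoding: a
single signed index space is shared, with parameters on the negative side and
stack-allocated locals on the non-negative side, so each side gets at most
kMaxFixedIndices / 2 slots. A minimal sketch with made-up constants (the real
encoding lives in LUnallocated in the Lithium backend):

    #include <cassert>

    // Made-up stand-ins for LUnallocated's encoding constants.
    const int kMaxFixedIndices = 128;
    const int kLimit = kMaxFixedIndices / 2;

    // Parameters use the negative indices, locals the non-negative ones,
    // so each side gets half of the signed index range.
    int EncodeParameter(int i) { assert(0 <= i && i < kLimit); return -i - 1; }
    int EncodeLocal(int j)     { assert(0 <= j && j < kLimit); return j; }

    // Mirrors the bail-out check in MakeCrankshaftCode above.
    bool FitsInLithiumEncoding(int num_parameters, int num_stack_slots) {
      return num_parameters <= kLimit && num_stack_slots <= kLimit;
    }
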
+
+
 static bool MakeCode(CompilationInfo* info) {
   // Precondition: code has been parsed.  Postcondition: the code field in
   // the compilation info is set if compilation succeeded.
   ASSERT(info->function() != NULL);
 
-  if (Rewriter::Rewrite(info) &&
-      Scope::Analyze(info) &&
-      Rewriter::Analyze(info)) {
+  if (Rewriter::Rewrite(info) && Scope::Analyze(info)) {
+    if (V8::UseCrankshaft()) return MakeCrankshaftCode(info);
+
     // Generate code and return it.  Code generator selection is governed by
     // which backends are enabled and whether the function is considered
     // run-once code or not.
@@ -109,17 +301,19 @@
     //
     // The normal choice of backend can be overridden with the flags
     // --always-full-compiler.
-    Handle<SharedFunctionInfo> shared = info->shared_info();
-    bool is_run_once = (shared.is_null())
-        ? info->scope()->is_global_scope()
-        : (shared->is_toplevel() || shared->try_full_codegen());
-    bool can_use_full =
-        FLAG_full_compiler && !info->function()->contains_loops();
-    if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
-      return FullCodeGenerator::MakeCode(info);
-    } else {
-      AssignedVariablesAnalyzer ava;
-      return ava.Analyze(info) && CodeGenerator::MakeCode(info);
+    if (Rewriter::Analyze(info)) {
+      Handle<SharedFunctionInfo> shared = info->shared_info();
+      bool is_run_once = (shared.is_null())
+          ? info->scope()->is_global_scope()
+          : (shared->is_toplevel() || shared->try_full_codegen());
+      bool can_use_full =
+          FLAG_full_compiler && !info->function()->contains_loops();
+      if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
+        return FullCodeGenerator::MakeCode(info);
+      } else {
+        return AssignedVariablesAnalyzer::Analyze(info) &&
+            CodeGenerator::MakeCode(info);
+      }
     }
   }
 
@@ -280,7 +474,14 @@
     ScriptDataImpl* pre_data = input_pre_data;
     if (pre_data == NULL
         && source_length >= FLAG_min_preparse_length) {
-      pre_data = ParserApi::PartialPreParse(source, NULL, extension);
+      if (source->IsExternalTwoByteString()) {
+        ExternalTwoByteStringUC16CharacterStream stream(
+            Handle<ExternalTwoByteString>::cast(source), 0, source->length());
+        pre_data = ParserApi::PartialPreParse(&stream, extension);
+      } else {
+        GenericStringUC16CharacterStream stream(source, 0, source->length());
+        pre_data = ParserApi::PartialPreParse(&stream, extension);
+      }
     }
 
     // Create a script object describing the script to be compiled.
@@ -374,40 +575,60 @@
       Top::StackOverflow();
     } else {
       ASSERT(!info->code().is_null());
+      Handle<Code> code = info->code();
+      Handle<JSFunction> function = info->closure();
       RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
                                 Handle<String>(shared->DebugName()),
                                 shared->start_position(),
                                 info);
 
-      // Update the shared function info with the compiled code and the
-      // scope info.  Please note, that the order of the sharedfunction
-      // initialization is important since SerializedScopeInfo::Create might
-      // trigger a GC, causing the ASSERT below to be invalid if the code
-      // was flushed. By setting the code object last we avoid this.
-      Handle<SerializedScopeInfo> scope_info =
-          SerializedScopeInfo::Create(info->scope());
-      shared->set_scope_info(*scope_info);
-      shared->set_code(*info->code());
-      if (!info->closure().is_null()) {
-        info->closure()->set_code(*info->code());
+      if (info->IsOptimizing()) {
+        function->ReplaceCode(*code);
+      } else {
+        // Update the shared function info with the compiled code and the
+        // scope info.  Please note, that the order of the shared function
+        // info initialization is important since set_scope_info might
+        // trigger a GC, causing the ASSERT below to be invalid if the code
+        // was flushed. By setting the code object last we avoid this.
+        Handle<SerializedScopeInfo> scope_info =
+            SerializedScopeInfo::Create(info->scope());
+        shared->set_scope_info(*scope_info);
+        shared->set_code(*code);
+        if (!function.is_null()) {
+          function->ReplaceCode(*code);
+          ASSERT(!function->IsOptimized());
+        }
+
+        // Set the expected number of properties for instances.
+        FunctionLiteral* lit = info->function();
+        int expected = lit->expected_property_count();
+        SetExpectedNofPropertiesFromEstimate(shared, expected);
+
+        // Set the optimization hints after performing lazy compilation, as
+        // these are not set when the function is set up as a lazily
+        // compiled function.
+        shared->SetThisPropertyAssignmentsInfo(
+            lit->has_only_simple_this_property_assignments(),
+            *lit->this_property_assignments());
+
+        // Check the function has compiled code.
+        ASSERT(shared->is_compiled());
+        shared->set_code_age(0);
+
+        if (V8::UseCrankshaft() && info->AllowOptimize()) {
+          // If we're asked to always optimize, we compile the optimized
+          // version of the function right away - unless the debugger is
+          // active as it makes no sense to compile optimized code then.
+          if (FLAG_always_opt && !Debug::has_break_points()) {
+            CompilationInfo optimized(function);
+            optimized.SetOptimizing(AstNode::kNoNumber);
+            return CompileLazy(&optimized);
+          } else if (CompilationCache::ShouldOptimizeEagerly(function)) {
+            RuntimeProfiler::OptimizeSoon(*function);
+          }
+        }
       }
 
-      // Set the expected number of properties for instances.
-      FunctionLiteral* lit = info->function();
-      SetExpectedNofPropertiesFromEstimate(shared,
-                                           lit->expected_property_count());
-
-      // Set the optimization hints after performing lazy compilation, as
-      // these are not set when the function is set up as a lazily compiled
-      // function.
-      shared->SetThisPropertyAssignmentsInfo(
-          lit->has_only_simple_this_property_assignments(),
-          *lit->this_property_assignments());
-
-      // Check the function has compiled code.
-      ASSERT(shared->is_compiled());
-      shared->set_code_age(0);
-      ASSERT(!info->code().is_null());
       return true;
     }
   }
@@ -419,12 +640,6 @@
 
 Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
                                                        Handle<Script> script) {
-#ifdef DEBUG
-  // We should not try to compile the same function literal more than
-  // once.
-  literal->mark_as_compiled();
-#endif
-
   // Precondition: code has been parsed and scopes have been analyzed.
   CompilationInfo info(script);
   info.SetFunction(literal);
@@ -446,28 +661,31 @@
     Handle<Code> code(Builtins::builtin(Builtins::LazyCompile));
     info.SetCode(code);
   } else {
-    // Generate code and return it.  The way that the compilation mode
-    // is controlled by the command-line flags is described in
-    // the static helper function MakeCode.
-    //
-    // The bodies of function literals have not yet been visited by
-    // the AST analyzer.
-    if (!Rewriter::Analyze(&info)) return Handle<SharedFunctionInfo>::null();
-
-    bool is_run_once = literal->try_full_codegen();
-    bool use_full = FLAG_full_compiler && !literal->contains_loops();
-    if (AlwaysFullCompiler() || (use_full && is_run_once)) {
-      if (!FullCodeGenerator::MakeCode(&info)) {
+    if (V8::UseCrankshaft()) {
+      if (!MakeCrankshaftCode(&info)) {
         return Handle<SharedFunctionInfo>::null();
       }
     } else {
-      // We fall back to the classic V8 code generator.
-      AssignedVariablesAnalyzer ava;
-      if (!ava.Analyze(&info)) return Handle<SharedFunctionInfo>::null();
-      if (!CodeGenerator::MakeCode(&info)) {
-        return Handle<SharedFunctionInfo>::null();
+      // The bodies of function literals have not yet been visited by the
+      // AST optimizer/analyzer.
+      if (!Rewriter::Analyze(&info)) return Handle<SharedFunctionInfo>::null();
+
+      bool is_run_once = literal->try_full_codegen();
+      bool can_use_full = FLAG_full_compiler && !literal->contains_loops();
+
+      if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
+        if (!FullCodeGenerator::MakeCode(&info)) {
+          return Handle<SharedFunctionInfo>::null();
+        }
+      } else {
+        // We fall back to the classic V8 code generator.
+        if (!AssignedVariablesAnalyzer::Analyze(&info) ||
+            !CodeGenerator::MakeCode(&info)) {
+          return Handle<SharedFunctionInfo>::null();
+        }
       }
     }
+    ASSERT(!info.code().is_null());
 
     // Function compilation complete.
     RecordFunctionCompilation(Logger::FUNCTION_TAG,
@@ -484,6 +702,7 @@
                                      info.code(),
                                      scope_info);
   SetFunctionInfo(result, literal, false, script);
+  result->set_allows_lazy_compilation(allow_lazy);
 
   // Set the expected number of properties for instances and return
   // the resulting function.
diff --git a/src/compiler.h b/src/compiler.h
index 20868e5..1176c69 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -59,6 +59,7 @@
   v8::Extension* extension() const { return extension_; }
   ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
   Handle<Context> calling_context() const { return calling_context_; }
+  int osr_ast_id() const { return osr_ast_id_; }
 
   void MarkAsEval() {
     ASSERT(!is_lazy());
@@ -93,8 +94,66 @@
     ASSERT(is_eval());
     calling_context_ = context;
   }
+  void SetOsrAstId(int osr_ast_id) {
+    ASSERT(IsOptimizing());
+    osr_ast_id_ = osr_ast_id;
+  }
+
+  bool has_global_object() const {
+    return !closure().is_null() && (closure()->context()->global() != NULL);
+  }
+
+  GlobalObject* global_object() const {
+    return has_global_object() ? closure()->context()->global() : NULL;
+  }
+
+  // Accessors for the different compilation modes.
+  bool IsOptimizing() const { return mode_ == OPTIMIZE; }
+  bool IsOptimizable() const { return mode_ == BASE; }
+  void SetOptimizing(int osr_ast_id) {
+    SetMode(OPTIMIZE);
+    osr_ast_id_ = osr_ast_id;
+  }
+  void DisableOptimization() { SetMode(NONOPT); }
+
+  // Deoptimization support.
+  bool HasDeoptimizationSupport() const { return supports_deoptimization_; }
+  void EnableDeoptimizationSupport() {
+    ASSERT(IsOptimizable());
+    supports_deoptimization_ = true;
+  }
+
+  // Determine whether or not we can adaptively optimize.
+  bool AllowOptimize() {
+    return V8::UseCrankshaft() &&
+           !closure_.is_null() &&
+           function_->AllowOptimize();
+  }
 
  private:
+  // Compilation mode.
+  // BASE is generated by the full codegen, optionally prepared for bailouts.
+  // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
+  // NONOPT is generated by the full codegen or the classic backend
+  //   and is not prepared for recompilation/bailouts. These functions
+  //   are never recompiled.
+  enum Mode {
+    BASE,
+    OPTIMIZE,
+    NONOPT
+  };
+
+  CompilationInfo() : function_(NULL) {}
+
+  void Initialize(Mode mode) {
+    mode_ = V8::UseCrankshaft() ? mode : NONOPT;
+  }
+
+  void SetMode(Mode mode) {
+    ASSERT(V8::UseCrankshaft());
+    mode_ = mode;
+  }
+
   // Flags using template class BitField<type, start, length>.  All are
   // false by default.
   //
@@ -130,6 +189,11 @@
   // handle otherwise.
   Handle<Context> calling_context_;
 
+  // Compilation mode flag and whether deoptimization is allowed.
+  Mode mode_;
+  bool supports_deoptimization_;
+  int osr_ast_id_;
+
   DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
 };
 
@@ -185,7 +249,6 @@
   static bool MakeCodeForLiveEdit(CompilationInfo* info);
 #endif
 
- private:
   static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
                                         Handle<String> name,
                                         int start_position,
diff --git a/src/contexts.cc b/src/contexts.cc
index 1ce5007..3ad72a1 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -239,6 +239,69 @@
 }
 
 
+void Context::AddOptimizedFunction(JSFunction* function) {
+  ASSERT(IsGlobalContext());
+#ifdef DEBUG
+  Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
+  while (!element->IsUndefined()) {
+    CHECK(element != function);
+    element = JSFunction::cast(element)->next_function_link();
+  }
+
+  CHECK(function->next_function_link()->IsUndefined());
+
+  // Check that the context belongs to the weak global contexts list.
+  bool found = false;
+  Object* context = Heap::global_contexts_list();
+  while (!context->IsUndefined()) {
+    if (context == this) {
+      found = true;
+      break;
+    }
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
+  CHECK(found);
+#endif
+  function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST));
+  set(OPTIMIZED_FUNCTIONS_LIST, function);
+}
+
+
+void Context::RemoveOptimizedFunction(JSFunction* function) {
+  ASSERT(IsGlobalContext());
+  Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
+  JSFunction* prev = NULL;
+  while (!element->IsUndefined()) {
+    JSFunction* element_function = JSFunction::cast(element);
+    ASSERT(element_function->next_function_link()->IsUndefined() ||
+           element_function->next_function_link()->IsJSFunction());
+    if (element_function == function) {
+      if (prev == NULL) {
+        set(OPTIMIZED_FUNCTIONS_LIST, element_function->next_function_link());
+      } else {
+        prev->set_next_function_link(element_function->next_function_link());
+      }
+      element_function->set_next_function_link(Heap::undefined_value());
+      return;
+    }
+    prev = element_function;
+    element = element_function->next_function_link();
+  }
+  UNREACHABLE();
+}
+
+
+Object* Context::OptimizedFunctionsListHead() {
+  ASSERT(IsGlobalContext());
+  return get(OPTIMIZED_FUNCTIONS_LIST);
+}
+
+
+void Context::ClearOptimizedFunctions() {
+  set(OPTIMIZED_FUNCTIONS_LIST, Heap::undefined_value());
+}
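
The optimized-functions list is an intrusive singly linked list threaded
through each function's next_function_link field. A self-contained sketch of
the same add/remove discipline (Node and next are placeholder names, not V8
API):

    #include <cassert>

    // Toy intrusive node; next_function_link plays this role on JSFunction.
    struct Node { Node* next = nullptr; };

    // Push at the head, as AddOptimizedFunction does.
    void Add(Node** head, Node* n) {
      assert(n->next == nullptr);  // must not already be on a list
      n->next = *head;
      *head = n;
    }

    // Unlink n and clear its link, as RemoveOptimizedFunction does.
    void Remove(Node** head, Node* n) {
      for (Node** p = head; *p != nullptr; p = &(*p)->next) {
        if (*p == n) {
          *p = n->next;
          n->next = nullptr;
          return;
        }
      }
      assert(false);  // corresponds to UNREACHABLE(): n must be present
    }
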
+
+
 #ifdef DEBUG
 bool Context::IsBootstrappingOrContext(Object* object) {
   // During bootstrapping we allow all objects to pass as
diff --git a/src/contexts.h b/src/contexts.h
index 9722a93..d0d54d1 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -228,12 +228,13 @@
 
     // Properties from here are treated as weak references by the full GC.
     // Scavenge treats them as strong references.
-    NEXT_CONTEXT_LINK,
+    OPTIMIZED_FUNCTIONS_LIST,  // Weak.
+    NEXT_CONTEXT_LINK,  // Weak.
 
     // Total number of slots.
     GLOBAL_CONTEXT_SLOTS,
 
-    FIRST_WEAK_SLOT = NEXT_CONTEXT_LINK
+    FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST
   };
 
   // Direct slot access.
@@ -291,6 +292,12 @@
     return IsCatchContext() && extension() == object;
   }
 
+  // A global context holds a list of all functions which have been optimized.
+  void AddOptimizedFunction(JSFunction* function);
+  void RemoveOptimizedFunction(JSFunction* function);
+  Object* OptimizedFunctionsListHead();
+  void ClearOptimizedFunctions();
+
 #define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
   void  set_##name(type* value) {                         \
     ASSERT(IsGlobalContext());                            \
diff --git a/src/counters.h b/src/counters.h
index aed46cf..048fdaa 100644
--- a/src/counters.h
+++ b/src/counters.h
@@ -28,6 +28,9 @@
 #ifndef V8_COUNTERS_H_
 #define V8_COUNTERS_H_
 
+#include "../include/v8.h"
+#include "allocation.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index da19a45..f13c0ee 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -34,6 +34,7 @@
 #include "frames-inl.h"
 #include "hashmap.h"
 #include "log-inl.h"
+#include "vm-state-inl.h"
 
 #include "../include/v8-profiler.h"
 
@@ -223,7 +224,7 @@
 void ProfilerEventsProcessor::AddCurrentStack() {
   TickSampleEventRecord record;
   TickSample* sample = &record.sample;
-  sample->state = VMState::current_state();
+  sample->state = Top::current_vm_state();
   sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
   sample->frames_count = 0;
   for (StackTraceFrameIterator it;
@@ -314,6 +315,7 @@
 
 
 CpuProfiler* CpuProfiler::singleton_ = NULL;
+Atomic32 CpuProfiler::is_profiling_ = false;
 
 void CpuProfiler::StartProfiling(const char* title) {
   ASSERT(singleton_ != NULL);
@@ -435,7 +437,7 @@
   }
   singleton_->processor_->FunctionCreateEvent(
       function->address(),
-      function->code()->address(),
+      function->shared()->code()->address(),
       security_token_id);
 }
 
@@ -525,6 +527,7 @@
     Logger::logging_nesting_ = 0;
     generator_ = new ProfileGenerator(profiles_);
     processor_ = new ProfilerEventsProcessor(generator_);
+    NoBarrier_Store(&is_profiling_, true);
     processor_->Start();
     // Enumerate stuff we already have in the heap.
     if (Heap::HasBeenSetup()) {
@@ -539,7 +542,9 @@
       Logger::LogAccessorCallbacks();
     }
     // Enable stack sampling.
-    reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
+    Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
+    if (!sampler->IsActive()) sampler->Start();
+    sampler->IncreaseProfilingDepth();
   }
 }
 
@@ -570,12 +575,15 @@
 
 void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
   if (profiles_->IsLastProfile(title)) {
-    reinterpret_cast<Sampler*>(Logger::ticker_)->Stop();
+    Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
+    sampler->DecreaseProfilingDepth();
+    sampler->Stop();
     processor_->Stop();
     processor_->Join();
     delete processor_;
     delete generator_;
     processor_ = NULL;
+    NoBarrier_Store(&is_profiling_, false);
     generator_ = NULL;
     Logger::logging_nesting_ = saved_logging_nesting_;
   }
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index d3158d7..10165f6 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -30,6 +30,7 @@
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
+#include "atomicops.h"
 #include "circular-queue.h"
 #include "unbound-queue.h"
 
@@ -269,7 +270,7 @@
   static void SetterCallbackEvent(String* name, Address entry_point);
 
   static INLINE(bool is_profiling()) {
-    return singleton_ != NULL && singleton_->processor_ != NULL;
+    return NoBarrier_Load(&is_profiling_);
   }
 
  private:
@@ -290,6 +291,7 @@
   int saved_logging_nesting_;
 
   static CpuProfiler* singleton_;
+  static Atomic32 is_profiling_;
 
 #else
   static INLINE(bool is_profiling()) { return false; }
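
The switch from checking two pointers to a dedicated Atomic32 flag makes
is_profiling() safe to call cheaply from the sampler thread without locking.
A modern-C++ analogue of the same pattern, with std::atomic standing in for
V8's Atomic32 and NoBarrier_Load/Store (an illustration, not the actual V8
code):

    #include <atomic>

    // One atomic flag, flipped by the profiler thread and read (relaxed)
    // from any other thread.
    class ProfilerFlag {
     public:
      static bool is_profiling() {
        return flag_.load(std::memory_order_relaxed);  // ~NoBarrier_Load
      }
      static void SetProfiling(bool on) {
        flag_.store(on, std::memory_order_relaxed);    // ~NoBarrier_Store
      }

     private:
      static std::atomic<bool> flag_;
    };

    std::atomic<bool> ProfilerFlag::flag_{false};
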
diff --git a/src/d8.gyp b/src/d8.gyp
new file mode 100644
index 0000000..3283e38
--- /dev/null
+++ b/src/d8.gyp
@@ -0,0 +1,85 @@
+# Copyright 2010 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+  'targets': [
+    {
+      'target_name': 'd8',
+      'type': 'executable',
+      'dependencies': [
+        'd8_js2c#host',
+        '../tools/gyp/v8.gyp:v8',
+      ],
+      'include_dirs+': [
+        '../src',
+      ],
+      'defines': [
+        'ENABLE_DEBUGGER_SUPPORT',
+      ],
+      'sources': [
+        'd8.cc',
+        'd8-debug.cc',
+        '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
+      ],
+      'conditions': [
+        [ 'OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
+          'sources': [ 'd8-posix.cc', ]
+        }],
+      ],
+    },
+    {
+      'target_name': 'd8_js2c',
+      'type': 'none',
+      'toolsets': ['host'],
+      'variables': {
+        'js_files': [
+          'd8.js',
+        ],
+      },
+      'actions': [
+        {
+          'action_name': 'd8_js2c',
+          'inputs': [
+            '../tools/js2c.py',
+            '<@(js_files)',
+          ],
+          'outputs': [
+            '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
+            '<(SHARED_INTERMEDIATE_DIR)/d8-js-empty.cc',
+          ],
+          'action': [
+            'python',
+            '../tools/js2c.py',
+            '<@(_outputs)',
+            'D8',
+            '<@(js_files)'
+          ],
+        },
+      ],
+    }
+  ],
+}
diff --git a/src/d8.h b/src/d8.h
index 30f04c7..de1fe0d 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -138,6 +138,10 @@
   static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
 #endif
 
+#ifdef WIN32
+#undef Yield
+#endif
+
   static Handle<Value> Print(const Arguments& args);
   static Handle<Value> Write(const Arguments& args);
   static Handle<Value> Yield(const Arguments& args);
diff --git a/src/data-flow.cc b/src/data-flow.cc
index be82446..9c02ff4 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -33,7 +33,6 @@
 namespace v8 {
 namespace internal {
 
-
 #ifdef DEBUG
 void BitVector::Print() {
   bool first = true;
@@ -50,13 +49,39 @@
 #endif
 
 
+void BitVector::Iterator::Advance() {
+  current_++;
+  uint32_t val = current_value_;
+  while (val == 0) {
+    current_index_++;
+    if (Done()) return;
+    val = target_->data_[current_index_];
+    current_ = current_index_ << 5;
+  }
+  val = SkipZeroBytes(val);
+  val = SkipZeroBits(val);
+  current_value_ = val >> 1;
+}
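
In Advance, current_value_ holds the not-yet-scanned suffix of the current
32-bit word, so each call resumes where the previous one left off; the
byte-then-bit skipping is just a fast scan for the lowest set bit. A
self-contained sketch of the same scan (names are illustrative, not V8's):

    #include <cstdint>
    #include <vector>

    // Returns the index of the first set bit at or after `from` (>= 0), or
    // -1 if there is none.  Mirrors Advance(): skip whole zero bytes first,
    // then single zero bits, within each 32-bit word.
    int NextSetBit(const std::vector<uint32_t>& words, int from) {
      for (size_t i = from / 32; i < words.size(); ++i) {
        int bit = (i == static_cast<size_t>(from / 32)) ? from % 32 : 0;
        uint32_t val = words[i] >> bit;
        while (val != 0) {
          if ((val & 0xFFu) == 0) {
            val >>= 8;  // skip a zero byte at once
            bit += 8;
          } else if ((val & 1u) == 0) {
            val >>= 1;  // skip a single zero bit
            bit += 1;
          } else {
            return static_cast<int>(i) * 32 + bit;
          }
        }
      }
      return -1;
    }
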
+
+
 bool AssignedVariablesAnalyzer::Analyze(CompilationInfo* info) {
-  info_ = info;
   Scope* scope = info->scope();
-  int variables = scope->num_parameters() + scope->num_stack_slots();
-  if (variables == 0) return true;
-  av_.ExpandTo(variables);
-  VisitStatements(info->function()->body());
+  int size = scope->num_parameters() + scope->num_stack_slots();
+  if (size == 0) return true;
+  AssignedVariablesAnalyzer analyzer(info, size);
+  return analyzer.Analyze();
+}
+
+
+AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(CompilationInfo* info,
+                                                     int size)
+    : info_(info), av_(size) {
+}
+
+
+bool AssignedVariablesAnalyzer::Analyze() {
+  ASSERT(av_.length() > 0);
+  VisitStatements(info_->function()->body());
   return !HasStackOverflow();
 }
 
@@ -318,11 +343,6 @@
 }
 
 
-void AssignedVariablesAnalyzer::VisitSlot(Slot* expr) {
-  UNREACHABLE();
-}
-
-
 void AssignedVariablesAnalyzer::VisitVariableProxy(VariableProxy* expr) {
   // Nothing to do.
   ASSERT(av_.IsEmpty());
diff --git a/src/data-flow.h b/src/data-flow.h
index efce1ea..6e2230c 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -42,10 +42,57 @@
 
 class BitVector: public ZoneObject {
  public:
-  BitVector() : length_(0), data_length_(0), data_(NULL) { }
+  // Iterator for the elements of this BitVector.
+  class Iterator BASE_EMBEDDED {
+   public:
+    explicit Iterator(BitVector* target)
+        : target_(target),
+          current_index_(0),
+          current_value_(target->data_[0]),
+          current_(-1) {
+      ASSERT(target->data_length_ > 0);
+      Advance();
+    }
+    ~Iterator() { }
 
-  explicit BitVector(int length) {
-    ExpandTo(length);
+    bool Done() const { return current_index_ >= target_->data_length_; }
+    void Advance();
+
+    int Current() const {
+      ASSERT(!Done());
+      return current_;
+    }
+
+   private:
+    uint32_t SkipZeroBytes(uint32_t val) {
+      while ((val & 0xFF) == 0) {
+        val >>= 8;
+        current_ += 8;
+      }
+      return val;
+    }
+    uint32_t SkipZeroBits(uint32_t val) {
+      while ((val & 0x1) == 0) {
+        val >>= 1;
+        current_++;
+      }
+      return val;
+    }
+
+    BitVector* target_;
+    int current_index_;
+    uint32_t current_value_;
+    int current_;
+
+    friend class BitVector;
+  };
+
+  explicit BitVector(int length)
+      : length_(length),
+        data_length_(SizeFor(length)),
+        data_(Zone::NewArray<uint32_t>(data_length_)) {
+    ASSERT(length > 0);
+    Clear();
   }
 
   BitVector(const BitVector& other)
@@ -55,12 +102,8 @@
     CopyFrom(other);
   }
 
-  void ExpandTo(int length) {
-    ASSERT(length > 0);
-    length_ = length;
-    data_length_ = SizeFor(length);
-    data_ = Zone::NewArray<uint32_t>(data_length_);
-    Clear();
+  static int SizeFor(int length) {
+    return 1 + ((length - 1) / 32);
   }
 
   BitVector& operator=(const BitVector& rhs) {
@@ -75,7 +118,7 @@
     }
   }
 
-  bool Contains(int i) {
+  bool Contains(int i) const {
     ASSERT(i >= 0 && i < length());
     uint32_t block = data_[i / 32];
     return (block & (1U << (i % 32))) != 0;
@@ -98,6 +141,17 @@
     }
   }
 
+  bool UnionIsChanged(const BitVector& other) {
+    ASSERT(other.length() == length());
+    bool changed = false;
+    for (int i = 0; i < data_length_; i++) {
+      uint32_t old_data = data_[i];
+      data_[i] |= other.data_[i];
+      if (data_[i] != old_data) changed = true;
+    }
+    return changed;
+  }
+
   void Intersect(const BitVector& other) {
     ASSERT(other.length() == length());
     for (int i = 0; i < data_length_; i++) {
@@ -139,16 +193,102 @@
 #endif
 
  private:
-  static int SizeFor(int length) {
-    return 1 + ((length - 1) / 32);
-  }
-
   int length_;
   int data_length_;
   uint32_t* data_;
 };
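
UnionIsChanged exists to drive dataflow analyses to a fixed point: keep
merging sets until nothing grows. A hypothetical sketch of such a driver
loop, with Block and its in/out sets invented for illustration (the per-block
transfer function is omitted):

    #include <vector>

    // Hypothetical CFG node; `in` and `out` are BitVectors as defined above.
    struct Block {
      BitVector* in;
      BitVector* out;
      std::vector<Block*> predecessors;
    };

    // Iterate until no block's `in` set grows; UnionIsChanged reports growth.
    void Solve(const std::vector<Block*>& blocks) {
      bool changed = true;
      while (changed) {
        changed = false;
        for (Block* b : blocks) {
          for (Block* p : b->predecessors) {
            if (b->in->UnionIsChanged(*p->out)) changed = true;
          }
        }
      }
    }
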
 
 
+// An implementation of a sparse set whose elements are drawn from integers
+// in the range [0..universe_size[.  It supports constant-time Contains,
+// destructive Add, and destructive Remove operations and linear-time (in
+// the number of elements) destructive Union.
+class SparseSet: public ZoneObject {
+ public:
+  // Iterator for sparse set elements.  Elements should not be added or
+  // removed during iteration.
+  class Iterator BASE_EMBEDDED {
+   public:
+    explicit Iterator(SparseSet* target) : target_(target), current_(0) {
+      ASSERT(++target->iterator_count_ > 0);
+    }
+    ~Iterator() {
+      ASSERT(target_->iterator_count_-- > 0);
+    }
+    bool Done() const { return current_ >= target_->dense_.length(); }
+    void Advance() {
+      ASSERT(!Done());
+      ++current_;
+    }
+    int Current() {
+      ASSERT(!Done());
+      return target_->dense_[current_];
+    }
+
+   private:
+    SparseSet* target_;
+    int current_;
+
+    friend class SparseSet;
+  };
+
+  explicit SparseSet(int universe_size)
+      : dense_(4),
+        sparse_(Zone::NewArray<int>(universe_size)) {
+#ifdef DEBUG
+    size_ = universe_size;
+    iterator_count_ = 0;
+#endif
+  }
+
+  bool Contains(int n) const {
+    ASSERT(0 <= n && n < size_);
+    int dense_index = sparse_[n];
+    return (0 <= dense_index) &&
+        (dense_index < dense_.length()) &&
+        (dense_[dense_index] == n);
+  }
+
+  void Add(int n) {
+    ASSERT(0 <= n && n < size_);
+    ASSERT(iterator_count_ == 0);
+    if (!Contains(n)) {
+      sparse_[n] = dense_.length();
+      dense_.Add(n);
+    }
+  }
+
+  void Remove(int n) {
+    ASSERT(0 <= n && n < size_);
+    ASSERT(iterator_count_ == 0);
+    if (Contains(n)) {
+      int dense_index = sparse_[n];
+      int last = dense_.RemoveLast();
+      if (dense_index < dense_.length()) {
+        dense_[dense_index] = last;
+        sparse_[last] = dense_index;
+      }
+    }
+  }
+
+  void Union(const SparseSet& other) {
+    for (int i = 0; i < other.dense_.length(); ++i) {
+      Add(other.dense_[i]);
+    }
+  }
+
+ private:
+  // The set is implemented as a pair of a growable dense list and an
+  // uninitialized sparse array.
+  ZoneList<int> dense_;
+  int* sparse_;
+#ifdef DEBUG
+  int size_;
+  int iterator_count_;
+#endif
+};
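
This is the classic Briggs-Torczon sparse set: Contains(n) holds exactly when
sparse_[n] indexes a dense_ slot that points back at n, which is why the
sparse array never needs to be initialized. A short usage sketch of the class
above (it must run inside V8, since the set allocates from the Zone):

    void SparseSetExample() {
      SparseSet live(64);              // elements drawn from [0..64)
      live.Add(3);
      live.Add(17);
      live.Remove(3);                  // 17 is swapped into dense_ slot 0
      bool has17 = live.Contains(17);  // true
      bool has3  = live.Contains(3);   // false: stale sparse_[3] fails the
                                       // dense_[sparse_[3]] == 3 check
      for (SparseSet::Iterator it(&live); !it.Done(); it.Advance()) {
        int element = it.Current();    // visits 17
      }
    }
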
+
+
 // Simple fixed-capacity list-based worklist (managed as a queue) of
 // pointers to T.
 template<typename T>
@@ -198,10 +338,12 @@
 // is guaranteed to be a smi.
 class AssignedVariablesAnalyzer : public AstVisitor {
  public:
-  explicit AssignedVariablesAnalyzer() : info_(NULL) { }
-  bool Analyze(CompilationInfo* info);
+  static bool Analyze(CompilationInfo* info);
 
  private:
+  AssignedVariablesAnalyzer(CompilationInfo* info, int bits);
+  bool Analyze();
+
   Variable* FindSmiLoopVariable(ForStatement* stmt);
 
   int BitIndex(Variable* var);
diff --git a/src/date.js b/src/date.js
index 9601470..bc70327 100644
--- a/src/date.js
+++ b/src/date.js
@@ -81,12 +81,12 @@
 
 
 function InLeapYear(time) {
-  return DaysInYear(YEAR_FROM_TIME(time)) == 366 ? 1 : 0;
+  return DaysInYear(YearFromTime(time)) == 366 ? 1 : 0;
 }
 
 
 function DayWithinYear(time) {
-  return DAY(time) - DayFromYear(YEAR_FROM_TIME(time));
+  return DAY(time) - DayFromYear(YearFromTime(time));
 }
 
 
@@ -114,9 +114,9 @@
   // the actual year if it is in the range 1970..2037
   if (t >= 0 && t <= 2.1e12) return t;
 
-  var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)),
-                    MONTH_FROM_TIME(t),
-                    DATE_FROM_TIME(t));
+  var day = MakeDay(EquivalentYear(YearFromTime(t)),
+                    MonthFromTime(t),
+                    DateFromTime(t));
   return MakeDate(day, TimeWithinDay(t));
 }
 
@@ -253,9 +253,6 @@
 function LocalTimeNoCheck(time) {
   var ltc = ltcache;
   if (%_ObjectEquals(time, ltc.key)) return ltc.val;
-  if (time < -MAX_TIME_MS || time > MAX_TIME_MS) {
-    return $NaN;
-  }
 
   // Inline the DST offset cache checks for speed.
   // The cache is hit, or DaylightSavingsOffset is called,
@@ -371,16 +368,21 @@
 
 // ECMA 262 - 15.9.1.13
 function MakeDate(day, time) {
-  if (!$isFinite(day)) return $NaN;
-  if (!$isFinite(time)) return $NaN;
-  return day * msPerDay + time;
+  var time = day * msPerDay + time;
+  // Some of our runtime functions for computing UTC(time) rely on
+  // times not being significantly larger than MAX_TIME_MS. If there
+  // is no way that the time can be within range even after UTC
+  // conversion we return NaN immediately instead of relying on
+  // TimeClip to do it.
+  if ($abs(time) > MAX_TIME_BEFORE_UTC) return $NaN;
+  return time;
 }
 
 
 // ECMA 262 - 15.9.1.14
 function TimeClip(time) {
   if (!$isFinite(time)) return $NaN;
-  if ($abs(time) > 8.64E15) return $NaN;
+  if ($abs(time) > MAX_TIME_MS) return $NaN;
   return TO_INTEGER(time);
 }
 
@@ -424,7 +426,7 @@
         value = DateParse(year);
         if (!NUMBER_IS_NAN(value)) {
           cache.time = value;
-          cache.year = YEAR_FROM_TIME(LocalTimeNoCheck(value));
+          cache.year = YearFromTime(LocalTimeNoCheck(value));
           cache.string = year;
         }
       }
@@ -642,7 +644,7 @@
   if (NUMBER_IS_NAN(t)) return t;
   var cache = Date_cache;
   if (cache.time === t) return cache.year;
-  return YEAR_FROM_TIME(LocalTimeNoCheck(t));
+  return YearFromTime(LocalTimeNoCheck(t));
 }
 
 
@@ -650,7 +652,7 @@
 function DateGetUTCFullYear() {
   var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return t;
-  return YEAR_FROM_TIME(t);
+  return YearFromTime(t);
 }
 
 
@@ -658,7 +660,7 @@
 function DateGetMonth() {
   var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return t;
-  return MONTH_FROM_TIME(LocalTimeNoCheck(t));
+  return MonthFromTime(LocalTimeNoCheck(t));
 }
 
 
@@ -666,7 +668,7 @@
 function DateGetUTCMonth() {
   var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return t;
-  return MONTH_FROM_TIME(t);
+  return MonthFromTime(t);
 }
 
 
@@ -674,7 +676,7 @@
 function DateGetDate() {
   var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return t;
-  return DATE_FROM_TIME(LocalTimeNoCheck(t));
+  return DateFromTime(LocalTimeNoCheck(t));
 }
 
 
@@ -869,7 +871,7 @@
 function DateSetDate(date) {
   var t = LocalTime(DATE_VALUE(this));
   date = ToNumber(date);
-  var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
+  var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
   return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
 }
 
@@ -878,7 +880,7 @@
 function DateSetUTCDate(date) {
   var t = DATE_VALUE(this);
   date = ToNumber(date);
-  var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
+  var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
   return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
 }
 
@@ -888,7 +890,7 @@
   var t = LocalTime(DATE_VALUE(this));
   month = ToNumber(month);
   date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
-  var day = MakeDay(YEAR_FROM_TIME(t), month, date);
+  var day = MakeDay(YearFromTime(t), month, date);
   return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
 }
 
@@ -898,7 +900,7 @@
   var t = DATE_VALUE(this);
   month = ToNumber(month);
   date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
-  var day = MakeDay(YEAR_FROM_TIME(t), month, date);
+  var day = MakeDay(YearFromTime(t), month, date);
   return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
 }
 
@@ -909,8 +911,8 @@
   t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t);
   year = ToNumber(year);
   var argc = %_ArgumentsLength();
-  month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
-  date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
+  month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
+  date = argc < 3 ? DateFromTime(t) : ToNumber(date);
   var day = MakeDay(year, month, date);
   return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
 }
@@ -922,8 +924,8 @@
   if (NUMBER_IS_NAN(t)) t = 0;
   var argc = %_ArgumentsLength();
   year = ToNumber(year);
-  month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
-  date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
+  month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
+  date = argc < 3 ? DateFromTime(t) : ToNumber(date);
   var day = MakeDay(year, month, date);
   return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
 }
@@ -935,9 +937,9 @@
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
   // Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
   return WeekDays[WeekDay(t)] + ', '
-      + TwoDigitString(DATE_FROM_TIME(t)) + ' '
-      + Months[MONTH_FROM_TIME(t)] + ' '
-      + YEAR_FROM_TIME(t) + ' '
+      + TwoDigitString(DateFromTime(t)) + ' '
+      + Months[MonthFromTime(t)] + ' '
+      + YearFromTime(t) + ' '
       + TimeString(t) + ' GMT';
 }
 
@@ -946,7 +948,7 @@
 function DateGetYear() {
   var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return $NaN;
-  return YEAR_FROM_TIME(LocalTimeNoCheck(t)) - 1900;
+  return YearFromTime(LocalTimeNoCheck(t)) - 1900;
 }
 
 
@@ -958,7 +960,7 @@
   if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN);
   year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
       ? 1900 + TO_INTEGER(year) : year;
-  var day = MakeDay(year, MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
+  var day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
   return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
 }
 
@@ -984,16 +986,57 @@
 function DateToISOString() {
   var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
-  return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1, 2) +
-      '-' + PadInt(this.getUTCDate(), 2) + 'T' + PadInt(this.getUTCHours(), 2) +
-      ':' + PadInt(this.getUTCMinutes(), 2) + ':' + PadInt(this.getUTCSeconds(), 2) +
+  return this.getUTCFullYear() + 
+      '-' + PadInt(this.getUTCMonth() + 1, 2) +
+      '-' + PadInt(this.getUTCDate(), 2) + 
+      'T' + PadInt(this.getUTCHours(), 2) +
+      ':' + PadInt(this.getUTCMinutes(), 2) + 
+      ':' + PadInt(this.getUTCSeconds(), 2) +
       '.' + PadInt(this.getUTCMilliseconds(), 3) +
       'Z';
 }
 
 
 function DateToJSON(key) {
-  return CheckJSONPrimitive(this.toISOString());
+  var o = ToObject(this);
+  var tv = DefaultNumber(o);
+  if (IS_NUMBER(tv) && !$isFinite(tv)) { 
+    return null; 
+  }
+  return o.toISOString();
+}
+
+
+function ResetDateCache() {
+
+  // Reset the local_time_offset:
+  local_time_offset = %DateLocalTimeOffset();
+
+  // Reset the DST offset cache:
+  var cache = DST_offset_cache;
+  cache.offset = 0;
+  cache.start = 0;
+  cache.end = -1;
+  cache.increment = 0;
+  cache.initial_increment = 19 * msPerDay;
+
+  // Reset the timezone cache:
+  timezone_cache_time = $NaN;
+  timezone_cache_timezone = undefined;
+
+  // Reset the ltcache:
+  ltcache.key = null;
+  ltcache.val = null;
+
+  // Reset the ymd_from_time_cache:
+  ymd_from_time_cache = [$NaN, $NaN, $NaN];
+  ymd_from_time_cached_time = $NaN;
+
+  // Reset the date cache:
+  cache = Date_cache;
+  cache.time = $NaN;
+  cache.year = $NaN;
+  cache.string = null;
 }
 
 
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index d091991..090c661 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -858,6 +858,7 @@
   return debugger_flags;
 };
 
+Debug.MakeMirror = MakeMirror;
 
 function MakeExecutionState(break_id) {
   return new ExecutionState(break_id);
@@ -876,9 +877,11 @@
   return %PrepareStep(this.break_id, action, count);
 }
 
-ExecutionState.prototype.evaluateGlobal = function(source, disable_break) {
-  return MakeMirror(
-      %DebugEvaluateGlobal(this.break_id, source, Boolean(disable_break)));
+ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
+    opt_additional_context) {
+  return MakeMirror(%DebugEvaluateGlobal(this.break_id, source,
+                                         Boolean(disable_break),
+                                         opt_additional_context));
 };
 
 ExecutionState.prototype.frameCount = function() {
@@ -1837,6 +1840,7 @@
   var frame = request.arguments.frame;
   var global = request.arguments.global;
   var disable_break = request.arguments.disable_break;
+  var additional_context = request.arguments.additional_context;
 
   // The expression argument could be an integer so we convert it to a
   // string.
@@ -1850,12 +1854,30 @@
   if (!IS_UNDEFINED(frame) && global) {
     return response.failed('Arguments "frame" and "global" are exclusive');
   }
+  
+  var additional_context_object;
+  if (additional_context) {
+    additional_context_object = {};
+    for (var i = 0; i < additional_context.length; i++) {
+      var mapping = additional_context[i];
+      if (!IS_STRING(mapping.name) || !IS_NUMBER(mapping.handle)) {
+        return response.failed("Context element #" + i + 
+            " must contain name:string and handle:number");
+      } 
+      var context_value_mirror = LookupMirror(mapping.handle);
+      if (!context_value_mirror) {
+        return response.failed("Context object '" + mapping.name +
+            "' #" + mapping.handle + "# not found");
+      }
+      additional_context_object[mapping.name] = context_value_mirror.value(); 
+    }
+  }
 
   // Global evaluate.
   if (global) {
     // Evaluate in the global context.
-    response.body =
-        this.exec_state_.evaluateGlobal(expression, Boolean(disable_break));
+    response.body = this.exec_state_.evaluateGlobal(
+        expression, Boolean(disable_break), additional_context_object);
     return;
   }
 
@@ -1877,12 +1899,12 @@
     }
     // Evaluate in the specified frame.
     response.body = this.exec_state_.frame(frame_number).evaluate(
-        expression, Boolean(disable_break));
+        expression, Boolean(disable_break), additional_context_object);
     return;
   } else {
     // Evaluate in the selected frame.
     response.body = this.exec_state_.frame().evaluate(
-        expression, Boolean(disable_break));
+        expression, Boolean(disable_break), additional_context_object);
     return;
   }
 };
diff --git a/src/debug.cc b/src/debug.cc
index f3bf954..ca3c1db 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -35,6 +35,7 @@
 #include "compilation-cache.h"
 #include "compiler.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
 #include "ic.h"
@@ -140,7 +141,9 @@
       Address target = original_rinfo()->target_address();
       Code* code = Code::GetCodeFromTargetAddress(target);
       if ((code->is_inline_cache_stub() &&
-           code->kind() != Code::BINARY_OP_IC) ||
+           !code->is_binary_op_stub() &&
+           !code->is_type_recording_binary_op_stub() &&
+           !code->is_compare_ic_stub()) ||
           RelocInfo::IsConstructCall(rmode())) {
         break_point_++;
         return;
@@ -855,7 +858,7 @@
   if (caught_exception) return false;
 
   // Debugger loaded.
-  debug_context_ = Handle<Context>::cast(GlobalHandles::Create(*context));
+  debug_context_ = context;
 
   return true;
 }
@@ -1661,6 +1664,12 @@
  // Ensure shared is compiled. Return false if this failed.
   if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
 
+  // If preparing for the first break point, make sure to deoptimize all
+  // functions, as debugging does not work with optimized code.
+  if (!has_break_points_) {
+    Deoptimizer::DeoptimizeAll();
+  }
+
   // Create the debug info object.
   Handle<DebugInfo> debug_info = Factory::NewDebugInfo(shared);
 
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
new file mode 100644
index 0000000..dd70baa
--- /dev/null
+++ b/src/deoptimizer.cc
@@ -0,0 +1,1147 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "disasm.h"
+#include "full-codegen.h"
+#include "global-handles.h"
+#include "macro-assembler.h"
+#include "prettyprinter.h"
+
+
+namespace v8 {
+namespace internal {
+
+LargeObjectChunk* Deoptimizer::eager_deoptimization_entry_code_ = NULL;
+LargeObjectChunk* Deoptimizer::lazy_deoptimization_entry_code_ = NULL;
+Deoptimizer* Deoptimizer::current_ = NULL;
+DeoptimizingCodeListNode* Deoptimizer::deoptimizing_code_list_ = NULL;
+
+
+Deoptimizer* Deoptimizer::New(JSFunction* function,
+                              BailoutType type,
+                              unsigned bailout_id,
+                              Address from,
+                              int fp_to_sp_delta) {
+  Deoptimizer* deoptimizer =
+      new Deoptimizer(function, type, bailout_id, from, fp_to_sp_delta);
+  ASSERT(current_ == NULL);
+  current_ = deoptimizer;
+  return deoptimizer;
+}
+
+
+Deoptimizer* Deoptimizer::Grab() {
+  Deoptimizer* result = current_;
+  ASSERT(result != NULL);
+  result->DeleteFrameDescriptions();
+  current_ = NULL;
+  return result;
+}
+
+
+void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
+                                                int count,
+                                                BailoutType type) {
+  TableEntryGenerator generator(masm, type, count);
+  generator.Generate();
+}
+
+
+class DeoptimizingVisitor : public OptimizedFunctionVisitor {
+ public:
+  virtual void EnterContext(Context* context) {
+    if (FLAG_trace_deopt) {
+      PrintF("[deoptimize context: %" V8PRIxPTR "]\n",
+             reinterpret_cast<intptr_t>(context));
+    }
+  }
+
+  virtual void VisitFunction(JSFunction* function) {
+    Deoptimizer::DeoptimizeFunction(function);
+  }
+
+  virtual void LeaveContext(Context* context) {
+    context->ClearOptimizedFunctions();
+  }
+};
+
+
+void Deoptimizer::DeoptimizeAll() {
+  AssertNoAllocation no_allocation;
+
+  if (FLAG_trace_deopt) {
+    PrintF("[deoptimize all contexts]\n");
+  }
+
+  DeoptimizingVisitor visitor;
+  VisitAllOptimizedFunctions(&visitor);
+}
+
+
+void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
+  AssertNoAllocation no_allocation;
+
+  DeoptimizingVisitor visitor;
+  VisitAllOptimizedFunctionsForGlobalObject(object, &visitor);
+}
+
+
+void Deoptimizer::VisitAllOptimizedFunctionsForContext(
+    Context* context, OptimizedFunctionVisitor* visitor) {
+  AssertNoAllocation no_allocation;
+
+  ASSERT(context->IsGlobalContext());
+
+  visitor->EnterContext(context);
+  // Run through the list of optimized functions and deoptimize them.
+  Object* element = context->OptimizedFunctionsListHead();
+  while (!element->IsUndefined()) {
+    JSFunction* element_function = JSFunction::cast(element);
+    // Get the next link before deoptimizing as deoptimizing will clear the
+    // next link.
+    element = element_function->next_function_link();
+    visitor->VisitFunction(element_function);
+  }
+  visitor->LeaveContext(context);
+}
+
+
+void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject(
+    JSObject* object, OptimizedFunctionVisitor* visitor) {
+  AssertNoAllocation no_allocation;
+
+  if (object->IsJSGlobalProxy()) {
+    Object* proto = object->GetPrototype();
+    ASSERT(proto->IsJSGlobalObject());
+    VisitAllOptimizedFunctionsForContext(
+        GlobalObject::cast(proto)->global_context(), visitor);
+  } else if (object->IsGlobalObject()) {
+    VisitAllOptimizedFunctionsForContext(
+        GlobalObject::cast(object)->global_context(), visitor);
+  }
+}
+
+
+void Deoptimizer::VisitAllOptimizedFunctions(
+    OptimizedFunctionVisitor* visitor) {
+  AssertNoAllocation no_allocation;
+
+  // Run through the list of all global contexts and deoptimize.
+  Object* global = Heap::global_contexts_list();
+  while (!global->IsUndefined()) {
+    VisitAllOptimizedFunctionsForGlobalObject(Context::cast(global)->global(),
+                                              visitor);
+    global = Context::cast(global)->get(Context::NEXT_CONTEXT_LINK);
+  }
+}
+
+
+void Deoptimizer::HandleWeakDeoptimizedCode(
+    v8::Persistent<v8::Value> obj, void* data) {
+  DeoptimizingCodeListNode* node =
+      reinterpret_cast<DeoptimizingCodeListNode*>(data);
+  RemoveDeoptimizingCode(*node->code());
+#ifdef DEBUG
+  node = Deoptimizer::deoptimizing_code_list_;
+  while (node != NULL) {
+    ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data));
+    node = node->next();
+  }
+#endif
+}
+
+
+void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
+  deoptimizer->DoComputeOutputFrames();
+}
+
+
+Deoptimizer::Deoptimizer(JSFunction* function,
+                         BailoutType type,
+                         unsigned bailout_id,
+                         Address from,
+                         int fp_to_sp_delta)
+    : function_(function),
+      bailout_id_(bailout_id),
+      bailout_type_(type),
+      from_(from),
+      fp_to_sp_delta_(fp_to_sp_delta),
+      output_count_(0),
+      output_(NULL),
+      integer32_values_(NULL),
+      double_values_(NULL) {
+  if (FLAG_trace_deopt && type != OSR) {
+    PrintF("**** DEOPT: ");
+    function->PrintName();
+    PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
+           bailout_id,
+           reinterpret_cast<intptr_t>(from),
+           fp_to_sp_delta - (2 * kPointerSize));
+  } else if (FLAG_trace_osr && type == OSR) {
+    PrintF("**** OSR: ");
+    function->PrintName();
+    PrintF(" at ast id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
+           bailout_id,
+           reinterpret_cast<intptr_t>(from),
+           fp_to_sp_delta - (2 * kPointerSize));
+  }
+  // Find the optimized code.
+  if (type == EAGER) {
+    ASSERT(from == NULL);
+    optimized_code_ = function_->code();
+  } else if (type == LAZY) {
+    optimized_code_ = FindDeoptimizingCodeFromAddress(from);
+    ASSERT(optimized_code_ != NULL);
+  } else if (type == OSR) {
+    // The function has already been optimized and we're transitioning
+    // from the unoptimized shared version to the optimized one in the
+    // function. The return address (from) points to unoptimized code.
+    optimized_code_ = function_->code();
+    ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
+    ASSERT(!optimized_code_->contains(from));
+  }
+  ASSERT(Heap::allow_allocation(false));
+  unsigned size = ComputeInputFrameSize();
+  input_ = new(size) FrameDescription(size, function);
+}
+
+
+Deoptimizer::~Deoptimizer() {
+  ASSERT(input_ == NULL && output_ == NULL);
+  delete[] integer32_values_;
+  delete[] double_values_;
+}
+
+
+void Deoptimizer::DeleteFrameDescriptions() {
+  delete input_;
+  for (int i = 0; i < output_count_; ++i) {
+    if (output_[i] != input_) delete output_[i];
+  }
+  delete[] output_;
+  input_ = NULL;
+  output_ = NULL;
+  ASSERT(!Heap::allow_allocation(true));
+}
+
+
+Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
+  ASSERT(id >= 0);
+  if (id >= kNumberOfEntries) return NULL;
+  LargeObjectChunk* base = NULL;
+  if (type == EAGER) {
+    if (eager_deoptimization_entry_code_ == NULL) {
+      eager_deoptimization_entry_code_ = CreateCode(type);
+    }
+    base = eager_deoptimization_entry_code_;
+  } else {
+    if (lazy_deoptimization_entry_code_ == NULL) {
+      lazy_deoptimization_entry_code_ = CreateCode(type);
+    }
+    base = lazy_deoptimization_entry_code_;
+  }
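+  // Entries are laid out back to back, each table_entry_size_ bytes long,
+  // so the entry for id is at a fixed offset from the chunk start.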
+  return
+      static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
+}
+
+
+int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
+  LargeObjectChunk* base = NULL;
+  if (type == EAGER) {
+    base = eager_deoptimization_entry_code_;
+  } else {
+    base = lazy_deoptimization_entry_code_;
+  }
+  if (base == NULL ||
+      addr < base->GetStartAddress() ||
+      addr >= base->GetStartAddress() +
+          (kNumberOfEntries * table_entry_size_)) {
+    return kNotDeoptimizationEntry;
+  }
+  ASSERT_EQ(0,
+      static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
+  return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
+}
+
+
+void Deoptimizer::Setup() {
+  // Do nothing yet.
+}
+
+
+void Deoptimizer::TearDown() {
+  if (eager_deoptimization_entry_code_ != NULL) {
+    eager_deoptimization_entry_code_->Free(EXECUTABLE);
+    eager_deoptimization_entry_code_ = NULL;
+  }
+  if (lazy_deoptimization_entry_code_ != NULL) {
+    lazy_deoptimization_entry_code_->Free(EXECUTABLE);
+    lazy_deoptimization_entry_code_ = NULL;
+  }
+}
+
+
+unsigned Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
+                                    unsigned id,
+                                    SharedFunctionInfo* shared) {
+  // TODO(kasperl): For now, we do a simple linear search for the PC
+  // offset associated with the given node id. This should probably be
+  // changed to a binary search.
+  int length = data->DeoptPoints();
+  Smi* smi_id = Smi::FromInt(id);
+  for (int i = 0; i < length; i++) {
+    if (data->AstId(i) == smi_id) {
+      return data->PcAndState(i)->value();
+    }
+  }
+  PrintF("[couldn't find pc offset for node=%u]\n", id);
+  PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
+  // Print the source code if available.
+  HeapStringAllocator string_allocator;
+  StringStream stream(&string_allocator);
+  shared->SourceCodePrint(&stream, -1);
+  PrintF("[source:\n%s\n]", *stream.ToCString());
+
+  UNREACHABLE();
+  return -1;
+}
+
+
+int Deoptimizer::GetDeoptimizedCodeCount() {
+  int length = 0;
+  DeoptimizingCodeListNode* node = Deoptimizer::deoptimizing_code_list_;
+  while (node != NULL) {
+    length++;
+    node = node->next();
+  }
+  return length;
+}
+
+
+void Deoptimizer::DoComputeOutputFrames() {
+  if (bailout_type_ == OSR) {
+    DoComputeOsrOutputFrame();
+    return;
+  }
+
+  // Print some helpful diagnostic information.
+  int64_t start = OS::Ticks();
+  if (FLAG_trace_deopt) {
+    PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
+           (bailout_type_ == LAZY ? " (lazy)" : ""),
+           reinterpret_cast<intptr_t>(function_));
+    function_->PrintName();
+    PrintF(" @%d]\n", bailout_id_);
+  }
+
+  // Determine basic deoptimization information.  The optimized frame is
+  // described by the input data.
+  DeoptimizationInputData* input_data =
+      DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
+  unsigned node_id = input_data->AstId(bailout_id_)->value();
+  ByteArray* translations = input_data->TranslationByteArray();
+  unsigned translation_index =
+      input_data->TranslationIndex(bailout_id_)->value();
+
+  // Do the input frame to output frame(s) translation.
+  TranslationIterator iterator(translations, translation_index);
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator.Next());
+  ASSERT(Translation::BEGIN == opcode);
+  USE(opcode);
+  // Read the number of output frames and allocate an array for their
+  // descriptions.
+  int count = iterator.Next();
+  ASSERT(output_ == NULL);
+  output_ = new FrameDescription*[count];
+  // Per-frame lists of untagged and unboxed int32 and double values.
+  integer32_values_ = new List<ValueDescriptionInteger32>[count];
+  double_values_ = new List<ValueDescriptionDouble>[count];
+  for (int i = 0; i < count; ++i) {
+    output_[i] = NULL;
+    integer32_values_[i].Initialize(0);
+    double_values_[i].Initialize(0);
+  }
+  output_count_ = count;
+
+  // Translate each output frame.
+  for (int i = 0; i < count; ++i) {
+    DoComputeFrame(&iterator, i);
+  }
+
+  // Print some helpful diagnostic information.
+  if (FLAG_trace_deopt) {
+    double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+    int index = output_count_ - 1;  // Index of the topmost frame.
+    JSFunction* function = output_[index]->GetFunction();
+    PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
+           reinterpret_cast<intptr_t>(function));
+    function->PrintName();
+    PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, took %0.3f ms]\n",
+           node_id,
+           output_[index]->GetPc(),
+           FullCodeGenerator::State2String(
+               static_cast<FullCodeGenerator::State>(
+                   output_[index]->GetState()->value())),
+           ms);
+  }
+}
+
+
+void Deoptimizer::InsertHeapNumberValues(int index, JavaScriptFrame* frame) {
+  // We need to adjust the stack index by one for the top-most frame.
+  int extra_slot_count = (index == output_count() - 1) ? 1 : 0;
+  List<ValueDescriptionInteger32>* ints = &integer32_values_[index];
+  for (int i = 0; i < ints->length(); i++) {
+    ValueDescriptionInteger32 value = ints->at(i);
+    double val = static_cast<double>(value.int32_value());
+    InsertHeapNumberValue(frame, value.stack_index(), val, extra_slot_count);
+  }
+
+  // Iterate over double values and convert them to a heap number.
+  List<ValueDescriptionDouble>* doubles = &double_values_[index];
+  for (int i = 0; i < doubles->length(); ++i) {
+    ValueDescriptionDouble value = doubles->at(i);
+    InsertHeapNumberValue(frame, value.stack_index(), value.double_value(),
+                          extra_slot_count);
+  }
+}
+
+
+void Deoptimizer::InsertHeapNumberValue(JavaScriptFrame* frame,
+                                        int stack_index,
+                                        double val,
+                                        int extra_slot_count) {
+  // Add one to the TOS index to account for the 'state' value pushed
+  // before jumping to the stub that calls Runtime::NotifyDeoptimized.
+  int tos_index = stack_index + extra_slot_count;
+  int index = (frame->ComputeExpressionsCount() - 1) - tos_index;
+  if (FLAG_trace_deopt) PrintF("Allocating a new heap number: %e\n", val);
+  Handle<Object> num = Factory::NewNumber(val);
+  frame->SetExpression(index, *num);
+}
+
+
+void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
+                                     int frame_index,
+                                     unsigned output_offset) {
+  disasm::NameConverter converter;
+  // A GC-safe temporary placeholder that we can put in the output frame.
+  const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+
+  // Ignore commands marked as duplicate and act on the first non-duplicate.
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  while (opcode == Translation::DUPLICATE) {
+    opcode = static_cast<Translation::Opcode>(iterator->Next());
+    iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+    opcode = static_cast<Translation::Opcode>(iterator->Next());
+  }
+
+  switch (opcode) {
+    case Translation::BEGIN:
+    case Translation::FRAME:
+    case Translation::DUPLICATE:
+      UNREACHABLE();
+      return;
+
+    case Translation::REGISTER: {
+      int input_reg = iterator->Next();
+      intptr_t input_value = input_->GetRegister(input_reg);
+      if (FLAG_trace_deopt) {
+        PrintF(
+            "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
+            output_[frame_index]->GetTop() + output_offset,
+            output_offset,
+            input_value,
+            converter.NameOfCPURegister(input_reg));
+      }
+      output_[frame_index]->SetFrameSlot(output_offset, input_value);
+      return;
+    }
+
+    case Translation::INT32_REGISTER: {
+      int input_reg = iterator->Next();
+      intptr_t value = input_->GetRegister(input_reg);
+      bool is_smi = Smi::IsValid(value);
+      unsigned output_index = output_offset / kPointerSize;
+      if (FLAG_trace_deopt) {
+        PrintF(
+            "    0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
+            output_[frame_index]->GetTop() + output_offset,
+            output_offset,
+            value,
+            converter.NameOfCPURegister(input_reg),
+            is_smi ? "smi" : "heap number");
+      }
+      if (is_smi) {
+        intptr_t tagged_value =
+            reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+      } else {
+        // We save the untagged value on the side and store a GC-safe
+        // temporary placeholder in the frame.
+        AddInteger32Value(frame_index,
+                          output_index,
+                          static_cast<int32_t>(value));
+        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+      }
+      return;
+    }
+
+    case Translation::DOUBLE_REGISTER: {
+      int input_reg = iterator->Next();
+      double value = input_->GetDoubleRegister(input_reg);
+      unsigned output_index = output_offset / kPointerSize;
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
+               output_[frame_index]->GetTop() + output_offset,
+               output_offset,
+               value,
+               DoubleRegister::AllocationIndexToString(input_reg));
+      }
+      // We save the untagged value on the side and store a GC-safe
+      // temporary placeholder in the frame.
+      AddDoubleValue(frame_index, output_index, value);
+      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+      return;
+    }
+
+    case Translation::STACK_SLOT: {
+      int input_slot_index = iterator->Next();
+      unsigned input_offset =
+          input_->GetOffsetFromSlotIndex(this, input_slot_index);
+      intptr_t input_value = input_->GetFrameSlot(input_offset);
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08" V8PRIxPTR ": ",
+               output_[frame_index]->GetTop() + output_offset);
+        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
+               output_offset,
+               input_value,
+               input_offset);
+      }
+      output_[frame_index]->SetFrameSlot(output_offset, input_value);
+      return;
+    }
+
+    case Translation::INT32_STACK_SLOT: {
+      int input_slot_index = iterator->Next();
+      unsigned input_offset =
+          input_->GetOffsetFromSlotIndex(this, input_slot_index);
+      intptr_t value = input_->GetFrameSlot(input_offset);
+      bool is_smi = Smi::IsValid(value);
+      unsigned output_index = output_offset / kPointerSize;
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08" V8PRIxPTR ": ",
+               output_[frame_index]->GetTop() + output_offset);
+        PrintF("[top + %d] <- %" V8PRIdPTR " ; [esp + %d] (%s)\n",
+               output_offset,
+               value,
+               input_offset,
+               is_smi ? "smi" : "heap number");
+      }
+      if (is_smi) {
+        intptr_t tagged_value =
+            reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
+        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
+      } else {
+        // We save the untagged value on the side and store a GC-safe
+        // temporary placeholder in the frame.
+        AddInteger32Value(frame_index,
+                          output_index,
+                          static_cast<int32_t>(value));
+        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+      }
+      return;
+    }
+
+    case Translation::DOUBLE_STACK_SLOT: {
+      int input_slot_index = iterator->Next();
+      unsigned input_offset =
+          input_->GetOffsetFromSlotIndex(this, input_slot_index);
+      double value = input_->GetDoubleFrameSlot(input_offset);
+      unsigned output_index = output_offset / kPointerSize;
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
+               output_[frame_index]->GetTop() + output_offset,
+               output_offset,
+               value,
+               input_offset);
+      }
+      // We save the untagged value on the side and store a GC-safe
+      // temporary placeholder in the frame.
+      AddDoubleValue(frame_index, output_index, value);
+      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
+      return;
+    }
+
+    case Translation::LITERAL: {
+      Object* literal = ComputeLiteral(iterator->Next());
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- ",
+               output_[frame_index]->GetTop() + output_offset,
+               output_offset);
+        literal->ShortPrint();
+        PrintF(" ; literal\n");
+      }
+      intptr_t value = reinterpret_cast<intptr_t>(literal);
+      output_[frame_index]->SetFrameSlot(output_offset, value);
+      return;
+    }
+
+    case Translation::ARGUMENTS_OBJECT: {
+      // Use the hole value as a sentinel and fill in the arguments object
+      // after the deoptimized frame is built.
+      ASSERT(frame_index == 0);  // Only supported for first frame.
+      if (FLAG_trace_deopt) {
+        PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- ",
+               output_[frame_index]->GetTop() + output_offset,
+               output_offset);
+        Heap::the_hole_value()->ShortPrint();
+        PrintF(" ; arguments object\n");
+      }
+      intptr_t value = reinterpret_cast<intptr_t>(Heap::the_hole_value());
+      output_[frame_index]->SetFrameSlot(output_offset, value);
+      return;
+    }
+  }
+}
+
+
+bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
+                                        int* input_offset) {
+  disasm::NameConverter converter;
+  FrameDescription* output = output_[0];
+
+  // The input values are all part of the unoptimized frame so they
+  // are all tagged pointers.
+  uintptr_t input_value = input_->GetFrameSlot(*input_offset);
+  Object* input_object = reinterpret_cast<Object*>(input_value);
+
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  bool duplicate = (opcode == Translation::DUPLICATE);
+  if (duplicate) {
+    opcode = static_cast<Translation::Opcode>(iterator->Next());
+  }
+
+  switch (opcode) {
+    case Translation::BEGIN:
+    case Translation::FRAME:
+    case Translation::DUPLICATE:
+      UNREACHABLE();  // Malformed input.
+      return false;
+
+    case Translation::REGISTER: {
+      int output_reg = iterator->Next();
+      if (FLAG_trace_osr) {
+        PrintF("    %s <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
+               converter.NameOfCPURegister(output_reg),
+               input_value,
+               *input_offset);
+      }
+      output->SetRegister(output_reg, input_value);
+      break;
+    }
+
+    case Translation::INT32_REGISTER: {
+      // Abort OSR if we don't have a number.
+      if (!input_object->IsNumber()) return false;
+
+      int output_reg = iterator->Next();
+      int int32_value = input_object->IsSmi()
+          ? Smi::cast(input_object)->value()
+          : FastD2I(input_object->Number());
+      // Abort the translation if the conversion lost information.
+      if (!input_object->IsSmi() &&
+          FastI2D(int32_value) != input_object->Number()) {
+        if (FLAG_trace_osr) {
+          PrintF("**** %g could not be converted to int32 ****\n",
+                 input_object->Number());
+        }
+        return false;
+      }
+      if (FLAG_trace_osr) {
+        PrintF("    %s <- %d (int32) ; [esp + %d]\n",
+               converter.NameOfCPURegister(output_reg),
+               int32_value,
+               *input_offset);
+      }
+      output->SetRegister(output_reg, int32_value);
+      break;
+    }
+
+    case Translation::DOUBLE_REGISTER: {
+      // Abort OSR if we don't have a number.
+      if (!input_object->IsNumber()) return false;
+
+      int output_reg = iterator->Next();
+      double double_value = input_object->Number();
+      if (FLAG_trace_osr) {
+        PrintF("    %s <- %g (double) ; [esp + %d]\n",
+               DoubleRegister::AllocationIndexToString(output_reg),
+               double_value,
+               *input_offset);
+      }
+      output->SetDoubleRegister(output_reg, double_value);
+      break;
+    }
+
+    case Translation::STACK_SLOT: {
+      int output_index = iterator->Next();
+      unsigned output_offset =
+          output->GetOffsetFromSlotIndex(this, output_index);
+      if (FLAG_trace_osr) {
+        PrintF("    [esp + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
+               output_offset,
+               input_value,
+               *input_offset);
+      }
+      output->SetFrameSlot(output_offset, input_value);
+      break;
+    }
+
+    case Translation::INT32_STACK_SLOT: {
+      // Abort OSR if we don't have a number.
+      if (!input_object->IsNumber()) return false;
+
+      int output_index = iterator->Next();
+      unsigned output_offset =
+          output->GetOffsetFromSlotIndex(this, output_index);
+      int int32_value = input_object->IsSmi()
+          ? Smi::cast(input_object)->value()
+          : DoubleToInt32(input_object->Number());
+      // Abort the translation if the conversion lost information.
+      if (!input_object->IsSmi() &&
+          FastI2D(int32_value) != input_object->Number()) {
+        if (FLAG_trace_osr) {
+          PrintF("**** %g could not be converted to int32 ****\n",
+                 input_object->Number());
+        }
+        return false;
+      }
+      if (FLAG_trace_osr) {
+        PrintF("    [esp + %d] <- %d (int32) ; [esp + %d]\n",
+               output_offset,
+               int32_value,
+               *input_offset);
+      }
+      output->SetFrameSlot(output_offset, int32_value);
+      break;
+    }
+
+    case Translation::DOUBLE_STACK_SLOT: {
+      static const int kLowerOffset = 0 * kPointerSize;
+      static const int kUpperOffset = 1 * kPointerSize;
+
+      // Abort OSR if we don't have a number.
+      if (!input_object->IsNumber()) return false;
+
+      int output_index = iterator->Next();
+      unsigned output_offset =
+          output->GetOffsetFromSlotIndex(this, output_index);
+      double double_value = input_object->Number();
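+      // Split the double into the raw lower and upper halves of its bit
+      // pattern; a double occupies two adjacent pointer-sized stack slots.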
+      uint64_t int_value = BitCast<uint64_t, double>(double_value);
+      int32_t lower = static_cast<int32_t>(int_value);
+      int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
+      if (FLAG_trace_osr) {
+        PrintF("    [esp + %d] <- 0x%08x (upper bits of %g) ; [esp + %d]\n",
+               output_offset + kUpperOffset,
+               upper,
+               double_value,
+               *input_offset);
+        PrintF("    [esp + %d] <- 0x%08x (lower bits of %g) ; [esp + %d]\n",
+               output_offset + kLowerOffset,
+               lower,
+               double_value,
+               *input_offset);
+      }
+      output->SetFrameSlot(output_offset + kLowerOffset, lower);
+      output->SetFrameSlot(output_offset + kUpperOffset, upper);
+      break;
+    }
+
+    case Translation::LITERAL: {
+      // Just ignore non-materialized literals.
+      iterator->Next();
+      break;
+    }
+
+    case Translation::ARGUMENTS_OBJECT: {
+      // Optimized code assumes that the arguments object has not been
+      // materialized and so bypasses it when doing arguments access.
+      // We should have bailed out before starting the frame
+      // translation.
+      UNREACHABLE();
+      return false;
+    }
+  }
+
+  if (!duplicate) *input_offset -= kPointerSize;
+  return true;
+}
+
+
+unsigned Deoptimizer::ComputeInputFrameSize() const {
+  unsigned fixed_size = ComputeFixedSize(function_);
+  // The fp-to-sp delta already takes the context and the function
+  // into account so we have to avoid double counting them (-2).
+  unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
+#ifdef DEBUG
+  if (bailout_type_ == OSR) {
+    // TODO(kasperl): It would be nice if we could verify that the
+    // size matches with the stack height we can compute based on the
+    // environment at the OSR entry. The code for that is built into
+    // the DoComputeOsrOutputFrame function for now.
+  } else {
+    unsigned stack_slots = optimized_code_->stack_slots();
+    unsigned outgoing_size = ComputeOutgoingArgumentSize();
+    ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
+  }
+#endif
+  return result;
+}
+
+
+unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
+  // The fixed part of the frame consists of the return address, frame
+  // pointer, function, context, and all the incoming arguments.
+  static const unsigned kFixedSlotSize = 4 * kPointerSize;
+  return ComputeIncomingArgumentSize(function) + kFixedSlotSize;
+}
+
+
+unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
+  // The incoming arguments are the values of the formal parameters and
+  // the receiver. Every slot contains a pointer.
+  unsigned arguments = function->shared()->formal_parameter_count() + 1;
+  return arguments * kPointerSize;
+}
+
+
+unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
+  DeoptimizationInputData* data = DeoptimizationInputData::cast(
+      optimized_code_->deoptimization_data());
+  unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
+  return height * kPointerSize;
+}
+
+
+Object* Deoptimizer::ComputeLiteral(int index) const {
+  DeoptimizationInputData* data = DeoptimizationInputData::cast(
+      optimized_code_->deoptimization_data());
+  FixedArray* literals = data->LiteralArray();
+  return literals->get(index);
+}
+
+
+void Deoptimizer::AddInteger32Value(int frame_index,
+                                    int slot_index,
+                                    int32_t value) {
+  ValueDescriptionInteger32 value_desc(slot_index, value);
+  integer32_values_[frame_index].Add(value_desc);
+}
+
+
+void Deoptimizer::AddDoubleValue(int frame_index,
+                                 int slot_index,
+                                 double value) {
+  ValueDescriptionDouble value_desc(slot_index, value);
+  double_values_[frame_index].Add(value_desc);
+}
+
+
+LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
+  // We cannot run this if the serializer is enabled because this will
+  // cause us to emit relocation information for the external
+  // references. This is fine because the deoptimizer's code section
+  // isn't meant to be serialized at all.
+  ASSERT(!Serializer::enabled());
+  bool old_debug_code = FLAG_debug_code;
+  FLAG_debug_code = false;
+
+  MacroAssembler masm(NULL, 16 * KB);
+  GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  ASSERT(desc.reloc_size == 0);
+
+  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
+  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
+  FLAG_debug_code = old_debug_code;
+  return chunk;
+}
+
+
+Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
+  DeoptimizingCodeListNode* node = Deoptimizer::deoptimizing_code_list_;
+  while (node != NULL) {
+    if (node->code()->contains(addr)) return *node->code();
+    node = node->next();
+  }
+  return NULL;
+}
+
+
+void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
+  ASSERT(deoptimizing_code_list_ != NULL);
+  // Run through the code objects to find this one and remove it.
+  DeoptimizingCodeListNode* prev = NULL;
+  DeoptimizingCodeListNode* current = deoptimizing_code_list_;
+  while (current != NULL) {
+    if (*current->code() == code) {
+      // Unlink from list. If prev is NULL we are looking at the first element.
+      if (prev == NULL) {
+        deoptimizing_code_list_ = current->next();
+      } else {
+        prev->set_next(current->next());
+      }
+      delete current;
+      return;
+    }
+    // Move to next in list.
+    prev = current;
+    current = current->next();
+  }
+  // Deoptimizing code is removed through a weak callback. Each object is
+  // expected to be removed exactly once.
+  UNREACHABLE();
+}
+
+
+FrameDescription::FrameDescription(uint32_t frame_size,
+                                   JSFunction* function)
+    : frame_size_(frame_size),
+      function_(function),
+      top_(kZapUint32),
+      pc_(kZapUint32),
+      fp_(kZapUint32) {
+  // Zap all the registers.
+  for (int r = 0; r < Register::kNumRegisters; r++) {
+    SetRegister(r, kZapUint32);
+  }
+
+  // Zap all the slots.
+  for (unsigned o = 0; o < frame_size; o += kPointerSize) {
+    SetFrameSlot(o, kZapUint32);
+  }
+}
+
+
+unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
+                                                  int slot_index) {
+  if (slot_index >= 0) {
+    // Local or spill slots. Skip the fixed part of the frame
+    // including all arguments.
+    unsigned base = static_cast<unsigned>(
+        GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction()));
+    return base - ((slot_index + 1) * kPointerSize);
+  } else {
+    // Incoming parameter.
+    unsigned base = static_cast<unsigned>(GetFrameSize() -
+        deoptimizer->ComputeIncomingArgumentSize(GetFunction()));
+    return base - ((slot_index + 1) * kPointerSize);
+  }
+}
+
+
+void TranslationBuffer::Add(int32_t value) {
+  // Encode the sign bit in the least significant bit.
+  bool is_negative = (value < 0);
+  uint32_t bits = ((is_negative ? -value : value) << 1) |
+      static_cast<int32_t>(is_negative);
+  // Encode the individual bytes using the least significant bit of
+  // each byte to indicate whether or not more bytes follow.
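+  // For example, -3 is encoded as the bits 0b111 (the value 3 shifted
+  // left by one with the sign bit set), which fits in a single byte:
+  // 0b00001110.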
+  do {
+    uint32_t next = bits >> 7;
+    contents_.Add(((bits << 1) & 0xFF) | (next != 0));
+    bits = next;
+  } while (bits != 0);
+}
+
+
+int32_t TranslationIterator::Next() {
+  ASSERT(HasNext());
+  // Run through the bytes until we reach one with a least significant
+  // bit of zero (marks the end).
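+  // For example, the single byte 0b00001110 decodes to the bits 0b111,
+  // which yields the value -3.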
+  uint32_t bits = 0;
+  for (int i = 0; true; i += 7) {
+    uint8_t next = buffer_->get(index_++);
+    bits |= (next >> 1) << i;
+    if ((next & 1) == 0) break;
+  }
+  // The bits encode the sign in the least significant bit.
+  bool is_negative = (bits & 1) == 1;
+  int32_t result = bits >> 1;
+  return is_negative ? -result : result;
+}
+
+
+Handle<ByteArray> TranslationBuffer::CreateByteArray() {
+  int length = contents_.length();
+  Handle<ByteArray> result = Factory::NewByteArray(length, TENURED);
+  memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
+  return result;
+}
+
+
+void Translation::BeginFrame(int node_id, int literal_id, unsigned height) {
+  buffer_->Add(FRAME);
+  buffer_->Add(node_id);
+  buffer_->Add(literal_id);
+  buffer_->Add(height);
+}
+
+
+void Translation::StoreRegister(Register reg) {
+  buffer_->Add(REGISTER);
+  buffer_->Add(reg.code());
+}
+
+
+void Translation::StoreInt32Register(Register reg) {
+  buffer_->Add(INT32_REGISTER);
+  buffer_->Add(reg.code());
+}
+
+
+void Translation::StoreDoubleRegister(DoubleRegister reg) {
+  buffer_->Add(DOUBLE_REGISTER);
+  buffer_->Add(DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+void Translation::StoreStackSlot(int index) {
+  buffer_->Add(STACK_SLOT);
+  buffer_->Add(index);
+}
+
+
+void Translation::StoreInt32StackSlot(int index) {
+  buffer_->Add(INT32_STACK_SLOT);
+  buffer_->Add(index);
+}
+
+
+void Translation::StoreDoubleStackSlot(int index) {
+  buffer_->Add(DOUBLE_STACK_SLOT);
+  buffer_->Add(index);
+}
+
+
+void Translation::StoreLiteral(int literal_id) {
+  buffer_->Add(LITERAL);
+  buffer_->Add(literal_id);
+}
+
+
+void Translation::StoreArgumentsObject() {
+  buffer_->Add(ARGUMENTS_OBJECT);
+}
+
+
+void Translation::MarkDuplicate() {
+  buffer_->Add(DUPLICATE);
+}
+
+
+int Translation::NumberOfOperandsFor(Opcode opcode) {
+  switch (opcode) {
+    case ARGUMENTS_OBJECT:
+    case DUPLICATE:
+      return 0;
+    case BEGIN:
+    case REGISTER:
+    case INT32_REGISTER:
+    case DOUBLE_REGISTER:
+    case STACK_SLOT:
+    case INT32_STACK_SLOT:
+    case DOUBLE_STACK_SLOT:
+    case LITERAL:
+      return 1;
+    case FRAME:
+      return 3;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+#ifdef OBJECT_PRINT
+
+const char* Translation::StringFor(Opcode opcode) {
+  switch (opcode) {
+    case BEGIN:
+      return "BEGIN";
+    case FRAME:
+      return "FRAME";
+    case REGISTER:
+      return "REGISTER";
+    case INT32_REGISTER:
+      return "INT32_REGISTER";
+    case DOUBLE_REGISTER:
+      return "DOUBLE_REGISTER";
+    case STACK_SLOT:
+      return "STACK_SLOT";
+    case INT32_STACK_SLOT:
+      return "INT32_STACK_SLOT";
+    case DOUBLE_STACK_SLOT:
+      return "DOUBLE_STACK_SLOT";
+    case LITERAL:
+      return "LITERAL";
+    case ARGUMENTS_OBJECT:
+      return "ARGUMENTS_OBJECT";
+    case DUPLICATE:
+      return "DUPLICATE";
+  }
+  UNREACHABLE();
+  return "";
+}
+
+#endif
+
+
+DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code) : next_(NULL) {
+  // Globalize the code object and make it weak.
+  code_ = Handle<Code>::cast(GlobalHandles::Create(code));
+  GlobalHandles::MakeWeak(reinterpret_cast<Object**>(code_.location()),
+                          this,
+                          Deoptimizer::HandleWeakDeoptimizedCode);
+}
+
+
+DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
+  GlobalHandles::Destroy(reinterpret_cast<Object**>(code_.location()));
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
new file mode 100644
index 0000000..2d7dfc8
--- /dev/null
+++ b/src/deoptimizer.h
@@ -0,0 +1,511 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_DEOPTIMIZER_H_
+#define V8_DEOPTIMIZER_H_
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "zone-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+class FrameDescription;
+class TranslationIterator;
+class DeoptimizingCodeListNode;
+
+
+class ValueDescription BASE_EMBEDDED {
+ public:
+  explicit ValueDescription(int index) : stack_index_(index) { }
+  int stack_index() const { return stack_index_; }
+
+ private:
+  // Offset relative to the top of the stack.
+  int stack_index_;
+};
+
+
+class ValueDescriptionInteger32: public ValueDescription {
+ public:
+  ValueDescriptionInteger32(int index, int32_t value)
+      : ValueDescription(index), int32_value_(value) { }
+  int32_t int32_value() const { return int32_value_; }
+
+ private:
+  // Raw value.
+  int32_t int32_value_;
+};
+
+
+class ValueDescriptionDouble: public ValueDescription {
+ public:
+  ValueDescriptionDouble(int index, double value)
+      : ValueDescription(index), double_value_(value) { }
+  double double_value() const { return double_value_; }
+
+ private:
+  // Raw value.
+  double double_value_;
+};
+
+
+class OptimizedFunctionVisitor BASE_EMBEDDED {
+ public:
+  virtual ~OptimizedFunctionVisitor() {}
+
+  // Function which is called before iteration of any optimized functions
+  // from a given global context.
+  virtual void EnterContext(Context* context) = 0;
+
+  virtual void VisitFunction(JSFunction* function) = 0;
+
+  // Function which is called after iteration of all optimized functions
+  // from given global context.
+  virtual void LeaveContext(Context* context) = 0;
+};
+
+
+class Deoptimizer : public Malloced {
+ public:
+  enum BailoutType {
+    EAGER,
+    LAZY,
+    OSR
+  };
+
+  int output_count() const { return output_count_; }
+
+  static Deoptimizer* New(JSFunction* function,
+                          BailoutType type,
+                          unsigned bailout_id,
+                          Address from,
+                          int fp_to_sp_delta);
+  static Deoptimizer* Grab();
+
+  // Deoptimize the function now. Its current optimized code will never be run
+  // again and any activations of the optimized code will get deoptimized when
+  // execution returns.
+  static void DeoptimizeFunction(JSFunction* function);
+
+  // Deoptimize all functions in the heap.
+  static void DeoptimizeAll();
+
+  static void DeoptimizeGlobalObject(JSObject* object);
+
+  static void VisitAllOptimizedFunctionsForContext(
+      Context* context, OptimizedFunctionVisitor* visitor);
+
+  static void VisitAllOptimizedFunctionsForGlobalObject(
+      JSObject* object, OptimizedFunctionVisitor* visitor);
+
+  static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
+
+  // Given the relocation info of a call to the stack check stub, patch the
+  // code so as to go unconditionally to the on-stack replacement builtin
+  // instead.
+  static void PatchStackCheckCode(RelocInfo* rinfo, Code* replacement_code);
+
+  // Given the relocation info of a call to the on-stack replacement
+  // builtin, patch the code back to the original stack check code.
+  static void RevertStackCheckCode(RelocInfo* rinfo, Code* check_code);
+
+  ~Deoptimizer();
+
+  void InsertHeapNumberValues(int index, JavaScriptFrame* frame);
+
+  static void ComputeOutputFrames(Deoptimizer* deoptimizer);
+
+  static Address GetDeoptimizationEntry(int id, BailoutType type);
+  static int GetDeoptimizationId(Address addr, BailoutType type);
+  static unsigned GetOutputInfo(DeoptimizationOutputData* data,
+                                unsigned node_id,
+                                SharedFunctionInfo* shared);
+
+  static void Setup();
+  static void TearDown();
+
+  // Code generation support.
+  static int input_offset() { return OFFSET_OF(Deoptimizer, input_); }
+  static int output_count_offset() {
+    return OFFSET_OF(Deoptimizer, output_count_);
+  }
+  static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
+
+  static int GetDeoptimizedCodeCount();
+
+  static const int kNotDeoptimizationEntry = -1;
+
+  // Generators for the deoptimization entry code.
+  class EntryGenerator BASE_EMBEDDED {
+   public:
+    EntryGenerator(MacroAssembler* masm, BailoutType type)
+        : masm_(masm), type_(type) { }
+    virtual ~EntryGenerator() { }
+
+    void Generate();
+
+   protected:
+    MacroAssembler* masm() const { return masm_; }
+    BailoutType type() const { return type_; }
+
+    virtual void GeneratePrologue() { }
+
+   private:
+    MacroAssembler* masm_;
+    Deoptimizer::BailoutType type_;
+  };
+
+  class TableEntryGenerator : public EntryGenerator {
+   public:
+    TableEntryGenerator(MacroAssembler* masm, BailoutType type, int count)
+        : EntryGenerator(masm, type), count_(count) { }
+
+   protected:
+    virtual void GeneratePrologue();
+
+   private:
+    int count() const { return count_; }
+
+    int count_;
+  };
+
+ private:
+  static const int kNumberOfEntries = 4096;
+
+  Deoptimizer(JSFunction* function,
+              BailoutType type,
+              unsigned bailout_id,
+              Address from,
+              int fp_to_sp_delta);
+  void DeleteFrameDescriptions();
+
+  void DoComputeOutputFrames();
+  void DoComputeOsrOutputFrame();
+  void DoComputeFrame(TranslationIterator* iterator, int frame_index);
+  void DoTranslateCommand(TranslationIterator* iterator,
+                          int frame_index,
+                          unsigned output_offset);
+  // Translate a command for OSR.  Updates the input offset to be used for
+  // the next command.  Returns false if translation of the command failed
+  // (e.g., a number conversion failed) and may or may not have updated the
+  // input offset.
+  bool DoOsrTranslateCommand(TranslationIterator* iterator,
+                             int* input_offset);
+
+  unsigned ComputeInputFrameSize() const;
+  unsigned ComputeFixedSize(JSFunction* function) const;
+
+  unsigned ComputeIncomingArgumentSize(JSFunction* function) const;
+  unsigned ComputeOutgoingArgumentSize() const;
+
+  Object* ComputeLiteral(int index) const;
+
+  void InsertHeapNumberValue(JavaScriptFrame* frame,
+                             int stack_index,
+                             double val,
+                             int extra_slot_count);
+
+  void AddInteger32Value(int frame_index, int slot_index, int32_t value);
+  void AddDoubleValue(int frame_index, int slot_index, double value);
+
+  static LargeObjectChunk* CreateCode(BailoutType type);
+  static void GenerateDeoptimizationEntries(
+      MacroAssembler* masm, int count, BailoutType type);
+
+  // Weak handle callback for deoptimizing code objects.
+  static void HandleWeakDeoptimizedCode(
+      v8::Persistent<v8::Value> obj, void* data);
+  static Code* FindDeoptimizingCodeFromAddress(Address addr);
+  static void RemoveDeoptimizingCode(Code* code);
+
+  static LargeObjectChunk* eager_deoptimization_entry_code_;
+  static LargeObjectChunk* lazy_deoptimization_entry_code_;
+  static Deoptimizer* current_;
+
+  // List of deoptimized code objects which still have references from
+  // active stack frames. These code objects are needed by the deoptimizer
+  // when deoptimizing a frame for which the code object of the function
+  // has been changed since the deoptimization was done.
+  static DeoptimizingCodeListNode* deoptimizing_code_list_;
+
+  JSFunction* function_;
+  Code* optimized_code_;
+  unsigned bailout_id_;
+  BailoutType bailout_type_;
+  Address from_;
+  int fp_to_sp_delta_;
+
+  // Input frame description.
+  FrameDescription* input_;
+  // Number of output frames.
+  int output_count_;
+  // Array of output frame descriptions.
+  FrameDescription** output_;
+
+  List<ValueDescriptionInteger32>* integer32_values_;
+  List<ValueDescriptionDouble>* double_values_;
+
+  static int table_entry_size_;
+
+  friend class FrameDescription;
+  friend class DeoptimizingCodeListNode;
+};
+
+
+class FrameDescription {
+ public:
+  FrameDescription(uint32_t frame_size,
+                   JSFunction* function);
+
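+  // The frame contents are allocated directly behind the FrameDescription
+  // object itself; GetFrameSlotPointer() addresses the slots relative to
+  // frame_content_offset().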
+  void* operator new(size_t size, uint32_t frame_size) {
+    return malloc(size + frame_size);
+  }
+
+  void operator delete(void* description) {
+    free(description);
+  }
+
+  intptr_t GetFrameSize() const { return frame_size_; }
+
+  JSFunction* GetFunction() const { return function_; }
+
+  unsigned GetOffsetFromSlotIndex(Deoptimizer* deoptimizer, int slot_index);
+
+  intptr_t GetFrameSlot(unsigned offset) {
+    return *GetFrameSlotPointer(offset);
+  }
+
+  double GetDoubleFrameSlot(unsigned offset) {
+    return *reinterpret_cast<double*>(GetFrameSlotPointer(offset));
+  }
+
+  void SetFrameSlot(unsigned offset, intptr_t value) {
+    *GetFrameSlotPointer(offset) = value;
+  }
+
+  intptr_t GetRegister(unsigned n) const {
+    ASSERT(n < ARRAY_SIZE(registers_));
+    return registers_[n];
+  }
+
+  double GetDoubleRegister(unsigned n) const {
+    ASSERT(n < ARRAY_SIZE(double_registers_));
+    return double_registers_[n];
+  }
+
+  void SetRegister(unsigned n, intptr_t value) {
+    ASSERT(n < ARRAY_SIZE(registers_));
+    registers_[n] = value;
+  }
+
+  void SetDoubleRegister(unsigned n, double value) {
+    ASSERT(n < ARRAY_SIZE(double_registers_));
+    double_registers_[n] = value;
+  }
+
+  intptr_t GetTop() const { return top_; }
+  void SetTop(intptr_t top) { top_ = top; }
+
+  intptr_t GetPc() const { return pc_; }
+  void SetPc(intptr_t pc) { pc_ = pc; }
+
+  intptr_t GetFp() const { return fp_; }
+  void SetFp(intptr_t fp) { fp_ = fp; }
+
+  Smi* GetState() const { return state_; }
+  void SetState(Smi* state) { state_ = state; }
+
+  void SetContinuation(intptr_t pc) { continuation_ = pc; }
+
+  static int registers_offset() {
+    return OFFSET_OF(FrameDescription, registers_);
+  }
+
+  static int double_registers_offset() {
+    return OFFSET_OF(FrameDescription, double_registers_);
+  }
+
+  static int frame_size_offset() {
+    return OFFSET_OF(FrameDescription, frame_size_);
+  }
+
+  static int pc_offset() {
+    return OFFSET_OF(FrameDescription, pc_);
+  }
+
+  static int state_offset() {
+    return OFFSET_OF(FrameDescription, state_);
+  }
+
+  static int continuation_offset() {
+    return OFFSET_OF(FrameDescription, continuation_);
+  }
+
+  static int frame_content_offset() {
+    return sizeof(FrameDescription);
+  }
+
+ private:
+  static const uint32_t kZapUint32 = 0xbeeddead;
+
+  uintptr_t frame_size_;  // Number of bytes.
+  JSFunction* function_;
+  intptr_t registers_[Register::kNumRegisters];
+  double double_registers_[DoubleRegister::kNumAllocatableRegisters];
+  intptr_t top_;
+  intptr_t pc_;
+  intptr_t fp_;
+  Smi* state_;
+
+  // Continuation is the PC where the execution continues after
+  // deoptimizing.
+  intptr_t continuation_;
+
+  intptr_t* GetFrameSlotPointer(unsigned offset) {
+    ASSERT(offset < frame_size_);
+    return reinterpret_cast<intptr_t*>(
+        reinterpret_cast<Address>(this) + frame_content_offset() + offset);
+  }
+};
+
+
+class TranslationBuffer BASE_EMBEDDED {
+ public:
+  TranslationBuffer() : contents_(256) { }
+
+  int CurrentIndex() const { return contents_.length(); }
+  void Add(int32_t value);
+
+  Handle<ByteArray> CreateByteArray();
+
+ private:
+  ZoneList<uint8_t> contents_;
+};
+
+
+class TranslationIterator BASE_EMBEDDED {
+ public:
+  TranslationIterator(ByteArray* buffer, int index)
+      : buffer_(buffer), index_(index) {
+    ASSERT(index >= 0 && index < buffer->length());
+  }
+
+  int32_t Next();
+
+  bool HasNext() const { return index_ >= 0; }
+
+  void Done() { index_ = -1; }
+
+  void Skip(int n) {
+    for (int i = 0; i < n; i++) Next();
+  }
+
+ private:
+  ByteArray* buffer_;
+  int index_;
+};
+
+
+class Translation BASE_EMBEDDED {
+ public:
+  enum Opcode {
+    BEGIN,
+    FRAME,
+    REGISTER,
+    INT32_REGISTER,
+    DOUBLE_REGISTER,
+    STACK_SLOT,
+    INT32_STACK_SLOT,
+    DOUBLE_STACK_SLOT,
+    LITERAL,
+    ARGUMENTS_OBJECT,
+
+    // A prefix indicating that the next command is a duplicate of the one
+    // that follows it.
+    DUPLICATE
+  };
+
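+  // A translation starts with BEGIN and the number of output frames,
+  // followed by a FRAME command for each frame, each in turn followed by
+  // the commands describing that frame's contents.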
+  Translation(TranslationBuffer* buffer, int frame_count)
+      : buffer_(buffer),
+        index_(buffer->CurrentIndex()) {
+    buffer_->Add(BEGIN);
+    buffer_->Add(frame_count);
+  }
+
+  int index() const { return index_; }
+
+  // Commands.
+  void BeginFrame(int node_id, int literal_id, unsigned height);
+  void StoreRegister(Register reg);
+  void StoreInt32Register(Register reg);
+  void StoreDoubleRegister(DoubleRegister reg);
+  void StoreStackSlot(int index);
+  void StoreInt32StackSlot(int index);
+  void StoreDoubleStackSlot(int index);
+  void StoreLiteral(int literal_id);
+  void StoreArgumentsObject();
+  void MarkDuplicate();
+
+  static int NumberOfOperandsFor(Opcode opcode);
+
+#ifdef OBJECT_PRINT
+  static const char* StringFor(Opcode opcode);
+#endif
+
+ private:
+  TranslationBuffer* buffer_;
+  int index_;
+};
+
+
+// Linked list holding deoptimizing code objects. The deoptimizing code objects
+// are kept as weak handles until they no longer have activations on the stack.
+class DeoptimizingCodeListNode : public Malloced {
+ public:
+  explicit DeoptimizingCodeListNode(Code* code);
+  ~DeoptimizingCodeListNode();
+
+  DeoptimizingCodeListNode* next() const { return next_; }
+  void set_next(DeoptimizingCodeListNode* next) { next_ = next; }
+  Handle<Code> code() const { return code_; }
+
+ private:
+  // Global (weak) handle to the deoptimizing code object.
+  Handle<Code> code_;
+
+  // Next pointer for linked list.
+  DeoptimizingCodeListNode* next_;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_DEOPTIMIZER_H_
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 2a4ea74..bb0a072 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 #include "code-stubs.h"
 #include "codegen-inl.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "disasm.h"
 #include "disassembler.h"
 #include "macro-assembler.h"
@@ -277,6 +278,15 @@
         } else {
           out.AddFormatted(" %s", Code::Kind2String(kind));
         }
+      } else if (rmode == RelocInfo::RUNTIME_ENTRY) {
+        // A runtime entry relocinfo might be a deoptimization bailout.
+        Address addr = relocinfo.target_address();
+        int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
+        if (id == Deoptimizer::kNotDeoptimizationEntry) {
+          out.AddFormatted("    ;; %s", RelocInfo::RelocModeName(rmode));
+        } else {
+          out.AddFormatted("    ;; deoptimization bailout %d", id);
+        }
       } else {
         out.AddFormatted("    ;; %s", RelocInfo::RelocModeName(rmode));
       }
@@ -299,8 +309,17 @@
 
 // Called by Code::CodePrint.
 void Disassembler::Decode(FILE* f, Code* code) {
-  byte* begin = Code::cast(code)->instruction_start();
-  byte* end = begin + Code::cast(code)->instruction_size();
+  int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
+      ? static_cast<int>(code->safepoint_table_start())
+      : code->instruction_size();
+  // If there might be a stack check table, stop before reaching it.
+  if (code->kind() == Code::FUNCTION) {
+    decode_size =
+        Min(decode_size, static_cast<int>(code->stack_check_table_start()));
+  }
+
+  byte* begin = code->instruction_start();
+  byte* end = begin + decode_size;
   V8NameConverter v8NameConverter(code);
   DecodeIt(f, v8NameConverter, begin, end);
 }
diff --git a/src/execution.cc b/src/execution.cc
index 691d569..11dacfe 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -33,8 +33,10 @@
 #include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "debug.h"
+#include "runtime-profiler.h"
 #include "simulator.h"
 #include "v8threads.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -295,6 +297,25 @@
 }
 
 
+bool StackGuard::IsRuntimeProfilerTick() {
+  ExecutionAccess access;
+  return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
+}
+
+
+void StackGuard::RequestRuntimeProfilerTick() {
+  // Ignore calls if we're not optimizing or if we can't get the lock.
+  if (FLAG_opt && ExecutionAccess::TryLock()) {
+    thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK;
+    if (thread_local_.postpone_interrupts_nesting_ == 0) {
+      thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+      Heap::SetStackLimits();
+    }
+    ExecutionAccess::Unlock();
+  }
+}
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 bool StackGuard::IsDebugBreak() {
   ExecutionAccess access;
@@ -682,6 +703,12 @@
 #endif
 
 MaybeObject* Execution::HandleStackGuardInterrupt() {
+  Counters::stack_interrupts.Increment();
+  if (StackGuard::IsRuntimeProfilerTick()) {
+    Counters::runtime_profiler_ticks.Increment();
+    StackGuard::Continue(RUNTIME_PROFILER_TICK);
+    RuntimeProfiler::OptimizeNow();
+  }
 #ifdef ENABLE_DEBUGGER_SUPPORT
   if (StackGuard::IsDebugBreak() || StackGuard::IsDebugCommand()) {
     DebugBreakHelper();
@@ -693,7 +720,6 @@
     return Top::TerminateExecution();
   }
   if (StackGuard::IsInterrupted()) {
-    // interrupt
     StackGuard::Continue(INTERRUPT);
     return Top::StackOverflow();
   }
diff --git a/src/execution.h b/src/execution.h
index a2ddc41..af8ad9a 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -38,7 +38,8 @@
   DEBUGBREAK = 1 << 1,
   DEBUGCOMMAND = 1 << 2,
   PREEMPT = 1 << 3,
-  TERMINATE = 1 << 4
+  TERMINATE = 1 << 4,
+  RUNTIME_PROFILER_TICK = 1 << 5
 };
 
 class Execution : public AllStatic {
@@ -175,6 +176,8 @@
   static void Interrupt();
   static bool IsTerminateExecution();
   static void TerminateExecution();
+  static bool IsRuntimeProfilerTick();
+  static void RequestRuntimeProfilerTick();
 #ifdef ENABLE_DEBUGGER_SUPPORT
   static bool IsDebugBreak();
   static void DebugBreak();
diff --git a/src/extensions/experimental/i18n-extension.cc b/src/extensions/experimental/i18n-extension.cc
new file mode 100644
index 0000000..22a1c91
--- /dev/null
+++ b/src/extensions/experimental/i18n-extension.cc
@@ -0,0 +1,263 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "i18n-extension.h"
+
+#include <algorithm>
+#include <string>
+
+#include "unicode/locid.h"
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+I18NExtension* I18NExtension::extension_ = NULL;
+
+// TODO(cira): maybe move JS code to a .js file and generate cc files from it?
+const char* const I18NExtension::kSource =
+  "Locale = function(optLocale) {"
+  "  native function NativeJSLocale();"
+  "  var properties = NativeJSLocale(optLocale);"
+  "  this.locale = properties.locale;"
+  "  this.language = properties.language;"
+  "  this.script = properties.script;"
+  "  this.region = properties.region;"
+  "};"
+  "Locale.availableLocales = function() {"
+  "  native function NativeJSAvailableLocales();"
+  "  return NativeJSAvailableLocales();"
+  "};"
+  "Locale.prototype.maximizedLocale = function() {"
+  "  native function NativeJSMaximizedLocale();"
+  "  return new Locale(NativeJSMaximizedLocale(this.locale));"
+  "};"
+  "Locale.prototype.minimizedLocale = function() {"
+  "  native function NativeJSMinimizedLocale();"
+  "  return new Locale(NativeJSMinimizedLocale(this.locale));"
+  "};"
+  "Locale.prototype.displayLocale_ = function(displayLocale) {"
+  "  var result = this.locale;"
+  "  if (displayLocale !== undefined) {"
+  "    result = displayLocale.locale;"
+  "  }"
+  "  return result;"
+  "};"
+  "Locale.prototype.displayLanguage = function(optDisplayLocale) {"
+  "  var displayLocale = this.displayLocale_(optDisplayLocale);"
+  "  native function NativeJSDisplayLanguage();"
+  "  return NativeJSDisplayLanguage(this.locale, displayLocale);"
+  "};"
+  "Locale.prototype.displayScript = function(optDisplayLocale) {"
+  "  var displayLocale = this.displayLocale_(optDisplayLocale);"
+  "  native function NativeJSDisplayScript();"
+  "  return NativeJSDisplayScript(this.locale, displayLocale);"
+  "};"
+  "Locale.prototype.displayRegion = function(optDisplayLocale) {"
+  "  var displayLocale = this.displayLocale_(optDisplayLocale);"
+  "  native function NativeJSDisplayRegion();"
+  "  return NativeJSDisplayRegion(this.locale, displayLocale);"
+  "};"
+  "Locale.prototype.displayName = function(optDisplayLocale) {"
+  "  var displayLocale = this.displayLocale_(optDisplayLocale);"
+  "  native function NativeJSDisplayName();"
+  "  return NativeJSDisplayName(this.locale, displayLocale);"
+  "};";
+
+v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction(
+    v8::Handle<v8::String> name) {
+  if (name->Equals(v8::String::New("NativeJSLocale"))) {
+    return v8::FunctionTemplate::New(JSLocale);
+  } else if (name->Equals(v8::String::New("NativeJSAvailableLocales"))) {
+    return v8::FunctionTemplate::New(JSAvailableLocales);
+  } else if (name->Equals(v8::String::New("NativeJSMaximizedLocale"))) {
+    return v8::FunctionTemplate::New(JSMaximizedLocale);
+  } else if (name->Equals(v8::String::New("NativeJSMinimizedLocale"))) {
+    return v8::FunctionTemplate::New(JSMinimizedLocale);
+  } else if (name->Equals(v8::String::New("NativeJSDisplayLanguage"))) {
+    return v8::FunctionTemplate::New(JSDisplayLanguage);
+  } else if (name->Equals(v8::String::New("NativeJSDisplayScript"))) {
+    return v8::FunctionTemplate::New(JSDisplayScript);
+  } else if (name->Equals(v8::String::New("NativeJSDisplayRegion"))) {
+    return v8::FunctionTemplate::New(JSDisplayRegion);
+  } else if (name->Equals(v8::String::New("NativeJSDisplayName"))) {
+    return v8::FunctionTemplate::New(JSDisplayName);
+  }
+
+  return v8::Handle<v8::FunctionTemplate>();
+}
+
+v8::Handle<v8::Value> I18NExtension::JSLocale(const v8::Arguments& args) {
+  // TODO(cira): Fetch browser locale. Accept en-US as a good default for now.
+  // We could possibly pass browser locale as a parameter in the constructor.
+  std::string locale_name("en-US");
+  if (args.Length() == 1 && args[0]->IsString()) {
+    locale_name = *v8::String::Utf8Value(args[0]->ToString());
+  }
+
+  v8::Local<v8::Object> locale = v8::Object::New();
+  locale->Set(v8::String::New("locale"), v8::String::New(locale_name.c_str()));
+
+  icu::Locale icu_locale(locale_name.c_str());
+
+  const char* language = icu_locale.getLanguage();
+  locale->Set(v8::String::New("language"), v8::String::New(language));
+
+  const char* script = icu_locale.getScript();
+  if (strlen(script)) {
+    locale->Set(v8::String::New("script"), v8::String::New(script));
+  }
+
+  const char* region = icu_locale.getCountry();
+  if (strlen(region)) {
+    locale->Set(v8::String::New("region"), v8::String::New(region));
+  }
+
+  return locale;
+}
+
+// TODO(cira): Filter out locales that Chrome doesn't support.
+v8::Handle<v8::Value> I18NExtension::JSAvailableLocales(
+    const v8::Arguments& args) {
+  v8::Local<v8::Array> all_locales = v8::Array::New();
+
+  int count = 0;
+  const Locale* icu_locales = icu::Locale::getAvailableLocales(count);
+  for (int i = 0; i < count; ++i) {
+    all_locales->Set(i, v8::String::New(icu_locales[i].getName()));
+  }
+
+  return all_locales;
+}
+
+// Use '-' as the tag separator rather than the '_' that ICU uses.
+static std::string NormalizeLocale(const std::string& locale) {
+  std::string result(locale);
+  // TODO(cira): remove STL dependency.
+  std::replace(result.begin(), result.end(), '_', '-');
+  return result;
+}
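For example, ICU's "sr_Latn_RS" is rewritten here to the BCP 47-style
"sr-Latn-RS"; tags that already use '-' pass through unchanged.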
+
+v8::Handle<v8::Value> I18NExtension::JSMaximizedLocale(
+    const v8::Arguments& args) {
+  if (!args.Length() || !args[0]->IsString()) {
+    return v8::Undefined();
+  }
+
+  UErrorCode status = U_ZERO_ERROR;
+  std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
+  char max_locale[ULOC_FULLNAME_CAPACITY];
+  uloc_addLikelySubtags(locale_name.c_str(), max_locale,
+                        sizeof(max_locale), &status);
+  if (U_FAILURE(status)) {
+    return v8::Undefined();
+  }
+
+  return v8::String::New(NormalizeLocale(max_locale).c_str());
+}
+
+v8::Handle<v8::Value> I18NExtension::JSMinimizedLocale(
+    const v8::Arguments& args) {
+  if (!args.Length() || !args[0]->IsString()) {
+    return v8::Undefined();
+  }
+
+  UErrorCode status = U_ZERO_ERROR;
+  std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
+  char min_locale[ULOC_FULLNAME_CAPACITY];
+  uloc_minimizeSubtags(locale_name.c_str(), min_locale,
+                       sizeof(min_locale), &status);
+  if (U_FAILURE(status)) {
+    return v8::Undefined();
+  }
+
+  return v8::String::New(NormalizeLocale(min_locale).c_str());
+}
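With ICU's default likely-subtags data, maximizing "en" typically yields
"en_Latn_US" (returned as "en-Latn-US" after normalization), and minimizing
"en_Latn_US" yields "en" again.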
+
+// Common code for JSDisplayXXX methods.
+static v8::Handle<v8::Value> GetDisplayItem(const v8::Arguments& args,
+                                            const std::string& item) {
+  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
+    return v8::Undefined();
+  }
+
+  std::string base_locale = *v8::String::Utf8Value(args[0]->ToString());
+  icu::Locale icu_locale(base_locale.c_str());
+  icu::Locale display_locale =
+      icu::Locale(*v8::String::Utf8Value(args[1]->ToString()));
+  UnicodeString result;
+  if (item == "language") {
+    icu_locale.getDisplayLanguage(display_locale, result);
+  } else if (item == "script") {
+    icu_locale.getDisplayScript(display_locale, result);
+  } else if (item == "region") {
+    icu_locale.getDisplayCountry(display_locale, result);
+  } else if (item == "name") {
+    icu_locale.getDisplayName(display_locale, result);
+  } else {
+    return v8::Undefined();
+  }
+
+  if (result.length()) {
+    return v8::String::New(
+        reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
+  }
+
+  return v8::Undefined();
+}
+
+v8::Handle<v8::Value> I18NExtension::JSDisplayLanguage(
+    const v8::Arguments& args) {
+  return GetDisplayItem(args, "language");
+}
+
+v8::Handle<v8::Value> I18NExtension::JSDisplayScript(
+    const v8::Arguments& args) {
+  return GetDisplayItem(args, "script");
+}
+
+v8::Handle<v8::Value> I18NExtension::JSDisplayRegion(
+    const v8::Arguments& args) {
+  return GetDisplayItem(args, "region");
+}
+
+v8::Handle<v8::Value> I18NExtension::JSDisplayName(const v8::Arguments& args) {
+  return GetDisplayItem(args, "name");
+}
+
+I18NExtension* I18NExtension::get() {
+  if (!extension_) {
+    extension_ = new I18NExtension();
+  }
+  return extension_;
+}
+
+void I18NExtension::Register() {
+  static v8::DeclareExtension i18n_extension_declaration(I18NExtension::get());
+}
+
+} }  // namespace v8::internal
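For orientation, a minimal embedder sketch showing how the declared extension
could be enabled by name in a new context. This assumes the v8 3.0-era
embedding API (ExtensionConfiguration, Persistent<Context>::New) and that the
embedder links this file and can include its header:

#include <v8.h>
#include "i18n-extension.h"  // path as in this patch

int main() {
  v8::internal::I18NExtension::Register();       // declares "v8/i18n"
  const char* extension_names[] = { "v8/i18n" };
  v8::ExtensionConfiguration extensions(1, extension_names);
  v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
  // ... enter the context and run scripts that use the Locale API ...
  context.Dispose();
  return 0;
}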
diff --git a/src/extensions/experimental/i18n-extension.h b/src/extensions/experimental/i18n-extension.h
new file mode 100644
index 0000000..629332b
--- /dev/null
+++ b/src/extensions/experimental/i18n-extension.h
@@ -0,0 +1,64 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+
+#include <v8.h>
+
+namespace v8 {
+namespace internal {
+
+
+class I18NExtension : public v8::Extension {
+ public:
+  I18NExtension() : v8::Extension("v8/i18n", kSource) {}
+  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+      v8::Handle<v8::String> name);
+
+  // Implementations of window.Locale methods.
+  static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSAvailableLocales(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSMaximizedLocale(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSMinimizedLocale(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSDisplayLanguage(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSDisplayScript(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSDisplayRegion(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSDisplayName(const v8::Arguments& args);
+
+  // V8 code prefers Register, while Chrome and WebKit use getter-style
+  // methods.
+  static void Register();
+  static I18NExtension* get();
+
+ private:
+  static const char* const kSource;
+  static I18NExtension* extension_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
diff --git a/src/factory.cc b/src/factory.cc
index a05ff6c..83af447 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,6 +32,7 @@
 #include "execution.h"
 #include "factory.h"
 #include "macro-assembler.h"
+#include "objects.h"
 #include "objects-visiting.h"
 
 namespace v8 {
@@ -73,6 +74,26 @@
 }
 
 
+Handle<DeoptimizationInputData> Factory::NewDeoptimizationInputData(
+    int deopt_entry_count,
+    PretenureFlag pretenure) {
+  ASSERT(deopt_entry_count > 0);
+  CALL_HEAP_FUNCTION(DeoptimizationInputData::Allocate(deopt_entry_count,
+                                                       pretenure),
+                     DeoptimizationInputData);
+}
+
+
+Handle<DeoptimizationOutputData> Factory::NewDeoptimizationOutputData(
+    int deopt_entry_count,
+    PretenureFlag pretenure) {
+  ASSERT(deopt_entry_count > 0);
+  CALL_HEAP_FUNCTION(DeoptimizationOutputData::Allocate(deopt_entry_count,
+                                                        pretenure),
+                     DeoptimizationOutputData);
+}
+
+
 // Symbols are created in the old generation (data space).
 Handle<String> Factory::LookupSymbol(Vector<const char> string) {
   CALL_HEAP_FUNCTION(Heap::LookupSymbol(string), String);
@@ -243,6 +264,13 @@
 }
 
 
+Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell(
+    Handle<Object> value) {
+  CALL_HEAP_FUNCTION(Heap::AllocateJSGlobalPropertyCell(*value),
+                     JSGlobalPropertyCell);
+}
+
+
 Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
   CALL_HEAP_FUNCTION(Heap::AllocateMap(type, instance_size), Map);
 }
@@ -333,6 +361,15 @@
                   context->global_context());
   }
   result->set_literals(*literals);
+  result->set_next_function_link(Heap::undefined_value());
+
+  if (V8::UseCrankshaft() &&
+      FLAG_always_opt &&
+      result->is_compiled() &&
+      !function_info->is_toplevel() &&
+      function_info->allows_lazy_compilation()) {
+    result->MarkForLazyRecompilation();
+  }
   return result;
 }
 
diff --git a/src/factory.h b/src/factory.h
index c014986..b7a2882 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -53,6 +53,12 @@
   static Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
 
   static Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
+  static Handle<DeoptimizationInputData> NewDeoptimizationInputData(
+      int deopt_entry_count,
+      PretenureFlag pretenure);
+  static Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
+      int deopt_entry_count,
+      PretenureFlag pretenure);
 
   static Handle<String> LookupSymbol(Vector<const char> str);
   static Handle<String> LookupAsciiSymbol(const char* str) {
@@ -169,6 +175,9 @@
       void* external_pointer,
       PretenureFlag pretenure = NOT_TENURED);
 
+  static Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
+      Handle<Object> value);
+
   static Handle<Map> NewMap(InstanceType type, int instance_size);
 
   static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 46feea7..f160a85 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -96,9 +96,57 @@
 //
 #define FLAG FLAG_FULL
 
+// Flags for Crankshaft.
+#ifdef V8_TARGET_ARCH_IA32
+DEFINE_bool(crankshaft, true, "use crankshaft")
+#else
+DEFINE_bool(crankshaft, false, "use crankshaft")
+#endif
+DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
+DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
+DEFINE_bool(build_lithium, true, "use lithium chunk builder")
+DEFINE_bool(alloc_lithium, true, "use lithium register allocator")
+DEFINE_bool(use_lithium, true, "use lithium code generator")
+DEFINE_bool(use_range, true, "use hydrogen range analysis")
+DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
+DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
+DEFINE_bool(use_peeling, false, "use loop peeling")
+DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
+DEFINE_bool(use_inlining, true, "use function inlining")
+DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
+DEFINE_bool(eliminate_empty_blocks, true, "eliminate empty blocks")
+DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
+DEFINE_bool(time_hydrogen, false, "timing for hydrogen")
+DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
+DEFINE_bool(trace_inlining, false, "trace inlining decisions")
+DEFINE_bool(trace_alloc, false, "trace register allocator")
+DEFINE_bool(trace_range, false, "trace range analysis")
+DEFINE_bool(trace_gvn, false, "trace global value numbering")
+DEFINE_bool(trace_environment, false, "trace lithium environments")
+DEFINE_bool(trace_representation, false, "trace representation types")
+DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
+DEFINE_bool(stress_environments, false, "environment for every instruction")
+DEFINE_int(deopt_every_n_times,
+           0,
+           "deoptimize every n times a deopt point is passed")
+DEFINE_bool(process_arguments_object, true, "try to deal with arguments object")
+DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
+DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
+DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
+DEFINE_bool(aggressive_loop_invariant_motion, true,
+            "aggressive motion of instructions out of loops")
+#ifdef V8_TARGET_ARCH_IA32
+DEFINE_bool(use_osr, true, "use on-stack replacement")
+#else
+DEFINE_bool(use_osr, false, "use on-stack replacement")
+#endif
+DEFINE_bool(trace_osr, false, "trace on-stack replacement")
+DEFINE_int(stress_runs, 0, "number of stress runs")
+
 // assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
 DEFINE_bool(debug_code, false,
-            "generate extra code (comments, assertions) for debugging")
+            "generate extra code (assertions) for debugging")
+DEFINE_bool(code_comments, false, "emit comments in code disassembly")
 DEFINE_bool(emit_branch_hints, false, "emit branch hints")
 DEFINE_bool(peephole_optimization, true,
             "perform peephole optimizations in assembly code")
@@ -146,7 +194,15 @@
 
 // codegen.cc
 DEFINE_bool(lazy, true, "use lazy compilation")
+DEFINE_bool(trace_opt, false, "trace lazy optimization")
+DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics")
+DEFINE_bool(opt, true, "use adaptive optimizations")
+DEFINE_bool(opt_eagerly, false, "be more eager when adaptively optimizing")
+DEFINE_bool(always_opt, false, "always try to optimize functions")
+DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
 DEFINE_bool(debug_info, true, "add debug information to compiled functions")
+DEFINE_bool(deopt, true, "support deoptimization")
+DEFINE_bool(trace_deopt, false, "trace deoptimization")
 
 // compiler.cc
 DEFINE_bool(strict, false, "strict error checking")
@@ -240,6 +296,9 @@
 DEFINE_bool(h, false, "print this message")
 DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
 
+// objects.cc
+DEFINE_bool(use_verbose_printer, true, "allow verbose printing")
+
 // parser.cc
 DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
 
@@ -365,6 +424,9 @@
             "report heap spill statistics along with heap_stats "
             "(requires heap_stats)")
 
+// VM state
+DEFINE_bool(log_state_changes, false, "Log state changes.")
+
 // Regexp
 DEFINE_bool(regexp_possessive_quantifier,
             false,
@@ -397,11 +459,8 @@
 DEFINE_bool(log_handles, false, "Log global handle events.")
 DEFINE_bool(log_snapshot_positions, false,
             "log positions of (de)serialized objects in the snapshot.")
-DEFINE_bool(log_state_changes, false, "Log state changes.")
 DEFINE_bool(log_suspect, false, "Log suspect operations.")
 DEFINE_bool(log_producers, false, "Log stack traces of JS objects allocations.")
-DEFINE_bool(compress_log, false,
-            "Compress log to save space (makes log less human-readable).")
 DEFINE_bool(prof, false,
             "Log statistical profiling information (implies --log-code).")
 DEFINE_bool(prof_auto, true,
@@ -446,6 +505,10 @@
 
 // codegen-ia32.cc / codegen-arm.cc
 DEFINE_bool(print_code, false, "print generated code")
+DEFINE_bool(print_opt_code, false, "print optimized code")
+DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
+            "printing optimized code based on it")
+DEFINE_bool(print_code_verbose, false, "print more information for code")
 DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
 
 // Cleanup...
diff --git a/src/flags.cc b/src/flags.cc
index bbe6bb7..c20f5ee 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -279,7 +279,7 @@
   *value = NULL;
   *is_bool = false;
 
-  if (*arg == '-') {
+  if (arg != NULL && *arg == '-') {
     // find the begin of the flag name
     arg++;  // remove 1st '-'
     if (*arg == '-') {
diff --git a/src/frame-element.h b/src/frame-element.h
index 48bb354..3b91b9d 100644
--- a/src/frame-element.h
+++ b/src/frame-element.h
@@ -262,8 +262,8 @@
   class CopiedField: public BitField<bool, 3, 1> {};
   class SyncedField: public BitField<bool, 4, 1> {};
   class UntaggedInt32Field: public BitField<bool, 5, 1> {};
-  class TypeInfoField: public BitField<int, 6, 6> {};
-  class DataField: public BitField<uint32_t, 12, 32 - 12> {};
+  class TypeInfoField: public BitField<int, 6, 7> {};
+  class DataField: public BitField<uint32_t, 13, 32 - 13> {};
 
   friend class VirtualFrame;
 };
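Since BitField<T, shift, size> packs a value into bits [shift, shift + size)
of a uint32_t, widening TypeInfoField by one bit forces DataField to start one
bit higher. A sketch of the resulting layout and its use (the lower bits hold
fields declared earlier in the class; the encoded values are illustrative):

// bit   3     : CopiedField
// bit   4     : SyncedField
// bit   5     : UntaggedInt32Field
// bits  6..12 : TypeInfoField   (7 bits; previously bits 6..11)
// bits 13..31 : DataField       (previously started at bit 12)
uint32_t value = TypeInfoField::encode(0x41) | DataField::encode(7);
int info = TypeInfoField::decode(value);  // 0x41 needs the seventh bit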
diff --git a/src/frames.cc b/src/frames.cc
index 3cdb015..3af7288 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -27,8 +27,12 @@
 
 #include "v8.h"
 
+#include "ast.h"
+#include "deoptimizer.h"
 #include "frames-inl.h"
+#include "full-codegen.h"
 #include "mark-compact.h"
+#include "safepoint-table.h"
 #include "scopeinfo.h"
 #include "string-stream.h"
 #include "top.h"
@@ -324,11 +328,33 @@
 #endif
 
 
+Code* StackFrame::GetSafepointData(Address pc,
+                                   uint8_t** safepoint_entry,
+                                   unsigned* stack_slots) {
+  PcToCodeCache::PcToCodeCacheEntry* entry = PcToCodeCache::GetCacheEntry(pc);
+  uint8_t* cached_safepoint_entry = entry->safepoint_entry;
+  if (cached_safepoint_entry == NULL) {
+    cached_safepoint_entry = entry->code->GetSafepointEntry(pc);
+    ASSERT(cached_safepoint_entry != NULL);  // A safepoint must exist here.
+    entry->safepoint_entry = cached_safepoint_entry;
+  } else {
+    ASSERT(cached_safepoint_entry == entry->code->GetSafepointEntry(pc));
+  }
+
+  // Fill in the results and return the code.
+  Code* code = entry->code;
+  *safepoint_entry = cached_safepoint_entry;
+  *stack_slots = code->stack_slots();
+  return code;
+}
+
+
 bool StackFrame::HasHandler() const {
   StackHandlerIterator it(this, top_handler());
   return !it.done();
 }
 
+
 void StackFrame::IteratePc(ObjectVisitor* v,
                            Address* pc_address,
                            Code* holder) {
@@ -355,7 +381,16 @@
   // really the function.
   const int offset = StandardFrameConstants::kMarkerOffset;
   Object* marker = Memory::Object_at(state->fp + offset);
-  if (!marker->IsSmi()) return JAVA_SCRIPT;
+  if (!marker->IsSmi()) {
+    // If we're using a "safe" stack iterator, we treat optimized
+    // frames as normal JavaScript frames to avoid having to look
+    // into the heap to determine the state. This is safe as long
+    // as nobody tries to GC...
+    if (SafeStackFrameIterator::is_active()) return JAVA_SCRIPT;
+    Code::Kind kind = GetContainingCode(*(state->pc_address))->kind();
+    ASSERT(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
+    return (kind == Code::OPTIMIZED_FUNCTION) ? OPTIMIZED : JAVA_SCRIPT;
+  }
   return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
 }
 
@@ -488,6 +523,70 @@
 }
 
 
+void OptimizedFrame::Iterate(ObjectVisitor* v) const {
+#ifdef DEBUG
+  // Make sure that optimized frames do not contain any stack handlers.
+  StackHandlerIterator it(this, top_handler());
+  ASSERT(it.done());
+#endif
+
+  // Make sure that we're not doing "safe" stack frame iteration. We cannot
+  // possibly find pointers in optimized frames in that state.
+  ASSERT(!SafeStackFrameIterator::is_active());
+
+  // Compute the safepoint information.
+  unsigned stack_slots = 0;
+  uint8_t* safepoint_entry = NULL;
+  Code* code = StackFrame::GetSafepointData(
+      pc(), &safepoint_entry, &stack_slots);
+  unsigned slot_space = stack_slots * kPointerSize;
+
+  // Visit the outgoing parameters. This is usually dealt with by the
+  // callee, but while GC'ing we artificially lower the number of
+  // arguments to zero and let the caller deal with it.
+  Object** parameters_base = &Memory::Object_at(sp());
+  Object** parameters_limit = &Memory::Object_at(
+      fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
+
+  // Visit the registers that contain pointers if any.
+  if (SafepointTable::HasRegisters(safepoint_entry)) {
+    for (int i = kNumSafepointRegisters - 1; i >= 0; i--) {
+      if (SafepointTable::HasRegisterAt(safepoint_entry, i)) {
+        int reg_stack_index = MacroAssembler::SafepointRegisterStackIndex(i);
+        v->VisitPointer(parameters_base + reg_stack_index);
+      }
+    }
+    // Skip the words containing the register values.
+    parameters_base += kNumSafepointRegisters;
+  }
+
+  // We're done dealing with the register bits.
+  safepoint_entry += kNumSafepointRegisters >> kBitsPerByteLog2;
+
+  // Visit the rest of the parameters.
+  v->VisitPointers(parameters_base, parameters_limit);
+
+  // Visit pointer spill slots and locals.
+  for (unsigned index = 0; index < stack_slots; index++) {
+    int byte_index = index >> kBitsPerByteLog2;
+    int bit_index = index & (kBitsPerByte - 1);
+    if ((safepoint_entry[byte_index] & (1U << bit_index)) != 0) {
+      v->VisitPointer(parameters_limit + index);
+    }
+  }
+
+  // Visit the context and the function.
+  Object** fixed_base = &Memory::Object_at(
+      fp() + JavaScriptFrameConstants::kFunctionOffset);
+  Object** fixed_limit = &Memory::Object_at(fp());
+  v->VisitPointers(fixed_base, fixed_limit);
+
+  // Visit the return address in the callee and incoming arguments.
+  IteratePc(v, pc_address(), code);
+  IterateArguments(v);
+}
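The spill-slot loop above treats the safepoint entry as a bitmap with one bit
per stack slot. A worked example of the index arithmetic:

// For stack slot 10:
int index = 10;
int byte_index = index >> kBitsPerByteLog2;   // 10 >> 3 == 1
int bit_index  = index & (kBitsPerByte - 1);  // 10 &  7 == 2
// Slot 10 holds a pointer iff bit 2 of safepoint_entry[1] is set.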
+
+
 Object* JavaScriptFrame::GetParameter(int index) const {
   ASSERT(index >= 0 && index < ComputeParametersCount());
   const int offset = JavaScriptFrameConstants::kParam0Offset;
@@ -547,6 +646,185 @@
 }
 
 
+void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) {
+  ASSERT(functions->length() == 0);
+  functions->Add(JSFunction::cast(function()));
+}
+
+
+void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
+  ASSERT(functions->length() == 0);
+  Code* code_pointer = code();
+  int offset = static_cast<int>(pc() - code_pointer->address());
+  FrameSummary summary(receiver(),
+                       JSFunction::cast(function()),
+                       code_pointer,
+                       offset,
+                       IsConstructor());
+  functions->Add(summary);
+}
+
+
+void FrameSummary::Print() {
+  PrintF("receiver: ");
+  receiver_->ShortPrint();
+  PrintF("\nfunction: ");
+  function_->shared()->DebugName()->ShortPrint();
+  PrintF("\ncode: ");
+  code_->ShortPrint();
+  if (code_->kind() == Code::FUNCTION) PrintF(" NON-OPT");
+  if (code_->kind() == Code::OPTIMIZED_FUNCTION) PrintF(" OPT");
+  PrintF("\npc: %d\n", offset_);
+}
+
+
+void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
+  ASSERT(frames->length() == 0);
+  ASSERT(is_optimized());
+
+  int deopt_index = AstNode::kNoNumber;
+  DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
+
+  // BUG(3243555): Since we don't have a lazy-deopt registered at
+  // throw-statements, we can't use the translation at the call-site of
+  // throw. An entry with no deoptimization index indicates a call-site
+  // without a lazy-deopt. As a consequence we are not allowed to inline
+  // functions containing throw.
+  if (deopt_index == Safepoint::kNoDeoptimizationIndex) {
+    JavaScriptFrame::Summarize(frames);
+    return;
+  }
+
+  TranslationIterator it(data->TranslationByteArray(),
+                         data->TranslationIndex(deopt_index)->value());
+  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+  ASSERT(opcode == Translation::BEGIN);
+  int frame_count = it.Next();
+
+  // We create the summary in reverse order because the frames
+  // in the deoptimization translation are ordered bottom-to-top.
+  int i = frame_count;
+  while (i > 0) {
+    opcode = static_cast<Translation::Opcode>(it.Next());
+    if (opcode == Translation::FRAME) {
+      // We don't inline constructor calls, so only the first, outermost
+      // frame can be a constructor frame in case of inlining.
+      bool is_constructor = (i == frame_count) && IsConstructor();
+
+      i--;
+      int ast_id = it.Next();
+      int function_id = it.Next();
+      it.Next();  // Skip height.
+      JSFunction* function =
+          JSFunction::cast(data->LiteralArray()->get(function_id));
+
+      // The translation commands are ordered and the receiver is always
+      // at the first position. Since we are always at a call when we need
+      // to construct a stack trace, the receiver is always in a stack slot.
+      opcode = static_cast<Translation::Opcode>(it.Next());
+      ASSERT(opcode == Translation::STACK_SLOT);
+      int input_slot_index = it.Next();
+
+      // Get the correct receiver in the optimized frame.
+      Object* receiver = NULL;
+      // Positive index means the value is spilled to the locals area. Negative
+      // means it is stored in the incoming parameter area.
+      if (input_slot_index >= 0) {
+        receiver = GetExpression(input_slot_index);
+      } else {
+        // Index -1 overlaps with last parameter, -n with the first parameter,
+        // (-n - 1) with the receiver with n being the number of parameters
+        // of the outermost, optimized frame.
+        int parameter_count = ComputeParametersCount();
+        int parameter_index = input_slot_index + parameter_count;
+        receiver = (parameter_index == -1)
+            ? this->receiver()
+            : this->GetParameter(parameter_index);
+      }
+
+      Code* code = function->shared()->code();
+      DeoptimizationOutputData* output_data =
+          DeoptimizationOutputData::cast(code->deoptimization_data());
+      unsigned entry = Deoptimizer::GetOutputInfo(output_data,
+                                                  ast_id,
+                                                  function->shared());
+      unsigned pc_offset =
+          FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
+      ASSERT(pc_offset > 0);
+
+      FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
+      frames->Add(summary);
+    } else {
+      // Skip over operands to advance to the next opcode.
+      it.Skip(Translation::NumberOfOperandsFor(opcode));
+    }
+  }
+}
+
+
+DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
+    int* deopt_index) {
+  ASSERT(is_optimized());
+
+  JSFunction* opt_function = JSFunction::cast(function());
+  Code* code = opt_function->code();
+
+  // The code object may have been replaced by lazy deoptimization. Fall
+  // back to a slow search in this case to find the original optimized
+  // code object.
+  if (!code->contains(pc())) {
+    code = PcToCodeCache::GcSafeFindCodeForPc(pc());
+  }
+  ASSERT(code != NULL);
+  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+
+  SafepointTable table(code);
+  unsigned pc_offset = static_cast<unsigned>(pc() - code->instruction_start());
+  for (unsigned i = 0; i < table.length(); i++) {
+    if (table.GetPcOffset(i) == pc_offset) {
+      *deopt_index = table.GetDeoptimizationIndex(i);
+      break;
+    }
+  }
+  ASSERT(*deopt_index != AstNode::kNoNumber);
+
+  return DeoptimizationInputData::cast(code->deoptimization_data());
+}
+
+
+void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
+  ASSERT(functions->length() == 0);
+  ASSERT(is_optimized());
+
+  int deopt_index = AstNode::kNoNumber;
+  DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
+
+  TranslationIterator it(data->TranslationByteArray(),
+                         data->TranslationIndex(deopt_index)->value());
+  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+  ASSERT(opcode == Translation::BEGIN);
+  int frame_count = it.Next();
+
+  // We insert the frames in reverse order because the frames
+  // in the deoptimization translation are ordered bottom-to-top.
+  while (frame_count > 0) {
+    opcode = static_cast<Translation::Opcode>(it.Next());
+    if (opcode == Translation::FRAME) {
+      frame_count--;
+      it.Next();  // Skip ast id.
+      int function_id = it.Next();
+      it.Next();  // Skip height.
+      JSFunction* function =
+          JSFunction::cast(data->LiteralArray()->get(function_id));
+      functions->Add(function);
+    } else {
+      // Skip over operands to advance to the next opcode.
+      it.Skip(Translation::NumberOfOperandsFor(opcode));
+    }
+  }
+}
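Summarize and GetFunctions above walk the same translation stream; its shape,
as implied by the iteration code (only the opcodes handled here are shown):

//   BEGIN       frame_count
//   FRAME       ast_id  literal_array_index  height
//     STACK_SLOT index        // first operand: where the receiver lives
//     ...                     // remaining operands, Skip()ped when unused
//   FRAME       ...           // one per inlined frame, ordered bottom-to-top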
+
+
 Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
   const int arguments = Smi::cast(GetExpression(0))->value();
   const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -789,7 +1067,11 @@
 void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
   IterateExpressions(v);
   IteratePc(v, pc_address(), code());
+  IterateArguments(v);
+}
 
+
+void JavaScriptFrame::IterateArguments(ObjectVisitor* v) const {
   // Traverse callee-saved registers, receiver, and parameters.
   const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
   const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset;
@@ -851,6 +1133,7 @@
   }
 }
 
+
 PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
   Counters::pc_to_code.Increment();
   ASSERT(IsPowerOf2(kPcToCodeCacheSize));
@@ -867,6 +1150,7 @@
     // been set. Otherwise, we risk trying to use a cache entry before
     // the code has been computed.
     entry->code = GcSafeFindCodeForPc(pc);
+    entry->safepoint_entry = NULL;
     entry->pc = pc;
   }
   return entry;
diff --git a/src/frames.h b/src/frames.h
index 2d4f338..778f9d2 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -51,6 +51,7 @@
   struct PcToCodeCacheEntry {
     Address pc;
     Code* code;
+    uint8_t* safepoint_entry;
   };
 
   static PcToCodeCacheEntry* cache(int index) {
@@ -115,6 +116,7 @@
   V(ENTRY_CONSTRUCT,   EntryConstructFrame)   \
   V(EXIT,              ExitFrame)             \
   V(JAVA_SCRIPT,       JavaScriptFrame)       \
+  V(OPTIMIZED,         OptimizedFrame)        \
   V(INTERNAL,          InternalFrame)         \
   V(CONSTRUCT,         ConstructFrame)        \
   V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
@@ -158,12 +160,17 @@
   bool is_entry() const { return type() == ENTRY; }
   bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
   bool is_exit() const { return type() == EXIT; }
-  bool is_java_script() const { return type() == JAVA_SCRIPT; }
+  bool is_optimized() const { return type() == OPTIMIZED; }
   bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
   bool is_internal() const { return type() == INTERNAL; }
   bool is_construct() const { return type() == CONSTRUCT; }
   virtual bool is_standard() const { return false; }
 
+  bool is_java_script() const {
+    Type type = this->type();
+    return (type == JAVA_SCRIPT) || (type == OPTIMIZED);
+  }
+
   // Accessors.
   Address sp() const { return state_.sp; }
   Address fp() const { return state_.fp; }
@@ -193,10 +200,17 @@
   Code* code() const { return GetContainingCode(pc()); }
 
   // Get the code object that contains the given pc.
-  Code* GetContainingCode(Address pc) const {
+  static Code* GetContainingCode(Address pc) {
     return PcToCodeCache::GetCacheEntry(pc)->code;
   }
 
+  // Get the code object containing the given pc and fill in the
+  // safepoint entry and the number of stack slots. The pc must be at
+  // a safepoint.
+  static Code* GetSafepointData(Address pc,
+                                uint8_t** safepoint_entry,
+                                unsigned* stack_slots);
+
   virtual void Iterate(ObjectVisitor* v) const = 0;
   static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
 
@@ -393,6 +407,36 @@
 };
 
 
+class FrameSummary BASE_EMBEDDED {
+ public:
+  FrameSummary(Object* receiver,
+               JSFunction* function,
+               Code* code,
+               int offset,
+               bool is_constructor)
+      : receiver_(receiver),
+        function_(function),
+        code_(code),
+        offset_(offset),
+        is_constructor_(is_constructor) { }
+  Handle<Object> receiver() { return receiver_; }
+  Handle<JSFunction> function() { return function_; }
+  Handle<Code> code() { return code_; }
+  Address pc() { return reinterpret_cast<Address>(*code_) + offset_; }
+  int offset() { return offset_; }
+  bool is_constructor() { return is_constructor_; }
+
+  void Print();
+
+ private:
+  Handle<Object> receiver_;
+  Handle<JSFunction> function_;
+  Handle<Code> code_;
+  int offset_;
+  bool is_constructor_;
+};
+
+
 class JavaScriptFrame: public StandardFrame {
  public:
   virtual Type type() const { return JAVA_SCRIPT; }
@@ -431,6 +475,12 @@
   // Determine the code for the frame.
   virtual Code* unchecked_code() const;
 
+  // Return a list with JSFunctions of this frame.
+  virtual void GetFunctions(List<JSFunction*>* functions);
+
+  // Build a list with summaries for this frame including all inlined frames.
+  virtual void Summarize(List<FrameSummary>* frames);
+
   static JavaScriptFrame* cast(StackFrame* frame) {
     ASSERT(frame->is_java_script());
     return static_cast<JavaScriptFrame*>(frame);
@@ -442,6 +492,10 @@
 
   virtual Address GetCallerStackPointer() const;
 
+  // Garbage collection support. Iterates over incoming arguments,
+  // receiver, and any callee-saved registers.
+  void IterateArguments(ObjectVisitor* v) const;
+
  private:
   inline Object* function_slot_object() const;
 
@@ -450,6 +504,31 @@
 };
 
 
+class OptimizedFrame : public JavaScriptFrame {
+ public:
+  virtual Type type() const { return OPTIMIZED; }
+
+  // GC support.
+  virtual void Iterate(ObjectVisitor* v) const;
+
+  // Return a list with JSFunctions of this frame.
+  // The functions are ordered bottom-to-top (i.e. functions.last()
+  // is the top-most activation)
+  virtual void GetFunctions(List<JSFunction*>* functions);
+
+  virtual void Summarize(List<FrameSummary>* frames);
+
+  DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
+
+ protected:
+  explicit OptimizedFrame(StackFrameIterator* iterator)
+      : JavaScriptFrame(iterator) { }
+
+ private:
+  friend class StackFrameIterator;
+};
+
+
 // Arguments adaptor frames are automatically inserted below
 // JavaScript frames when the actual number of parameters does not
 // match the formal number of parameters.
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 55aa230..58540f0 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,12 +29,13 @@
 
 #include "codegen-inl.h"
 #include "compiler.h"
+#include "debug.h"
 #include "full-codegen.h"
+#include "liveedit.h"
 #include "macro-assembler.h"
+#include "prettyprinter.h"
 #include "scopes.h"
 #include "stub-cache.h"
-#include "debug.h"
-#include "liveedit.h"
 
 namespace v8 {
 namespace internal {
@@ -166,10 +167,6 @@
 }
 
 
-void BreakableStatementChecker::VisitSlot(Slot* expr) {
-}
-
-
 void BreakableStatementChecker::VisitVariableProxy(VariableProxy* expr) {
 }
 
@@ -283,6 +280,9 @@
     int len = String::cast(script->source())->length();
     Counters::total_full_codegen_source_size.Increment(len);
   }
+  if (FLAG_trace_codegen) {
+    PrintF("Full Compiler - ");
+  }
   CodeGenerator::MakeCodePrologue(info);
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(NULL, kInitialBufferSize);
@@ -293,14 +293,105 @@
     ASSERT(!Top::has_pending_exception());
     return false;
   }
+  unsigned table_offset = cgen.EmitStackCheckTable();
 
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
   Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
+  code->set_optimizable(info->IsOptimizable());
+  cgen.PopulateDeoptimizationData(code);
+  code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
+  code->set_allow_osr_at_loop_nesting_level(0);
+  code->set_stack_check_table_start(table_offset);
+  CodeGenerator::PrintCode(code, info);
   info->SetCode(code);  // may be an empty handle.
   return !code.is_null();
 }
 
 
+unsigned FullCodeGenerator::EmitStackCheckTable() {
+  // The stack check table consists of a length (in number of entries)
+  // field, and then a sequence of entries.  Each entry is a pair of AST id
+  // and code-relative pc offset.
+  masm()->Align(kIntSize);
+  masm()->RecordComment("[ Stack check table");
+  unsigned offset = masm()->pc_offset();
+  unsigned length = stack_checks_.length();
+  __ dd(length);
+  for (unsigned i = 0; i < length; ++i) {
+    __ dd(stack_checks_[i].id);
+    __ dd(stack_checks_[i].pc_and_state);
+  }
+  masm()->RecordComment("]");
+  return offset;
+}
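Each dd() above emits one 32-bit word, so the table laid down in the
instruction stream looks like (for stack checks the pc_and_state field holds a
raw pc offset; see RecordStackCheck below):

//   [ length | ast_id_0 | pc_offset_0 | ... | ast_id_{n-1} | pc_offset_{n-1} ]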
+
+
+void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
+  // Fill in the deoptimization information.
+  ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
+  if (!info_->HasDeoptimizationSupport()) return;
+  int length = bailout_entries_.length();
+  Handle<DeoptimizationOutputData> data =
+      Factory::NewDeoptimizationOutputData(length, TENURED);
+  for (int i = 0; i < length; i++) {
+    data->SetAstId(i, Smi::FromInt(bailout_entries_[i].id));
+    data->SetPcAndState(i, Smi::FromInt(bailout_entries_[i].pc_and_state));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
+void FullCodeGenerator::PrepareForBailout(AstNode* node, State state) {
+  PrepareForBailoutForId(node->id(), state);
+}
+
+
+void FullCodeGenerator::RecordJSReturnSite(Call* call) {
+  // We record the offset of the function return so we can rebuild the frame
+  // if the function was inlined, i.e., this is the return address in the
+  // inlined function's frame.
+  //
+  // The state is ignored.  We defensively set it to TOS_REG, which is the
+  // real state of the unoptimized code at the return site.
+  PrepareForBailoutForId(call->ReturnId(), TOS_REG);
+#ifdef DEBUG
+  // In debug builds, mark the return so we can verify that this function
+  // was called.
+  ASSERT(!call->return_is_recorded_);
+  call->return_is_recorded_ = true;
+#endif
+}
+
+
+void FullCodeGenerator::PrepareForBailoutForId(int id, State state) {
+  // There's no need to prepare this code for bailouts from already optimized
+  // code or code that can't be optimized.
+  if (!FLAG_deopt || !info_->HasDeoptimizationSupport()) return;
+  unsigned pc_and_state =
+      StateField::encode(state) | PcField::encode(masm_->pc_offset());
+  BailoutEntry entry = { id, pc_and_state };
+#ifdef DEBUG
+  // Assert that we don't have multiple bailout entries for the same node.
+  for (int i = 0; i < bailout_entries_.length(); i++) {
+    if (bailout_entries_.at(i).id == entry.id) {
+      AstPrinter printer;
+      PrintF("%s", printer.PrintProgram(info_->function()));
+      UNREACHABLE();
+    }
+  }
+#endif  // DEBUG
+  bailout_entries_.Add(entry);
+}
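pc_and_state packs both values into one word via the StateField/PcField bit
fields declared in full-codegen.h (state in bits 0..7, pc offset in bits
8..31). A worked example:

unsigned pc_and_state = StateField::encode(TOS_REG)  // == 0x01
                      | PcField::encode(0x40);       // == 0x4000
// pc_and_state == 0x4001; the two halves come back out through
// StateField::decode(pc_and_state) and PcField::decode(pc_and_state).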
+
+
+void FullCodeGenerator::RecordStackCheck(int ast_id) {
+  // The pc offset does not need to be encoded and packed together with a
+  // state.
+  BailoutEntry entry = { ast_id, masm_->pc_offset() };
+  stack_checks_.Add(entry);
+}
+
+
 int FullCodeGenerator::SlotOffset(Slot* slot) {
   ASSERT(slot != NULL);
   // Offset is negative because higher indexes are at lower addresses.
@@ -335,13 +426,11 @@
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
-  // Move value into place.
   __ Move(result_register(), reg);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
-  // Move value into place.
   __ push(reg);
 }
 
@@ -349,6 +438,7 @@
 void FullCodeGenerator::TestContext::Plug(Register reg) const {
   // For simplicity we always test the accumulator register.
   __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
@@ -370,6 +460,7 @@
 void FullCodeGenerator::TestContext::PlugTOS() const {
   // For simplicity we always test the accumulator register.
   __ pop(result_register());
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
@@ -558,10 +649,9 @@
 }
 
 
-void FullCodeGenerator::SetSourcePosition(
-    int pos, PositionRecordingType recording_type) {
+void FullCodeGenerator::SetSourcePosition(int pos) {
   if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
-    masm_->positions_recorder()->RecordPosition(pos, recording_type);
+    masm_->positions_recorder()->RecordPosition(pos);
   }
 }
 
@@ -581,8 +671,12 @@
 
 FullCodeGenerator::InlineFunctionGenerator
   FullCodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) {
-    return kInlineFunctionGenerators[
-      static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction)];
+    int lookup_index =
+        static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction);
+    ASSERT(lookup_index >= 0);
+    ASSERT(static_cast<size_t>(lookup_index) <
+           ARRAY_SIZE(kInlineFunctionGenerators));
+    return kInlineFunctionGenerators[lookup_index];
 }
 
 
@@ -594,7 +688,6 @@
   ASSERT(function->intrinsic_type == Runtime::INLINE);
   InlineFunctionGenerator generator =
       FindInlineFunctionGenerator(function->function_id);
-  ASSERT(generator != NULL);
   ((*this).*(generator))(args);
 }
 
@@ -615,7 +708,8 @@
   switch (op) {
     case Token::COMMA:
       VisitForEffect(left);
-      Visit(right);
+      if (context()->IsTest()) ForwardBailoutToChild(expr);
+      context()->HandleExpression(right);
       break;
 
     case Token::OR:
@@ -670,8 +764,10 @@
 
   context()->EmitLogicalLeft(expr, &eval_right, &done);
 
+  PrepareForBailoutForId(expr->RightId(), NO_REGISTERS);
   __ bind(&eval_right);
-  Visit(expr->right());
+  if (context()->IsTest()) ForwardBailoutToChild(expr);
+  context()->HandleExpression(expr->right());
 
   __ bind(&done);
 }
@@ -693,15 +789,17 @@
     BinaryOperation* expr,
     Label* eval_right,
     Label* done) const {
-  codegen()->Visit(expr->left());
+  HandleExpression(expr->left());
   // We want the value in the accumulator for the test, and on the stack in case
   // we need it.
   __ push(result_register());
   Label discard, restore;
   if (expr->op() == Token::OR) {
+    codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     codegen()->DoTest(&restore, &discard, &restore);
   } else {
     ASSERT(expr->op() == Token::AND);
+    codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     codegen()->DoTest(&discard, &restore, &restore);
   }
   __ bind(&restore);
@@ -722,9 +820,11 @@
   __ push(result_register());
   Label discard;
   if (expr->op() == Token::OR) {
+    codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     codegen()->DoTest(done, &discard, &discard);
   } else {
     ASSERT(expr->op() == Token::AND);
+    codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     codegen()->DoTest(&discard, done, &discard);
   }
   __ bind(&discard);
@@ -746,12 +846,66 @@
 }
 
 
+void FullCodeGenerator::ForwardBailoutToChild(Expression* expr) {
+  if (!info_->HasDeoptimizationSupport()) return;
+  ASSERT(context()->IsTest());
+  ASSERT(expr == forward_bailout_stack_->expr());
+  forward_bailout_pending_ = forward_bailout_stack_;
+}
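A concrete case, as the logical-expression visits above suggest: for `a || b`
in a test context the parent's value is produced by the right operand, so the
parent's bailout responsibility is handed to that child here, and
VisitInTestContext then threads the pending entry through the
ForwardBailoutStack.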
+
+
+void FullCodeGenerator::EffectContext::HandleExpression(
+    Expression* expr) const {
+  codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::HandleExpression(
+    Expression* expr) const {
+  codegen()->HandleInNonTestContext(expr, TOS_REG);
+}
+
+
+void FullCodeGenerator::StackValueContext::HandleExpression(
+    Expression* expr) const {
+  codegen()->HandleInNonTestContext(expr, NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::TestContext::HandleExpression(Expression* expr) const {
+  codegen()->VisitInTestContext(expr);
+}
+
+
+void FullCodeGenerator::HandleInNonTestContext(Expression* expr, State state) {
+  ASSERT(forward_bailout_pending_ == NULL);
+  AstVisitor::Visit(expr);
+  PrepareForBailout(expr, state);
+  // Forwarding bailouts to children is a one shot operation. It
+  // should have been processed at this point.
+  ASSERT(forward_bailout_pending_ == NULL);
+}
+
+
+void FullCodeGenerator::VisitInTestContext(Expression* expr) {
+  ForwardBailoutStack stack(expr, forward_bailout_pending_);
+  ForwardBailoutStack* saved = forward_bailout_stack_;
+  forward_bailout_pending_ = NULL;
+  forward_bailout_stack_ = &stack;
+  AstVisitor::Visit(expr);
+  forward_bailout_stack_ = saved;
+}
+
+
 void FullCodeGenerator::VisitBlock(Block* stmt) {
   Comment cmnt(masm_, "[ Block");
   Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
+
+  PrepareForBailoutForId(stmt->EntryId(), TOS_REG);
   VisitStatements(stmt->statements());
   __ bind(nested_statement.break_target());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
 
 
@@ -775,18 +929,24 @@
 
   if (stmt->HasElseStatement()) {
     VisitForControl(stmt->condition(), &then_part, &else_part, &then_part);
+    PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
     __ bind(&then_part);
     Visit(stmt->then_statement());
     __ jmp(&done);
 
+    PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
     __ bind(&else_part);
     Visit(stmt->else_statement());
   } else {
     VisitForControl(stmt->condition(), &then_part, &done, &then_part);
+    PrepareForBailoutForId(stmt->ThenId(), NO_REGISTERS);
     __ bind(&then_part);
     Visit(stmt->then_statement());
+
+    PrepareForBailoutForId(stmt->ElseId(), NO_REGISTERS);
   }
   __ bind(&done);
+  PrepareForBailoutForId(stmt->id(), NO_REGISTERS);
 }
 
 
@@ -883,7 +1043,7 @@
 void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   Comment cmnt(masm_, "[ DoWhileStatement");
   SetStatementPosition(stmt);
-  Label body, stack_limit_hit, stack_check_success, done;
+  Label body, stack_check;
 
   Iteration loop_statement(this, stmt);
   increment_loop_depth();
@@ -891,75 +1051,65 @@
   __ bind(&body);
   Visit(stmt->body());
 
-  // Check stack before looping.
-  __ bind(loop_statement.continue_target());
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_success);
-
   // Record the position of the do while condition and make sure it is
   // possible to break on the condition.
+  __ bind(loop_statement.continue_target());
+  PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
   SetExpressionPosition(stmt->cond(), stmt->condition_position());
   VisitForControl(stmt->cond(),
-                  &body,
+                  &stack_check,
                   loop_statement.break_target(),
-                  loop_statement.break_target());
+                  &stack_check);
 
+  // Check stack before looping.
+  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+  __ bind(&stack_check);
+  EmitStackCheck(stmt);
+  __ jmp(&body);
+
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   __ bind(loop_statement.break_target());
-  __ jmp(&done);
-
-  __ bind(&stack_limit_hit);
-  StackCheckStub stack_stub;
-  __ CallStub(&stack_stub);
-  __ jmp(&stack_check_success);
-
-  __ bind(&done);
   decrement_loop_depth();
 }
 
 
 void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   Comment cmnt(masm_, "[ WhileStatement");
-  Label body, stack_limit_hit, stack_check_success, done;
+  Label test, body;
 
   Iteration loop_statement(this, stmt);
   increment_loop_depth();
 
   // Emit the test at the bottom of the loop.
-  __ jmp(loop_statement.continue_target());
+  __ jmp(&test);
 
+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&body);
   Visit(stmt->body());
-  __ bind(loop_statement.continue_target());
 
   // Emit the statement position here as this is where the while
   // statement code starts.
+  __ bind(loop_statement.continue_target());
   SetStatementPosition(stmt);
 
   // Check stack before looping.
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_success);
+  EmitStackCheck(stmt);
 
+  __ bind(&test);
   VisitForControl(stmt->cond(),
                   &body,
                   loop_statement.break_target(),
                   loop_statement.break_target());
 
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   __ bind(loop_statement.break_target());
-  __ jmp(&done);
-
-  __ bind(&stack_limit_hit);
-  StackCheckStub stack_stub;
-  __ CallStub(&stack_stub);
-  __ jmp(&stack_check_success);
-
-  __ bind(&done);
   decrement_loop_depth();
 }
 
 
 void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
   Comment cmnt(masm_, "[ ForStatement");
-  Label test, body, stack_limit_hit, stack_check_success;
+  Label test, body;
 
   Iteration loop_statement(this, stmt);
   if (stmt->init() != NULL) {
@@ -970,30 +1120,25 @@
   // Emit the test at the bottom of the loop (even if empty).
   __ jmp(&test);
 
-    __ bind(&stack_limit_hit);
-  StackCheckStub stack_stub;
-  __ CallStub(&stack_stub);
-  __ jmp(&stack_check_success);
-
+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&body);
   Visit(stmt->body());
 
+  PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
   __ bind(loop_statement.continue_target());
-
   SetStatementPosition(stmt);
   if (stmt->next() != NULL) {
     Visit(stmt->next());
   }
 
-  __ bind(&test);
   // Emit the statement position here as this is where the for
   // statement code starts.
   SetStatementPosition(stmt);
 
   // Check stack before looping.
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_success);
+  EmitStackCheck(stmt);
 
+  __ bind(&test);
   if (stmt->cond() != NULL) {
     VisitForControl(stmt->cond(),
                     &body,
@@ -1003,6 +1148,7 @@
     __ jmp(&body);
   }
 
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   __ bind(loop_statement.break_target());
   decrement_loop_depth();
 }
@@ -1134,6 +1280,7 @@
   Label true_case, false_case, done;
   VisitForControl(expr->condition(), &true_case, &false_case, &true_case);
 
+  PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS);
   __ bind(&true_case);
   SetExpressionPosition(expr->then_expression(),
                         expr->then_expression_position());
@@ -1144,14 +1291,16 @@
                     for_test->false_label(),
                     NULL);
   } else {
-    Visit(expr->then_expression());
+    context()->HandleExpression(expr->then_expression());
     __ jmp(&done);
   }
 
+  PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
   __ bind(&false_case);
+  if (context()->IsTest()) ForwardBailoutToChild(expr);
   SetExpressionPosition(expr->else_expression(),
                         expr->else_expression_position());
-  Visit(expr->else_expression());
+  context()->HandleExpression(expr->else_expression());
   // If control flow falls through Visit, merge it with true case here.
   if (!context()->IsTest()) {
     __ bind(&done);
@@ -1159,12 +1308,6 @@
 }
 
 
-void FullCodeGenerator::VisitSlot(Slot* expr) {
-  // Slots do not appear directly in the AST.
-  UNREACHABLE();
-}
-
-
 void FullCodeGenerator::VisitLiteral(Literal* expr) {
   Comment cmnt(masm_, "[ Literal");
   context()->Plug(expr->handle());
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 02335a9..0482ee8 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -31,11 +31,16 @@
 #include "v8.h"
 
 #include "ast.h"
+#include "code-stubs.h"
+#include "codegen.h"
 #include "compiler.h"
 
 namespace v8 {
 namespace internal {
 
+// Forward declarations.
+class JumpPatchSite;
+
 // AST node visitor which can tell whether a given statement will be breakable
 // when the code is compiled by the full compiler in the debugger. This means
 // that there will be an IC (load/store/call) in the code generated for the
@@ -66,17 +71,39 @@
 
 class FullCodeGenerator: public AstVisitor {
  public:
+  enum State {
+    NO_REGISTERS,
+    TOS_REG
+  };
+
   explicit FullCodeGenerator(MacroAssembler* masm)
       : masm_(masm),
         info_(NULL),
         nesting_stack_(NULL),
         loop_depth_(0),
-        context_(NULL) {
+        context_(NULL),
+        bailout_entries_(0),
+        stack_checks_(2),  // There's always at least one.
+        forward_bailout_stack_(NULL),
+        forward_bailout_pending_(NULL) {
   }
 
   static bool MakeCode(CompilationInfo* info);
 
   void Generate(CompilationInfo* info);
+  void PopulateDeoptimizationData(Handle<Code> code);
+
+  class StateField : public BitField<State, 0, 8> { };
+  class PcField    : public BitField<unsigned, 8, 32 - 8> { };
+
+  static const char* State2String(State state) {
+    switch (state) {
+      case NO_REGISTERS: return "NO_REGISTERS";
+      case TOS_REG: return "TOS_REG";
+    }
+    UNREACHABLE();
+    return NULL;
+  }
 
  private:
   class Breakable;
@@ -229,6 +256,24 @@
     DISALLOW_COPY_AND_ASSIGN(ForIn);
   };
 
+  // The forward bailout stack keeps track of the expressions whose
+  // bailout point lies just before the control flow is split in a
+  // child node. The stack elements are linked together through their
+  // parent links, built up while visiting expressions in test contexts
+  // after a bailout has been forwarded to a child.
+  class ForwardBailoutStack BASE_EMBEDDED {
+   public:
+    ForwardBailoutStack(Expression* expr, ForwardBailoutStack* parent)
+        : expr_(expr), parent_(parent) { }
+
+    Expression* expr() const { return expr_; }
+    ForwardBailoutStack* parent() const { return parent_; }
+
+   private:
+    Expression* const expr_;
+    ForwardBailoutStack* const parent_;
+  };
+
   enum ConstantOperand {
     kNoConstants,
     kLeftConstant,
@@ -278,19 +323,23 @@
   // register.
   MemOperand EmitSlotSearch(Slot* slot, Register scratch);
 
+  // Forward the bailout responsibility for the given expression to
+  // the next child visited (which must be in a test context).
+  void ForwardBailoutToChild(Expression* expr);
+
   void VisitForEffect(Expression* expr) {
     EffectContext context(this);
-    Visit(expr);
+    HandleInNonTestContext(expr, NO_REGISTERS);
   }
 
   void VisitForAccumulatorValue(Expression* expr) {
     AccumulatorValueContext context(this);
-    Visit(expr);
+    HandleInNonTestContext(expr, TOS_REG);
   }
 
   void VisitForStackValue(Expression* expr) {
     StackValueContext context(this);
-    Visit(expr);
+    HandleInNonTestContext(expr, NO_REGISTERS);
   }
 
   void VisitForControl(Expression* expr,
@@ -298,9 +347,15 @@
                        Label* if_false,
                        Label* fall_through) {
     TestContext context(this, if_true, if_false, fall_through);
-    Visit(expr);
+    VisitInTestContext(expr);
+    // Forwarding bailouts to children is a one shot operation. It
+    // should have been processed at this point.
+    ASSERT(forward_bailout_pending_ == NULL);
   }
 
+  void HandleInNonTestContext(Expression* expr, State state);
+  void VisitInTestContext(Expression* expr);
+
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
   void DeclareGlobals(Handle<FixedArray> pairs);
 
@@ -314,12 +369,39 @@
                          Label* if_false,
                          Label* fall_through);
 
+  // Bailout support.
+  void PrepareForBailout(AstNode* node, State state);
+  void PrepareForBailoutForId(int id, State state);
+
+  // Record a call's return site offset, used to rebuild the frame if the
+  // called function was inlined at the site.
+  void RecordJSReturnSite(Call* call);
+
+  // Prepare for bailout before a test (or compare) and branch.  If
+  // should_normalize is true, the following comparison will not handle
+  // the canonical JS true value, so we insert a (dead) test against
+  // true at the actual bailout target from the optimized code. If
+  // should_normalize is false, the true and false labels are ignored.
+  void PrepareForBailoutBeforeSplit(State state,
+                                    bool should_normalize,
+                                    Label* if_true,
+                                    Label* if_false);
+
   // Platform-specific code for a variable, constant, or function
   // declaration.  Functions have an initial value.
   void EmitDeclaration(Variable* variable,
                        Variable::Mode mode,
                        FunctionLiteral* function);
 
+  // Platform-specific code for checking the stack limit at the back edge of
+  // a loop.
+  void EmitStackCheck(IterationStatement* stmt);
+  // Record the OSR AST id corresponding to a stack check in the code.
+  void RecordStackCheck(int osr_ast_id);
+  // Emit a table of stack check ids and pcs into the code stream.  Return
+  // the offset of the start of the table.
+  unsigned EmitStackCheckTable();
+
   // Platform-specific return sequence
   void EmitReturnSequence();
 
@@ -406,7 +488,7 @@
 
   // Assign to the given expression as if via '='. The right-hand-side value
   // is expected in the accumulator.
-  void EmitAssignment(Expression* expr);
+  void EmitAssignment(Expression* expr, int bailout_ast_id);
 
   // Complete a variable assignment.  The right-hand-side value is expected
   // in the accumulator.
@@ -427,9 +509,7 @@
   void SetStatementPosition(Statement* stmt);
   void SetExpressionPosition(Expression* expr, int pos);
   void SetStatementPosition(int pos);
-  void SetSourcePosition(
-      int pos,
-      PositionRecordingType recording_type = NORMAL_POSITION);
+  void SetSourcePosition(int pos);
 
   // Non-local control flow support.
   void EnterFinallyBlock();
@@ -460,6 +540,10 @@
   // Helper for calling an IC stub.
   void EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode);
 
+  // Calling an IC stub with a patch site. Passing NULL for patch_site
+  // indicates no inlined smi code and emits a nop after the IC call.
+  void EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site);
+
   // Set fields in the stack frame. Offsets are the frame pointer relative
   // offsets defined in, e.g., StandardFrameConstants.
   void StoreToFrameField(int frame_offset, Register value);
@@ -477,14 +561,13 @@
 
   void VisitForTypeofValue(Expression* expr);
 
-  MacroAssembler* masm_;
-  CompilationInfo* info_;
+  struct BailoutEntry {
+    unsigned id;
+    unsigned pc_and_state;
+  };
 
-  Label return_label_;
-  NestedStatement* nesting_stack_;
-  int loop_depth_;
 
-  class ExpressionContext {
+  class ExpressionContext BASE_EMBEDDED {
    public:
     explicit ExpressionContext(FullCodeGenerator* codegen)
         : masm_(codegen->masm()), old_(codegen->context()), codegen_(codegen) {
@@ -510,7 +593,8 @@
 
     // Emit code to convert pure control flow to a pair of unbound labels into
     // the result expected according to this expression context.  The
-    // implementation may decide to bind either of the labels.
+    // implementation will bind both labels unless it's a TestContext, which
+    // won't bind them at this point.
     virtual void Plug(Label* materialize_true,
                       Label* materialize_false) const = 0;
 
@@ -532,12 +616,14 @@
                              Label** if_false,
                              Label** fall_through) const = 0;
 
+    virtual void HandleExpression(Expression* expr) const = 0;
+
     // Returns true if we are evaluating only for side effects (ie if the result
-    // will be discarded.
+    // will be discarded).
     virtual bool IsEffect() const { return false; }
 
     // Returns true if we are branching on the value rather than materializing
-    // it.
+    // it.  Only used for asserts.
     virtual bool IsTest() const { return false; }
 
    protected:
@@ -571,6 +657,7 @@
                              Label** if_true,
                              Label** if_false,
                              Label** fall_through) const;
+    virtual void HandleExpression(Expression* expr) const;
   };
 
   class StackValueContext : public ExpressionContext {
@@ -594,6 +681,7 @@
                              Label** if_true,
                              Label** if_false,
                              Label** fall_through) const;
+    virtual void HandleExpression(Expression* expr) const;
   };
 
   class TestContext : public ExpressionContext {
@@ -632,6 +720,7 @@
                              Label** if_true,
                              Label** if_false,
                              Label** fall_through) const;
+    virtual void HandleExpression(Expression* expr) const;
     virtual bool IsTest() const { return true; }
 
    private:
@@ -661,10 +750,20 @@
                              Label** if_true,
                              Label** if_false,
                              Label** fall_through) const;
+    virtual void HandleExpression(Expression* expr) const;
     virtual bool IsEffect() const { return true; }
   };
 
+  MacroAssembler* masm_;
+  CompilationInfo* info_;
+  Label return_label_;
+  NestedStatement* nesting_stack_;
+  int loop_depth_;
   const ExpressionContext* context_;
+  ZoneList<BailoutEntry> bailout_entries_;
+  ZoneList<BailoutEntry> stack_checks_;
+  ForwardBailoutStack* forward_bailout_stack_;
+  ForwardBailoutStack* forward_bailout_pending_;
 
   friend class NestedStatement;
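
[Editor's aside] The BailoutEntry table added above stores each AST id next to
a pc_and_state word packed through the StateField/PcField bit fields. A minimal
standalone sketch of that encoding, using plain shifts instead of V8's BitField
template (names here are hypothetical):

    #include <cassert>
    #include <cstdint>

    enum State { NO_REGISTERS = 0, TOS_REG = 1 };

    // Low 8 bits hold the State, upper 24 bits hold the pc offset,
    // mirroring BitField<State, 0, 8> and BitField<unsigned, 8, 32 - 8>.
    uint32_t EncodePcAndState(unsigned pc, State state) {
      assert(pc < (1u << 24));  // The pc offset must fit in 24 bits.
      return (pc << 8) | static_cast<uint32_t>(state);
    }

    State DecodeState(uint32_t word) {
      return static_cast<State>(word & 0xff);
    }

    unsigned DecodePc(uint32_t word) { return word >> 8; }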
 
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 5339840..18cdc5a 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -30,6 +30,8 @@
 #include "api.h"
 #include "global-handles.h"
 
+#include "vm-state-inl.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/globals.h b/src/globals.h
index 88c3e78..35156ae 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -28,6 +28,8 @@
 #ifndef V8_GLOBALS_H_
 #define V8_GLOBALS_H_
 
+#include "../include/v8stdint.h"
+
 namespace v8 {
 namespace internal {
 
@@ -147,13 +149,16 @@
 #ifdef _MSC_VER
 #define V8_UINT64_C(x)  (x ## UI64)
 #define V8_INT64_C(x)   (x ## I64)
+#define V8_INTPTR_C(x)  (x ## I64)
 #define V8_PTR_PREFIX "ll"
 #else  // _MSC_VER
 #define V8_UINT64_C(x)  (x ## UL)
 #define V8_INT64_C(x)   (x ## L)
+#define V8_INTPTR_C(x)  (x ## L)
 #define V8_PTR_PREFIX "l"
 #endif  // _MSC_VER
 #else  // V8_HOST_ARCH_64_BIT
+#define V8_INTPTR_C(x)  (x)
 #define V8_PTR_PREFIX ""
 #endif  // V8_HOST_ARCH_64_BIT
 
@@ -223,6 +228,7 @@
 const int kBinary32MantissaBits = 23;
 const int kBinary32ExponentShift = 23;
 
+
 // The expression OFFSET_OF(type, field) computes the byte-offset
 // of the specified field relative to the containing type. This
 // corresponds to 'offsetof' (in stddef.h), except that it doesn't
diff --git a/src/handles.cc b/src/handles.cc
index 37a5011..68c61b5 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -39,6 +39,7 @@
 #include "runtime.h"
 #include "string-search.h"
 #include "stub-cache.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -224,13 +225,7 @@
 
 
 Handle<String> FlattenGetString(Handle<String> string) {
-  Handle<String> result;
-  CALL_AND_RETRY(string->TryFlatten(),
-                 { result = Handle<String>(String::cast(__object__));
-                   break; },
-                 return Handle<String>());
-  ASSERT(string->IsFlat());
-  return result;
+  CALL_HEAP_FUNCTION(string->TryFlatten(), String);
 }
 
 
@@ -803,7 +798,7 @@
 static bool CompileLazyHelper(CompilationInfo* info,
                               ClearExceptionFlag flag) {
   // Compile the source information to a code object.
-  ASSERT(!info->shared_info()->is_compiled());
+  ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
   bool result = Compiler::CompileLazy(info);
   ASSERT(result != Top::has_pending_exception());
   if (!result && flag == CLEAR_EXCEPTION) Top::clear_pending_exception();
@@ -820,36 +815,47 @@
 
 bool CompileLazy(Handle<JSFunction> function,
                  ClearExceptionFlag flag) {
+  bool result = true;
   if (function->shared()->is_compiled()) {
-    function->set_code(function->shared()->code());
-    PROFILE(FunctionCreateEvent(*function));
+    function->ReplaceCode(function->shared()->code());
     function->shared()->set_code_age(0);
-    return true;
   } else {
     CompilationInfo info(function);
-    bool result = CompileLazyHelper(&info, flag);
+    result = CompileLazyHelper(&info, flag);
     ASSERT(!result || function->is_compiled());
-    PROFILE(FunctionCreateEvent(*function));
-    return result;
   }
+  if (result && function->is_compiled()) {
+    PROFILE(FunctionCreateEvent(*function));
+  }
+  return result;
 }
 
 
 bool CompileLazyInLoop(Handle<JSFunction> function,
                        ClearExceptionFlag flag) {
+  bool result = true;
   if (function->shared()->is_compiled()) {
-    function->set_code(function->shared()->code());
-    PROFILE(FunctionCreateEvent(*function));
+    function->ReplaceCode(function->shared()->code());
     function->shared()->set_code_age(0);
-    return true;
   } else {
     CompilationInfo info(function);
     info.MarkAsInLoop();
-    bool result = CompileLazyHelper(&info, flag);
+    result = CompileLazyHelper(&info, flag);
     ASSERT(!result || function->is_compiled());
-    PROFILE(FunctionCreateEvent(*function));
-    return result;
   }
+  if (result && function->is_compiled()) {
+    PROFILE(FunctionCreateEvent(*function));
+  }
+  return result;
+}
+
+
+bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id) {
+  CompilationInfo info(function);
+  info.SetOptimizing(osr_ast_id);
+  bool result = CompileLazyHelper(&info, KEEP_EXCEPTION);
+  if (result) PROFILE(FunctionCreateEvent(*function));
+  return result;
 }
 
 
diff --git a/src/handles.h b/src/handles.h
index 2e18ab3..8fd25dc 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -342,6 +342,8 @@
 
 bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
 
+bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id);
+
 class NoHandleAllocation BASE_EMBEDDED {
  public:
 #ifndef DEBUG
diff --git a/src/heap-inl.h b/src/heap-inl.h
index ba50c0f..ef83998 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -409,8 +409,8 @@
       v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\
     }                                                                     \
     if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY;                \
-    Heap::CollectGarbage(Failure::cast(__maybe_object__)->                \
-                             allocation_space());                         \
+    Heap::CollectGarbage(                                                 \
+        Failure::cast(__maybe_object__)->allocation_space());             \
     __maybe_object__ = FUNCTION_CALL;                                     \
     if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE;            \
     if (__maybe_object__->IsOutOfMemory()) {                              \
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 91ac986..dfda7c6 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2009-2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -348,30 +348,37 @@
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
-HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name, int type) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
+                                         int type,
+                                         v8::ActivityControl* control) {
   ASSERT(singleton_ != NULL);
-  return singleton_->TakeSnapshotImpl(name, type);
+  return singleton_->TakeSnapshotImpl(name, type, control);
 }
 
 
-HeapSnapshot* HeapProfiler::TakeSnapshot(String* name, int type) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
+                                         int type,
+                                         v8::ActivityControl* control) {
   ASSERT(singleton_ != NULL);
-  return singleton_->TakeSnapshotImpl(name, type);
+  return singleton_->TakeSnapshotImpl(name, type, control);
 }
 
 
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name, int type) {
-  Heap::CollectAllGarbage(true);
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
+                                             int type,
+                                             v8::ActivityControl* control) {
   HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
   HeapSnapshot* result =
       snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
+  bool generation_completed = true;
   switch (s_type) {
     case HeapSnapshot::kFull: {
-      HeapSnapshotGenerator generator(result);
-      generator.GenerateSnapshot();
+      HeapSnapshotGenerator generator(result, control);
+      generation_completed = generator.GenerateSnapshot();
       break;
     }
     case HeapSnapshot::kAggregated: {
+      Heap::CollectAllGarbage(true);
       AggregatedHeapSnapshot agg_snapshot;
       AggregatedHeapSnapshotGenerator generator(&agg_snapshot);
       generator.GenerateSnapshot();
@@ -381,13 +388,19 @@
     default:
       UNREACHABLE();
   }
-  snapshots_->SnapshotGenerationFinished();
+  if (!generation_completed) {
+    delete result;
+    result = NULL;
+  }
+  snapshots_->SnapshotGenerationFinished(result);
   return result;
 }
 
 
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name, int type) {
-  return TakeSnapshotImpl(snapshots_->GetName(name), type);
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
+                                             int type,
+                                             v8::ActivityControl* control) {
+  return TakeSnapshotImpl(snapshots_->GetName(name), type, control);
 }
 
 
@@ -795,7 +808,7 @@
 
 
 void AggregatedHeapSnapshotGenerator::GenerateSnapshot() {
-  HeapIterator iterator(HeapIterator::kPreciseFiltering);
+  HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     CollectStats(obj);
     agg_snapshot_->js_cons_profile()->CollectStats(obj);
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index 2ef081e..90c664e 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2009-2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -56,8 +56,12 @@
   static void TearDown();
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  static HeapSnapshot* TakeSnapshot(const char* name, int type);
-  static HeapSnapshot* TakeSnapshot(String* name, int type);
+  static HeapSnapshot* TakeSnapshot(const char* name,
+                                    int type,
+                                    v8::ActivityControl* control);
+  static HeapSnapshot* TakeSnapshot(String* name,
+                                    int type,
+                                    v8::ActivityControl* control);
   static int GetSnapshotsCount();
   static HeapSnapshot* GetSnapshot(int index);
   static HeapSnapshot* FindSnapshot(unsigned uid);
@@ -75,8 +79,12 @@
  private:
   HeapProfiler();
   ~HeapProfiler();
-  HeapSnapshot* TakeSnapshotImpl(const char* name, int type);
-  HeapSnapshot* TakeSnapshotImpl(String* name, int type);
+  HeapSnapshot* TakeSnapshotImpl(const char* name,
+                                 int type,
+                                 v8::ActivityControl* control);
+  HeapSnapshot* TakeSnapshotImpl(String* name,
+                                 int type,
+                                 v8::ActivityControl* control);
 
   HeapSnapshotsCollection* snapshots_;
   unsigned next_snapshot_uid_;
diff --git a/src/heap.cc b/src/heap.cc
index 16415ad..1e99991 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -38,10 +38,12 @@
 #include "mark-compact.h"
 #include "natives.h"
 #include "objects-visiting.h"
+#include "runtime-profiler.h"
 #include "scanner-base.h"
 #include "scopeinfo.h"
 #include "snapshot.h"
 #include "v8threads.h"
+#include "vm-state-inl.h"
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
 #include "regexp-macro-assembler.h"
 #include "arm/regexp-macro-assembler-arm.h"
@@ -839,6 +841,8 @@
   ContextSlotCache::Clear();
   DescriptorLookupCache::Clear();
 
+  RuntimeProfiler::MarkCompactPrologue(is_compacting);
+
   CompilationCache::MarkCompactPrologue();
 
   CompletelyClearInstanceofCache();
@@ -1049,6 +1053,14 @@
   // Scavenge object reachable from the global contexts list directly.
   scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
 
+  // Scavenge objects reachable from the runtime-profiler sampler
+  // window directly.
+  Object** sampler_window_address = RuntimeProfiler::SamplerWindowAddress();
+  int sampler_window_size = RuntimeProfiler::SamplerWindowSize();
+  scavenge_visitor.VisitPointers(
+      sampler_window_address,
+      sampler_window_address + sampler_window_size);
+
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
 
   UpdateNewSpaceReferencesInExternalStringTable(
@@ -1116,6 +1128,40 @@
 }
 
 
+static Object* ProcessFunctionWeakReferences(Object* function,
+                                             WeakObjectRetainer* retainer) {
+  Object* head = Heap::undefined_value();
+  JSFunction* tail = NULL;
+  Object* candidate = function;
+  while (!candidate->IsUndefined()) {
+    // Check whether to keep the candidate in the list.
+    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
+    Object* retain = retainer->RetainAs(candidate);
+    if (retain != NULL) {
+      if (head->IsUndefined()) {
+        // First element in the list.
+        head = candidate_function;
+      } else {
+        // Subsequent elements in the list.
+        ASSERT(tail != NULL);
+        tail->set_next_function_link(candidate_function);
+      }
+      // Retained function is new tail.
+      tail = candidate_function;
+    }
+    // Move to next element in the list.
+    candidate = candidate_function->next_function_link();
+  }
+
+  // Terminate the list if there are one or more elements.
+  if (tail != NULL) {
+    tail->set_next_function_link(Heap::undefined_value());
+  }
+
+  return head;
+}
+
+
 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
   Object* head = undefined_value();
   Context* tail = NULL;
@@ -1137,6 +1183,15 @@
       }
       // Retained context is new tail.
       tail = candidate_context;
+
+      // Process the weak list of optimized functions for the context.
+      Object* function_list_head =
+          ProcessFunctionWeakReferences(
+              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
+              retainer);
+      candidate_context->set_unchecked(Context::OPTIMIZED_FUNCTIONS_LIST,
+                                       function_list_head,
+                                       UPDATE_WRITE_BARRIER);
     }
     // Move to next element in the list.
     candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
@@ -1651,6 +1706,11 @@
   }
   set_byte_array_map(Map::cast(obj));
 
+  { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_empty_byte_array(ByteArray::cast(obj));
+
   { MaybeObject* maybe_obj =
         AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2245,9 +2305,11 @@
   share->set_debug_info(undefined_value());
   share->set_inferred_name(empty_string());
   share->set_compiler_hints(0);
+  share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
   share->set_initial_map(undefined_value());
   share->set_this_property_assignments_count(0);
   share->set_this_property_assignments(undefined_value());
+  share->set_opt_count(0);
   share->set_num_literals(0);
   share->set_end_position(0);
   share->set_function_token_position(0);
@@ -2666,6 +2728,7 @@
   code->set_instruction_size(desc.instr_size);
   code->set_relocation_info(ByteArray::cast(reloc_info));
   code->set_flags(flags);
+  code->set_deoptimization_data(empty_fixed_array());
   // Allow self references to created code object by patching the handle to
   // point to the newly allocated Code object.
   if (!self_reference.is_null()) {
@@ -2794,6 +2857,7 @@
   function->set_prototype_or_initial_map(prototype);
   function->set_context(undefined_value());
   function->set_literals(empty_fixed_array());
+  function->set_next_function_link(undefined_value());
   return function;
 }
 
@@ -4419,7 +4483,7 @@
       MemoryAllocator::Size() + MemoryAllocator::Available();
   *stats->os_error = OS::GetLastError();
   if (take_snapshot) {
-    HeapIterator iterator(HeapIterator::kPreciseFiltering);
+    HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
     for (HeapObject* obj = iterator.next();
          obj != NULL;
          obj = iterator.next()) {
@@ -4853,13 +4917,20 @@
 }
 
 
-class FreeListNodesFilter {
+class HeapObjectsFilter {
+ public:
+  virtual ~HeapObjectsFilter() {}
+  virtual bool SkipObject(HeapObject* object) = 0;
+};
+
+
+class FreeListNodesFilter : public HeapObjectsFilter {
  public:
   FreeListNodesFilter() {
     MarkFreeListNodes();
   }
 
-  inline bool IsFreeListNode(HeapObject* object) {
+  bool SkipObject(HeapObject* object) {
     if (object->IsMarked()) {
       object->ClearMark();
       return true;
@@ -4891,6 +4962,65 @@
 };
 
 
+class UnreachableObjectsFilter : public HeapObjectsFilter {
+ public:
+  UnreachableObjectsFilter() {
+    MarkUnreachableObjects();
+  }
+
+  bool SkipObject(HeapObject* object) {
+    if (object->IsMarked()) {
+      object->ClearMark();
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+ private:
+  class UnmarkingVisitor : public ObjectVisitor {
+   public:
+    UnmarkingVisitor() : list_(10) {}
+
+    void VisitPointers(Object** start, Object** end) {
+      for (Object** p = start; p < end; p++) {
+        if (!(*p)->IsHeapObject()) continue;
+        HeapObject* obj = HeapObject::cast(*p);
+        if (obj->IsMarked()) {
+          obj->ClearMark();
+          list_.Add(obj);
+        }
+      }
+    }
+
+    bool can_process() { return !list_.is_empty(); }
+
+    void ProcessNext() {
+      HeapObject* obj = list_.RemoveLast();
+      obj->Iterate(this);
+    }
+
+   private:
+    List<HeapObject*> list_;
+  };
+
+  void MarkUnreachableObjects() {
+    HeapIterator iterator;
+    for (HeapObject* obj = iterator.next();
+         obj != NULL;
+         obj = iterator.next()) {
+      obj->SetMark();
+    }
+    UnmarkingVisitor visitor;
+    Heap::IterateRoots(&visitor, VISIT_ONLY_STRONG);
+    while (visitor.can_process())
+      visitor.ProcessNext();
+  }
+
+  AssertNoAllocation no_alloc;
+};
+
+
 HeapIterator::HeapIterator()
     : filtering_(HeapIterator::kNoFiltering),
       filter_(NULL) {
@@ -4898,7 +5028,7 @@
 }
 
 
-HeapIterator::HeapIterator(HeapIterator::FreeListNodesFiltering filtering)
+HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
     : filtering_(filtering),
       filter_(NULL) {
   Init();
@@ -4912,12 +5042,17 @@
 
 void HeapIterator::Init() {
   // Start the iteration.
-  if (filtering_ == kPreciseFiltering) {
-    filter_ = new FreeListNodesFilter;
-    space_iterator_ =
-        new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
-  } else {
-    space_iterator_ = new SpaceIterator;
+  space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
+      new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
+  switch (filtering_) {
+    case kFilterFreeListNodes:
+      filter_ = new FreeListNodesFilter;
+      break;
+    case kFilterUnreachable:
+      filter_ = new UnreachableObjectsFilter;
+      break;
+    default:
+      break;
   }
   object_iterator_ = space_iterator_->next();
 }
@@ -4925,9 +5060,9 @@
 
 void HeapIterator::Shutdown() {
 #ifdef DEBUG
-  // Assert that in precise mode we have iterated through all
+  // Assert that in filtering mode we have iterated through all
   // objects. Otherwise, heap will be left in an inconsistent state.
-  if (filtering_ == kPreciseFiltering) {
+  if (filtering_ != kNoFiltering) {
     ASSERT(object_iterator_ == NULL);
   }
 #endif
@@ -4944,7 +5079,7 @@
   if (filter_ == NULL) return NextObject();
 
   HeapObject* obj = NextObject();
-  while (obj != NULL && filter_->IsFreeListNode(obj)) obj = NextObject();
+  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
   return obj;
 }
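
[Editor's aside] ProcessFunctionWeakReferences above is an instance of the
classic head/tail pruning walk over an intrusive singly linked list. The same
pattern on a plain struct, as a sketch (hypothetical types, not V8's):

    #include <cstddef>

    struct Node {
      int value;
      Node* next;
    };

    // Keep only nodes for which keep(n) returns true, preserving order.
    Node* Prune(Node* list, bool (*keep)(Node*)) {
      Node* head = NULL;
      Node* tail = NULL;
      for (Node* n = list; n != NULL; n = n->next) {
        if (!keep(n)) continue;
        if (head == NULL) {
          head = n;        // First retained node is the new head.
        } else {
          tail->next = n;  // Link it behind the previous retained node.
        }
        tail = n;          // The retained node becomes the new tail.
      }
      if (tail != NULL) tail->next = NULL;  // Terminate the pruned list.
      return head;
    }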
 
diff --git a/src/heap.h b/src/heap.h
index 93caf3b..18a4afb 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -62,6 +62,7 @@
   V(Object, termination_exception, TerminationException)                       \
   V(Map, hash_table_map, HashTableMap)                                         \
   V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
+  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
   V(Map, string_map, StringMap)                                                \
   V(Map, ascii_string_map, AsciiStringMap)                                     \
   V(Map, symbol_map, SymbolMap)                                                \
@@ -173,6 +174,8 @@
   V(value_of_symbol, "valueOf")                                          \
   V(InitializeVarGlobal_symbol, "InitializeVarGlobal")                   \
   V(InitializeConstGlobal_symbol, "InitializeConstGlobal")               \
+  V(KeyedLoadSpecialized_symbol, "KeyedLoadSpecialized")                 \
+  V(KeyedStoreSpecialized_symbol, "KeyedStoreSpecialized")               \
   V(stack_overflow_symbol, "kStackOverflowBoilerplate")                  \
   V(illegal_access_symbol, "illegal access")                             \
   V(out_of_memory_symbol, "out-of-memory")                               \
@@ -1116,9 +1119,9 @@
   static int contexts_disposed_;
 
 #if defined(V8_TARGET_ARCH_X64)
-  static const int kMaxObjectSizeInNewSpace = 512*KB;
+  static const int kMaxObjectSizeInNewSpace = 1024*KB;
 #else
-  static const int kMaxObjectSizeInNewSpace = 256*KB;
+  static const int kMaxObjectSizeInNewSpace = 512*KB;
 #endif
 
   static NewSpace new_space_;
@@ -1582,17 +1585,18 @@
 // nodes filtering uses GC marks, it can't be used during MS/MC GC
 // phases. Also, it is forbidden to interrupt iteration in this mode,
 // as this will leave heap objects marked (and thus, unusable).
-class FreeListNodesFilter;
+class HeapObjectsFilter;
 
 class HeapIterator BASE_EMBEDDED {
  public:
-  enum FreeListNodesFiltering {
+  enum HeapObjectsFiltering {
     kNoFiltering,
-    kPreciseFiltering
+    kFilterFreeListNodes,
+    kFilterUnreachable
   };
 
   HeapIterator();
-  explicit HeapIterator(FreeListNodesFiltering filtering);
+  explicit HeapIterator(HeapObjectsFiltering filtering);
   ~HeapIterator();
 
   HeapObject* next();
@@ -1605,8 +1609,8 @@
   void Shutdown();
   HeapObject* NextObject();
 
-  FreeListNodesFiltering filtering_;
-  FreeListNodesFilter* filter_;
+  HeapObjectsFiltering filtering_;
+  HeapObjectsFilter* filter_;
   // Space iterator for iterating all the spaces.
   SpaceIterator* space_iterator_;
   // Object iterator for the space currently being iterated.
@@ -1965,6 +1969,8 @@
 class TranscendentalCache {
  public:
   enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};
+  static const int kTranscendentalTypeBits = 3;
+  STATIC_ASSERT((1 << kTranscendentalTypeBits) >= kNumberOfCaches);
 
   explicit TranscendentalCache(Type t);
 
@@ -2051,7 +2057,7 @@
 
   // Allow access to the caches_ array as an ExternalReference.
   friend class ExternalReference;
-  // Inline implementation of the caching.
+  // Inline implementation of the cache.
   friend class TranscendentalCacheStub;
 
   static TranscendentalCache* caches_[kNumberOfCaches];
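
[Editor's aside] The new kTranscendentalTypeBits constant is paired with a
STATIC_ASSERT guaranteeing that three bits can index every cache type, so code
stubs may embed the type in a small bit field. The equivalent check in
standalone form (C++11 static_assert standing in for V8's macro):

    enum Type { ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches };
    const int kTranscendentalTypeBits = 3;
    // Compilation fails if a new cache type no longer fits in the field.
    static_assert((1 << kTranscendentalTypeBits) >= kNumberOfCaches,
                  "cache type must fit in kTranscendentalTypeBits bits");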
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
new file mode 100644
index 0000000..3f39888
--- /dev/null
+++ b/src/hydrogen-instructions.cc
@@ -0,0 +1,1446 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "factory.h"
+#include "hydrogen.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                                         \
+  LInstruction* H##type::CompileToLithium(LChunkBuilder* builder) {  \
+    return builder->Do##type(this);                                  \
+  }
+HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+
+const char* Representation::Mnemonic() const {
+  switch (kind_) {
+    case kNone: return "v";
+    case kTagged: return "t";
+    case kDouble: return "d";
+    case kInteger32: return "i";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
+  if (result > kMaxInt) {
+    *overflow = true;
+    return kMaxInt;
+  }
+  if (result < kMinInt) {
+    *overflow = true;
+    return kMinInt;
+  }
+  return static_cast<int32_t>(result);
+}
+
+
+static int32_t AddWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
+  int64_t result = static_cast<int64_t>(a) + static_cast<int64_t>(b);
+  return ConvertAndSetOverflow(result, overflow);
+}
+
+
+static int32_t SubWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
+  int64_t result = static_cast<int64_t>(a) - static_cast<int64_t>(b);
+  return ConvertAndSetOverflow(result, overflow);
+}
+
+
+static int32_t MulWithoutOverflow(int32_t a, int32_t b, bool* overflow) {
+  int64_t result = static_cast<int64_t>(a) * static_cast<int64_t>(b);
+  return ConvertAndSetOverflow(result, overflow);
+}
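
[Editor's aside] The three helpers above share one widen-then-saturate recipe:
compute in int64_t, then clamp to [kMinInt, kMaxInt] while recording whether
clamping happened. A quick usage sketch (assuming kMinInt/kMaxInt are the usual
INT32_MIN/INT32_MAX):

    bool overflow = false;
    int32_t sum = AddWithoutOverflow(2000000000, 2000000000, &overflow);
    // sum == kMaxInt (2147483647) and overflow == true: the exact result,
    // 4000000000, does not fit in a signed 32-bit integer.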
+
+
+int32_t Range::Mask() const {
+  if (lower_ == upper_) return lower_;
+  if (lower_ >= 0) {
+    int32_t res = 1;
+    while (res < upper_) {
+      res = (res << 1) | 1;
+    }
+    return res;
+  }
+  return 0xffffffff;
+}
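
[Editor's aside] Range::Mask() conservatively answers "which bits may be set?":
a singleton range is its own mask, a non-negative range widens upper_ to the
next 2^k - 1, and any range that may be negative gives up. For example:

    // [7, 7]  -> 0x7         (singleton: the value itself)
    // [0, 5]  -> 0x7         (values up to 5 use only the low three bits)
    // [-1, 3] -> 0xffffffff  (negative lower bound: any bit may be set)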
+
+
+void Range::AddConstant(int32_t value) {
+  if (value == 0) return;
+  bool may_overflow = false;  // Overflow is ignored here.
+  lower_ = AddWithoutOverflow(lower_, value, &may_overflow);
+  upper_ = AddWithoutOverflow(upper_, value, &may_overflow);
+  Verify();
+}
+
+
+bool Range::AddAndCheckOverflow(Range* other) {
+  bool may_overflow = false;
+  lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
+  upper_ = AddWithoutOverflow(upper_, other->upper(), &may_overflow);
+  KeepOrder();
+  Verify();
+  return may_overflow;
+}
+
+
+bool Range::SubAndCheckOverflow(Range* other) {
+  bool may_overflow = false;
+  lower_ = SubWithoutOverflow(lower_, other->upper(), &may_overflow);
+  upper_ = SubWithoutOverflow(upper_, other->lower(), &may_overflow);
+  KeepOrder();
+  Verify();
+  return may_overflow;
+}
+
+
+void Range::KeepOrder() {
+  if (lower_ > upper_) {
+    int32_t tmp = lower_;
+    lower_ = upper_;
+    upper_ = tmp;
+  }
+}
+
+
+void Range::Verify() const {
+  ASSERT(lower_ <= upper_);
+}
+
+
+bool Range::MulAndCheckOverflow(Range* other) {
+  bool may_overflow = false;
+  int v1 = MulWithoutOverflow(lower_, other->lower(), &may_overflow);
+  int v2 = MulWithoutOverflow(lower_, other->upper(), &may_overflow);
+  int v3 = MulWithoutOverflow(upper_, other->lower(), &may_overflow);
+  int v4 = MulWithoutOverflow(upper_, other->upper(), &may_overflow);
+  lower_ = Min(Min(v1, v2), Min(v3, v4));
+  upper_ = Max(Max(v1, v2), Max(v3, v4));
+  Verify();
+  return may_overflow;
+}
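
[Editor's aside] Because endpoints may be negative, MulAndCheckOverflow must
take the minimum and maximum over all four endpoint products; no single pair is
guaranteed to be extremal. A worked example (illustrative values):

    Range a(-3, 2);
    Range b(-4, 5);
    bool overflow = a.MulAndCheckOverflow(&b);
    // Products: 12, -15, -8, 10, so a becomes [-15, 12]: the minimum comes
    // from lower * upper and the maximum from lower * lower.  No product
    // exceeds 32 bits, so overflow == false.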
+
+
+const char* HType::ToString() {
+  switch (type_) {
+    case kTagged: return "tagged";
+    case kTaggedPrimitive: return "primitive";
+    case kTaggedNumber: return "number";
+    case kSmi: return "smi";
+    case kHeapNumber: return "heap-number";
+    case kString: return "string";
+    case kBoolean: return "boolean";
+    case kNonPrimitive: return "non-primitive";
+    case kJSArray: return "array";
+    case kJSObject: return "object";
+    case kUninitialized: return "uninitialized";
+  }
+  UNREACHABLE();
+  return "Unreachable code";
+}
+
+
+const char* HType::ToShortString() {
+  switch (type_) {
+    case kTagged: return "t";
+    case kTaggedPrimitive: return "p";
+    case kTaggedNumber: return "n";
+    case kSmi: return "m";
+    case kHeapNumber: return "h";
+    case kString: return "s";
+    case kBoolean: return "b";
+    case kNonPrimitive: return "r";
+    case kJSArray: return "a";
+    case kJSObject: return "o";
+    case kUninitialized: return "z";
+  }
+  UNREACHABLE();
+  return "Unreachable code";
+}
+
+
+HType HType::TypeFromValue(Handle<Object> value) {
+  HType result = HType::Tagged();
+  if (value->IsSmi()) {
+    result = HType::Smi();
+  } else if (value->IsHeapNumber()) {
+    result = HType::HeapNumber();
+  } else if (value->IsString()) {
+    result = HType::String();
+  } else if (value->IsBoolean()) {
+    result = HType::Boolean();
+  } else if (value->IsJSObject()) {
+    result = HType::JSObject();
+  } else if (value->IsJSArray()) {
+    result = HType::JSArray();
+  }
+  return result;
+}
+
+
+int HValue::LookupOperandIndex(int occurrence_index, HValue* op) const {
+  for (int i = 0; i < OperandCount(); ++i) {
+    if (OperandAt(i) == op) {
+      if (occurrence_index == 0) return i;
+      --occurrence_index;
+    }
+  }
+  return -1;
+}
+
+
+bool HValue::IsDefinedAfter(HBasicBlock* other) const {
+  return block()->block_id() > other->block_id();
+}
+
+
+bool HValue::UsesMultipleTimes(HValue* op) const {
+  bool seen = false;
+  for (int i = 0; i < OperandCount(); ++i) {
+    if (OperandAt(i) == op) {
+      if (seen) return true;
+      seen = true;
+    }
+  }
+  return false;
+}
+
+
+bool HValue::Equals(HValue* other) const {
+  if (other->opcode() != opcode()) return false;
+  if (!other->representation().Equals(representation())) return false;
+  if (!other->type_.Equals(type_)) return false;
+  if (OperandCount() != other->OperandCount()) return false;
+  for (int i = 0; i < OperandCount(); ++i) {
+    if (OperandAt(i)->id() != other->OperandAt(i)->id()) return false;
+  }
+  bool result = DataEquals(other);
+  ASSERT(!result || Hashcode() == other->Hashcode());
+  return result;
+}
+
+
+intptr_t HValue::Hashcode() const {
+  intptr_t result = opcode();
+  int count = OperandCount();
+  for (int i = 0; i < count; ++i) {
+    result = result * 19 + OperandAt(i)->id() + (result >> 7);
+  }
+  return result;
+}
+
+
+void HValue::SetOperandAt(int index, HValue* value) {
+  ASSERT(value == NULL || !value->representation().IsNone());
+  RegisterUse(index, value);
+  InternalSetOperandAt(index, value);
+}
+
+
+void HValue::ReplaceAndDelete(HValue* other) {
+  ReplaceValue(other);
+  Delete();
+}
+
+
+void HValue::ReplaceValue(HValue* other) {
+  ZoneList<HValue*> start_uses(2);
+  for (int i = 0; i < uses_.length(); ++i) {
+    HValue* use = uses_.at(i);
+    if (!use->block()->IsStartBlock()) {
+      InternalReplaceAtUse(use, other);
+      other->uses_.Add(use);
+    } else {
+      start_uses.Add(use);
+    }
+  }
+  uses_.Clear();
+  uses_.AddAll(start_uses);
+}
+
+
+void HValue::ClearOperands() {
+  for (int i = 0; i < OperandCount(); ++i) {
+    SetOperandAt(i, NULL);
+  }
+}
+
+
+void HValue::Delete() {
+  ASSERT(HasNoUses());
+  ClearOperands();
+  DeleteFromGraph();
+}
+
+
+void HValue::ReplaceAtUse(HValue* use, HValue* other) {
+  for (int i = 0; i < use->OperandCount(); ++i) {
+    if (use->OperandAt(i) == this) {
+      use->SetOperandAt(i, other);
+    }
+  }
+}
+
+
+void HValue::ReplaceFirstAtUse(HValue* use, HValue* other, Representation r) {
+  for (int i = 0; i < use->OperandCount(); ++i) {
+    if (use->RequiredInputRepresentation(i).Equals(r) &&
+        use->OperandAt(i) == this) {
+      use->SetOperandAt(i, other);
+      return;
+    }
+  }
+}
+
+
+void HValue::InternalReplaceAtUse(HValue* use, HValue* other) {
+  for (int i = 0; i < use->OperandCount(); ++i) {
+    if (use->OperandAt(i) == this) {
+      // Call internal method that does not update use lists. The caller is
+      // responsible for doing so.
+      use->InternalSetOperandAt(i, other);
+    }
+  }
+}
+
+
+void HValue::SetBlock(HBasicBlock* block) {
+  ASSERT(block_ == NULL || block == NULL);
+  block_ = block;
+  if (id_ == kNoNumber && block != NULL) {
+    id_ = block->graph()->GetNextValueID(this);
+  }
+}
+
+
+void HValue::PrintTypeTo(HType type, StringStream* stream) {
+  stream->Add(type.ToShortString());
+}
+
+
+void HValue::PrintNameTo(StringStream* stream) {
+  stream->Add("%s%d", representation_.Mnemonic(), id());
+}
+
+
+bool HValue::UpdateInferredType() {
+  HType type = CalculateInferredType();
+  bool result = (!type.Equals(type_));
+  type_ = type;
+  return result;
+}
+
+
+void HValue::RegisterUse(int index, HValue* new_value) {
+  HValue* old_value = OperandAt(index);
+  if (old_value == new_value) return;
+  if (old_value != NULL) {
+    ASSERT(old_value->uses_.Contains(this));
+    old_value->uses_.RemoveElement(this);
+  }
+  if (new_value != NULL) {
+    new_value->uses_.Add(this);
+  }
+}
+
+
+void HValue::AddNewRange(Range* r) {
+  if (!HasRange()) ComputeInitialRange();
+  if (!HasRange()) range_ = new Range();
+  ASSERT(HasRange());
+  r->StackUpon(range_);
+  range_ = r;
+}
+
+
+void HValue::RemoveLastAddedRange() {
+  ASSERT(HasRange());
+  ASSERT(range_->next() != NULL);
+  range_ = range_->next();
+}
+
+
+void HValue::ComputeInitialRange() {
+  ASSERT(!HasRange());
+  range_ = InferRange();
+  ASSERT(HasRange());
+}
+
+
+void HInstruction::PrintTo(StringStream* stream) const {
+  stream->Add("%s", Mnemonic());
+  if (HasSideEffects()) stream->Add("*");
+  stream->Add(" ");
+  PrintDataTo(stream);
+
+  if (range() != NULL) {
+    stream->Add(" range[%d,%d,m0=%d]",
+                range()->lower(),
+                range()->upper(),
+                static_cast<int>(range()->CanBeMinusZero()));
+  }
+
+  int changes_flags = (flags() & HValue::ChangesFlagsMask());
+  if (changes_flags != 0) {
+    stream->Add(" changes[0x%x]", changes_flags);
+  }
+
+  if (representation().IsTagged() && !type().Equals(HType::Tagged())) {
+    stream->Add(" type[%s]", type().ToString());
+  }
+}
+
+
+void HInstruction::Unlink() {
+  ASSERT(IsLinked());
+  ASSERT(!IsControlInstruction());  // Must never move control instructions.
+  clear_block();
+  if (previous_ != NULL) previous_->next_ = next_;
+  if (next_ != NULL) next_->previous_ = previous_;
+}
+
+
+void HInstruction::InsertBefore(HInstruction* next) {
+  ASSERT(!IsLinked());
+  ASSERT(!next->IsBlockEntry());
+  ASSERT(!IsControlInstruction());
+  ASSERT(!next->block()->IsStartBlock());
+  ASSERT(next->previous_ != NULL);
+  HInstruction* prev = next->previous();
+  prev->next_ = this;
+  next->previous_ = this;
+  next_ = next;
+  previous_ = prev;
+  SetBlock(next->block());
+}
+
+
+void HInstruction::InsertAfter(HInstruction* previous) {
+  ASSERT(!IsLinked());
+  ASSERT(!previous->IsControlInstruction());
+  ASSERT(!IsControlInstruction() || previous->next_ == NULL);
+  HBasicBlock* block = previous->block();
+  // Never insert anything except constants into the start block after
+  // finishing it.
+  if (block->IsStartBlock() && block->IsFinished() && !IsConstant()) {
+    ASSERT(block->end()->SecondSuccessor() == NULL);
+    InsertAfter(block->end()->FirstSuccessor()->first());
+    return;
+  }
+
+  // If we're inserting after an instruction with side-effects that is
+  // followed by a simulate instruction, we need to insert after the
+  // simulate instruction instead.
+  HInstruction* next = previous->next_;
+  if (previous->HasSideEffects() && next != NULL) {
+    ASSERT(next->IsSimulate());
+    previous = next;
+    next = previous->next_;
+  }
+
+  previous_ = previous;
+  next_ = next;
+  SetBlock(block);
+  previous->next_ = this;
+  if (next != NULL) next->previous_ = this;
+}
+
+
+#ifdef DEBUG
+void HInstruction::Verify() const {
+  // Verify that input operands are defined before use.
+  HBasicBlock* cur_block = block();
+  for (int i = 0; i < OperandCount(); ++i) {
+    HValue* other_operand = OperandAt(i);
+    HBasicBlock* other_block = other_operand->block();
+    if (cur_block == other_block) {
+      if (!other_operand->IsPhi()) {
+        HInstruction* cur = cur_block->first();
+        while (cur != NULL) {
+          ASSERT(cur != this);  // We should reach other_operand before!
+          if (cur == other_operand) break;
+          cur = cur->next();
+        }
+        // Must reach other operand in the same block!
+        ASSERT(cur == other_operand);
+      }
+    } else {
+      ASSERT(other_block->Dominates(cur_block));
+    }
+  }
+
+  // Verify that instructions that may have side-effects are followed
+  // by a simulate instruction.
+  if (HasSideEffects() && !IsOsrEntry()) {
+    ASSERT(next()->IsSimulate());
+  }
+}
+#endif
+
+
+HCall::HCall(int count) : arguments_(Zone::NewArray<HValue*>(count), count) {
+  for (int i = 0; i < count; ++i) arguments_[i] = NULL;
+  set_representation(Representation::Tagged());
+  SetFlagMask(AllSideEffects());
+}
+
+
+void HCall::PrintDataTo(StringStream* stream) const {
+  stream->Add("(");
+  for (int i = 0; i < arguments_.length(); ++i) {
+    if (i != 0) stream->Add(", ");
+    arguments_.at(i)->PrintNameTo(stream);
+  }
+  stream->Add(")");
+}
+
+
+void HClassOfTest::PrintDataTo(StringStream* stream) const {
+  stream->Add("class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\")", *class_name());
+}
+
+
+void HAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+  arguments()->PrintNameTo(stream);
+  stream->Add("[");
+  index()->PrintNameTo(stream);
+  stream->Add("], length ");
+  length()->PrintNameTo(stream);
+}
+
+
+void HCall::SetArgumentAt(int index, HPushArgument* push_argument) {
+  push_argument->set_argument_index(index);
+  SetOperandAt(index, push_argument);
+}
+
+
+void HCallConstantFunction::PrintDataTo(StringStream* stream) const {
+  if (IsApplyFunction()) {
+    stream->Add("SPECIAL function: apply");
+  } else {
+    stream->Add("%s", *(function()->shared()->DebugName()->ToCString()));
+  }
+  HCall::PrintDataTo(stream);
+}
+
+
+void HBranch::PrintDataTo(StringStream* stream) const {
+  int first_id = FirstSuccessor()->block_id();
+  int second_id = SecondSuccessor()->block_id();
+  stream->Add("on ");
+  value()->PrintNameTo(stream);
+  stream->Add(" (B%d, B%d)", first_id, second_id);
+}
+
+
+void HCompareMapAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("on ");
+  value()->PrintNameTo(stream);
+  stream->Add(" (%p)", *map());
+}
+
+
+void HGoto::PrintDataTo(StringStream* stream) const {
+  stream->Add("B%d", FirstSuccessor()->block_id());
+}
+
+
+void HReturn::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+}
+
+
+void HThrow::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+}
+
+
+const char* HUnaryMathOperation::OpName() const {
+  switch (op()) {
+    case kMathFloor: return "floor";
+    case kMathRound: return "round";
+    case kMathCeil: return "ceil";
+    case kMathAbs: return "abs";
+    case kMathLog: return "log";
+    case kMathSin: return "sin";
+    case kMathCos: return "cos";
+    case kMathTan: return "tan";
+    case kMathASin: return "asin";
+    case kMathACos: return "acos";
+    case kMathATan: return "atan";
+    case kMathExp: return "exp";
+    case kMathSqrt: return "sqrt";
+    default: break;
+  }
+  return "(unknown operation)";
+}
+
+
+void HUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+  const char* name = OpName();
+  stream->Add("%s ", name);
+  value()->PrintNameTo(stream);
+}
+
+
+void HUnaryOperation::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+}
+
+
+void HHasInstanceType::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+  switch (from_) {
+    case FIRST_JS_OBJECT_TYPE:
+      if (to_ == LAST_TYPE) stream->Add(" spec_object");
+      break;
+    case JS_REGEXP_TYPE:
+      if (to_ == JS_REGEXP_TYPE) stream->Add(" reg_exp");
+      break;
+    case JS_ARRAY_TYPE:
+      if (to_ == JS_ARRAY_TYPE) stream->Add(" array");
+      break;
+    case JS_FUNCTION_TYPE:
+      if (to_ == JS_FUNCTION_TYPE) stream->Add(" function");
+      break;
+    default:
+      break;
+  }
+}
+
+
+void HTypeofIs::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+  stream->Add(" == ");
+  stream->Add(type_literal_->ToAsciiVector());
+}
+
+
+void HPushArgument::PrintDataTo(StringStream* stream) const {
+  HUnaryOperation::PrintDataTo(stream);
+  if (argument_index() != -1) {
+    stream->Add(" [%d]", argument_index_);
+  }
+}
+
+
+void HChange::PrintDataTo(StringStream* stream) const {
+  HUnaryOperation::PrintDataTo(stream);
+  stream->Add(" %s to %s", from_.Mnemonic(), to_.Mnemonic());
+
+  if (CanTruncateToInt32()) stream->Add(" truncating-int32");
+  if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
+}
+
+
+HCheckInstanceType* HCheckInstanceType::NewIsJSObjectOrJSFunction(
+    HValue* value)  {
+  STATIC_ASSERT((LAST_JS_OBJECT_TYPE + 1) == JS_FUNCTION_TYPE);
+  return new HCheckInstanceType(value, FIRST_JS_OBJECT_TYPE, JS_FUNCTION_TYPE);
+}
+
+
+void HCheckMap::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+  stream->Add(" %p", *map());
+}
+
+
+void HCheckFunction::PrintDataTo(StringStream* stream) const {
+  value()->PrintNameTo(stream);
+  stream->Add(" %p", *target());
+}
+
+
+void HCallKeyed::PrintDataTo(StringStream* stream) const {
+  stream->Add("[");
+  key()->PrintNameTo(stream);
+  stream->Add("](");
+  for (int i = 1; i < arguments_.length(); ++i) {
+    if (i != 1) stream->Add(", ");
+    arguments_.at(i)->PrintNameTo(stream);
+  }
+  stream->Add(")");
+}
+
+
+void HCallNamed::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s ", *name_string);
+  HCall::PrintDataTo(stream);
+}
+
+
+void HCallGlobal::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s ", *name_string);
+  HCall::PrintDataTo(stream);
+}
+
+
+void HCallRuntime::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s ", *name_string);
+  HCall::PrintDataTo(stream);
+}
+
+
+void HCallStub::PrintDataTo(StringStream* stream) const {
+  stream->Add("%s(%d)",
+              CodeStub::MajorName(major_key_, false),
+              argument_count_);
+}
+
+
+Range* HValue::InferRange() {
+  if (representation().IsTagged()) {
+    // Tagged values are always in int32 range when converted to integer,
+    // but they can contain -0.
+    Range* result = new Range();
+    result->set_can_be_minus_zero(true);
+    return result;
+  } else if (representation().IsNone()) {
+    return NULL;
+  } else {
+    return new Range();
+  }
+}
+
+
+Range* HConstant::InferRange() {
+  if (has_int32_value_) {
+    Range* result = new Range(int32_value_, int32_value_);
+    result->set_can_be_minus_zero(false);
+    return result;
+  }
+  return HInstruction::InferRange();
+}
+
+
+Range* HPhi::InferRange() {
+  if (representation().IsInteger32()) {
+    if (block()->IsLoopHeader()) {
+      Range* range = new Range(kMinInt, kMaxInt);
+      return range;
+    } else {
+      Range* range = OperandAt(0)->range()->Copy();
+      for (int i = 1; i < OperandCount(); ++i) {
+        range->Union(OperandAt(i)->range());
+      }
+      return range;
+    }
+  } else {
+    return HValue::InferRange();
+  }
+}
+
+
+Range* HAdd::InferRange() {
+  if (representation().IsInteger32()) {
+    Range* a = left()->range();
+    Range* b = right()->range();
+    Range* res = a->Copy();
+    if (!res->AddAndCheckOverflow(b)) {
+      ClearFlag(kCanOverflow);
+    }
+    bool m0 = a->CanBeMinusZero() && b->CanBeMinusZero();
+    res->set_can_be_minus_zero(m0);
+    return res;
+  } else {
+    return HArithmeticBinaryOperation::InferRange();
+  }
+}
+
+
+Range* HSub::InferRange() {
+  if (representation().IsInteger32()) {
+    Range* a = left()->range();
+    Range* b = right()->range();
+    Range* res = a->Copy();
+    if (!res->SubAndCheckOverflow(b)) {
+      ClearFlag(kCanOverflow);
+    }
+    res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
+    return res;
+  } else {
+    return HArithmeticBinaryOperation::InferRange();
+  }
+}
+
+
+Range* HMul::InferRange() {
+  if (representation().IsInteger32()) {
+    Range* a = left()->range();
+    Range* b = right()->range();
+    Range* res = a->Copy();
+    if (!res->MulAndCheckOverflow(b)) {
+      ClearFlag(kCanOverflow);
+    }
+    bool m0 = (a->CanBeZero() && b->CanBeNegative()) ||
+        (a->CanBeNegative() && b->CanBeZero());
+    res->set_can_be_minus_zero(m0);
+    return res;
+  } else {
+    return HArithmeticBinaryOperation::InferRange();
+  }
+}
+
+
+Range* HDiv::InferRange() {
+  if (representation().IsInteger32()) {
+    Range* result = new Range();
+    if (left()->range()->CanBeMinusZero()) {
+      result->set_can_be_minus_zero(true);
+    }
+
+    if (left()->range()->CanBeZero() && right()->range()->CanBeNegative()) {
+      result->set_can_be_minus_zero(true);
+    }
+
+    if (right()->range()->Includes(-1) && left()->range()->Includes(kMinInt)) {
+      SetFlag(HValue::kCanOverflow);
+    }
+
+    if (!right()->range()->CanBeZero()) {
+      ClearFlag(HValue::kCanBeDivByZero);
+    }
+    return result;
+  } else {
+    return HArithmeticBinaryOperation::InferRange();
+  }
+}
+
+
+Range* HMod::InferRange() {
+  if (representation().IsInteger32()) {
+    Range* a = left()->range();
+    Range* result = new Range();
+    if (a->CanBeMinusZero() || a->CanBeNegative()) {
+      result->set_can_be_minus_zero(true);
+    }
+    if (!right()->range()->CanBeZero()) {
+      ClearFlag(HValue::kCanBeDivByZero);
+    }
+    return result;
+  } else {
+    return HArithmeticBinaryOperation::InferRange();
+  }
+}
+
+
+void HPhi::PrintTo(StringStream* stream) const {
+  stream->Add("[");
+  for (int i = 0; i < OperandCount(); ++i) {
+    HValue* value = OperandAt(i);
+    stream->Add(" ");
+    value->PrintNameTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add(" uses%d_%di_%dd_%dt]",
+              uses()->length(),
+              int32_non_phi_uses() + int32_indirect_uses(),
+              double_non_phi_uses() + double_indirect_uses(),
+              tagged_non_phi_uses() + tagged_indirect_uses());
+}
+
+
+void HPhi::AddInput(HValue* value) {
+  inputs_.Add(NULL);
+  SetOperandAt(OperandCount() - 1, value);
+  // Mark phis that may have 'arguments' as an operand, directly or indirectly.
+  if (!CheckFlag(kIsArguments) && value->CheckFlag(kIsArguments)) {
+    SetFlag(kIsArguments);
+  }
+}
+
+
+bool HPhi::HasReceiverOperand() {
+  for (int i = 0; i < OperandCount(); i++) {
+    if (OperandAt(i)->IsParameter() &&
+        HParameter::cast(OperandAt(i))->index() == 0) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+HValue* HPhi::GetRedundantReplacement() const {
+  HValue* candidate = NULL;
+  int count = OperandCount();
+  int position = 0;
+  while (position < count && candidate == NULL) {
+    HValue* current = OperandAt(position++);
+    if (current != this) candidate = current;
+  }
+  while (position < count) {
+    HValue* current = OperandAt(position++);
+    if (current != this && current != candidate) return NULL;
+  }
+  ASSERT(candidate != this);
+  return candidate;
+}
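
[Editor's aside] GetRedundantReplacement is the standard SSA phi
simplification: a phi whose operands are all either the phi itself or one
other value v is redundant and may be replaced by v. Illustratively:

    // x = phi(y, y, x)  -> redundant, returns y
    // x = phi(y, z)     -> not redundant, returns NULL
    // x = phi(x, x)     -> returns NULL (candidate never leaves NULL)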
+
+
+void HPhi::DeleteFromGraph() {
+  ASSERT(block() != NULL);
+  block()->RemovePhi(this);
+  ASSERT(block() == NULL);
+}
+
+
+void HPhi::InitRealUses(int phi_id) {
+  // Initialize real uses.
+  phi_id_ = phi_id;
+  for (int j = 0; j < uses()->length(); j++) {
+    HValue* use = uses()->at(j);
+    if (!use->IsPhi()) {
+      int index = use->LookupOperandIndex(0, this);
+      Representation req_rep = use->RequiredInputRepresentation(index);
+      non_phi_uses_[req_rep.kind()]++;
+    }
+  }
+}
+
+
+void HPhi::AddNonPhiUsesFrom(HPhi* other) {
+  for (int i = 0; i < Representation::kNumRepresentations; i++) {
+    indirect_uses_[i] += other->non_phi_uses_[i];
+  }
+}
+
+
+void HPhi::AddIndirectUsesTo(int* dest) {
+  for (int i = 0; i < Representation::kNumRepresentations; i++) {
+    dest[i] += indirect_uses_[i];
+  }
+}
+
+
+void HSimulate::PrintDataTo(StringStream* stream) const {
+  stream->Add("id=%d ", ast_id());
+  if (pop_count_ > 0) stream->Add("pop %d", pop_count_);
+  if (values_.length() > 0) {
+    if (pop_count_ > 0) stream->Add(" /");
+    for (int i = 0; i < values_.length(); ++i) {
+      if (!HasAssignedIndexAt(i)) {
+        stream->Add(" push ");
+      } else {
+        stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
+      }
+      values_[i]->PrintNameTo(stream);
+    }
+  }
+}
+
+
+void HEnterInlined::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name = function()->debug_name()->ToCString();
+  stream->Add("%s, id=%d", *name, function()->id());
+}
+
+
+HConstant::HConstant(Handle<Object> handle, Representation r)
+    : handle_(handle),
+      constant_type_(HType::TypeFromValue(handle)),
+      has_int32_value_(false),
+      int32_value_(0),
+      has_double_value_(false),
+      double_value_(0)  {
+  set_representation(r);
+  SetFlag(kUseGVN);
+  if (handle_->IsNumber()) {
+    double n = handle_->Number();
+    has_int32_value_ = static_cast<double>(static_cast<int32_t>(n)) == n;
+    if (has_int32_value_) int32_value_ = static_cast<int32_t>(n);
+    double_value_ = n;
+    has_double_value_ = true;
+  }
+}
+
+
+HConstant* HConstant::CopyToRepresentation(Representation r) const {
+  if (r.IsInteger32() && !has_int32_value_) return NULL;
+  if (r.IsDouble() && !has_double_value_) return NULL;
+  return new HConstant(handle_, r);
+}
+
+
+HConstant* HConstant::CopyToTruncatedInt32() const {
+  if (!has_double_value_) return NULL;
+  int32_t truncated = NumberToInt32(*handle_);
+  return new HConstant(Factory::NewNumberFromInt(truncated),
+                       Representation::Integer32());
+}
+
+
+void HConstant::PrintDataTo(StringStream* stream) const {
+  handle()->ShortPrint(stream);
+}
+
+
+bool HArrayLiteral::IsCopyOnWrite() const {
+  return constant_elements()->map() == Heap::fixed_cow_array_map();
+}
+
+
+void HBinaryOperation::PrintDataTo(StringStream* stream) const {
+  left()->PrintNameTo(stream);
+  stream->Add(" ");
+  right()->PrintNameTo(stream);
+  if (CheckFlag(kCanOverflow)) stream->Add(" !");
+  if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
+}
+
+
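+// When the sign bit of the combined mask is clear, the result of the AND is
+// known to be non-negative; e.g. masking with a constant 0xff bounds the
+// result to [0, 0xff].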
+Range* HBitAnd::InferRange() {
+  Range* a = left()->range();
+  Range* b = right()->range();
+  int32_t a_mask = 0xffffffff;
+  int32_t b_mask = 0xffffffff;
+  if (a != NULL) a_mask = a->Mask();
+  if (b != NULL) b_mask = b->Mask();
+  int32_t result_mask = a_mask & b_mask;
+  if (result_mask >= 0) {
+    return new Range(0, result_mask);
+  } else {
+    return HBinaryOperation::InferRange();
+  }
+}
+
+
+Range* HBitOr::InferRange() {
+  Range* a = left()->range();
+  Range* b = right()->range();
+  int32_t a_mask = 0xffffffff;
+  int32_t b_mask = 0xffffffff;
+  if (a != NULL) a_mask = a->Mask();
+  if (b != NULL) b_mask = b->Mask();
+  int32_t result_mask = a_mask | b_mask;
+  if (result_mask >= 0) {
+    return new Range(0, result_mask);
+  } else {
+    return HBinaryOperation::InferRange();
+  }
+}
+
+
+Range* HSar::InferRange() {
+  if (right()->IsConstant()) {
+    HConstant* c = HConstant::cast(right());
+    if (c->HasInteger32Value()) {
+      int32_t val = c->Integer32Value();
+      Range* result = NULL;
+      Range* left_range = left()->range();
+      if (left_range == NULL) {
+        result = new Range();
+      } else {
+        result = left_range->Copy();
+      }
+      result->Sar(val);
+      return result;
+    }
+  }
+
+  return HBinaryOperation::InferRange();
+}
+
+
+Range* HShl::InferRange() {
+  if (right()->IsConstant()) {
+    HConstant* c = HConstant::cast(right());
+    if (c->HasInteger32Value()) {
+      int32_t val = c->Integer32Value();
+      Range* result = NULL;
+      Range* left_range = left()->range();
+      if (left_range == NULL) {
+        result = new Range();
+      } else {
+        result = left_range->Copy();
+      }
+      result->Shl(val);
+      return result;
+    }
+  }
+
+  return HBinaryOperation::InferRange();
+}
+
+
+void HCompare::PrintDataTo(StringStream* stream) const {
+  stream->Add(Token::Name(token()));
+  stream->Add(" ");
+  HBinaryOperation::PrintDataTo(stream);
+}
+
+
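+// A tagged comparison may call back into user-visible code (e.g. valueOf
+// during ToPrimitive), so it is flagged with arbitrary side effects and
+// excluded from GVN; a specialized int32 or double comparison is pure.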
+void HCompare::SetInputRepresentation(Representation r) {
+  input_representation_ = r;
+  if (r.IsTagged()) {
+    SetFlagMask(AllSideEffects());
+    ClearFlag(kUseGVN);
+  } else {
+    ClearFlagMask(AllSideEffects());
+    SetFlag(kUseGVN);
+  }
+}
+
+
+void HParameter::PrintDataTo(StringStream* stream) const {
+  stream->Add("%u", index());
+}
+
+
+void HLoadNamedField::PrintDataTo(StringStream* stream) const {
+  object()->PrintNameTo(stream);
+  stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
+}
+
+
+void HLoadKeyed::PrintDataTo(StringStream* stream) const {
+  object()->PrintNameTo(stream);
+  stream->Add("[");
+  key()->PrintNameTo(stream);
+  stream->Add("]");
+}
+
+
+void HStoreNamed::PrintDataTo(StringStream* stream) const {
+  object()->PrintNameTo(stream);
+  stream->Add(".");
+  ASSERT(name()->IsString());
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" = ");
+  value()->PrintNameTo(stream);
+}
+
+
+void HStoreNamedField::PrintDataTo(StringStream* stream) const {
+  HStoreNamed::PrintDataTo(stream);
+  if (!transition().is_null()) {
+    stream->Add(" (transition map %p)", *transition());
+  }
+}
+
+
+void HStoreKeyed::PrintDataTo(StringStream* stream) const {
+  object()->PrintNameTo(stream);
+  stream->Add("[");
+  key()->PrintNameTo(stream);
+  stream->Add("] = ");
+  value()->PrintNameTo(stream);
+}
+
+
+void HLoadGlobal::PrintDataTo(StringStream* stream) const {
+  stream->Add("[%p]", *cell());
+  if (check_hole_value()) stream->Add(" (deletable/read-only)");
+}
+
+
+void HStoreGlobal::PrintDataTo(StringStream* stream) const {
+  stream->Add("[%p] = ", *cell());
+  value()->PrintNameTo(stream);
+}
+
+
+// Implementation of type inference and type conversions. Calculates
+// the inferred type of this instruction based on the input operands.
+
+HType HValue::CalculateInferredType() const {
+  return type_;
+}
+
+
+HType HCheckMap::CalculateInferredType() const {
+  return value()->type();
+}
+
+
+HType HCheckFunction::CalculateInferredType() const {
+  return value()->type();
+}
+
+
+HType HCheckNonSmi::CalculateInferredType() const {
+  // TODO(kasperl): Is there any way to signal that this isn't a smi?
+  return HType::Tagged();
+}
+
+
+HType HCheckSmi::CalculateInferredType() const {
+  return HType::Smi();
+}
+
+
+HType HPhi::CalculateInferredType() const {
+  HType result = HType::Uninitialized();
+  for (int i = 0; i < OperandCount(); ++i) {
+    HType current = OperandAt(i)->type();
+    result = result.Combine(current);
+  }
+  return result;
+}
+
+
+HType HConstant::CalculateInferredType() const {
+  return constant_type_;
+}
+
+
+HType HCompare::CalculateInferredType() const {
+  return HType::Boolean();
+}
+
+
+HType HCompareJSObjectEq::CalculateInferredType() const {
+  return HType::Boolean();
+}
+
+
+HType HUnaryPredicate::CalculateInferredType() const {
+  return HType::Boolean();
+}
+
+
+HType HArithmeticBinaryOperation::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HAdd::CalculateInferredType() const {
+  return HType::Tagged();
+}
+
+
+HType HBitAnd::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HBitXor::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HBitOr::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HBitNot::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HUnaryMathOperation::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HShl::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HShr::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HType HSar::CalculateInferredType() const {
+  return HType::TaggedNumber();
+}
+
+
+HValue* HUnaryMathOperation::EnsureAndPropagateNotMinusZero(
+    BitVector* visited) {
+  visited->Add(id());
+  if (representation().IsInteger32() &&
+      !value()->representation().IsInteger32()) {
+    if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
+      SetFlag(kBailoutOnMinusZero);
+    }
+  }
+  if (RequiredInputRepresentation(0).IsInteger32() &&
+      representation().IsInteger32()) {
+    return value();
+  }
+  return NULL;
+}
+
+
+HValue* HChange::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+  visited->Add(id());
+  if (from().IsInteger32()) return NULL;
+  if (CanTruncateToInt32()) return NULL;
+  if (value()->range() == NULL || value()->range()->CanBeMinusZero()) {
+    SetFlag(kBailoutOnMinusZero);
+  }
+  ASSERT(!from().IsInteger32() || !to().IsInteger32());
+  return NULL;
+}
+
+
+HValue* HMod::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+  visited->Add(id());
+  if (range() == NULL || range()->CanBeMinusZero()) {
+    SetFlag(kBailoutOnMinusZero);
+    return left();
+  }
+  return NULL;
+}
+
+
+HValue* HDiv::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+  visited->Add(id());
+  if (range() == NULL || range()->CanBeMinusZero()) {
+    SetFlag(kBailoutOnMinusZero);
+  }
+  return NULL;
+}
+
+
+HValue* HMul::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+  visited->Add(id());
+  if (range() == NULL || range()->CanBeMinusZero()) {
+    SetFlag(kBailoutOnMinusZero);
+  }
+  return NULL;
+}
+
+
+HValue* HSub::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+  visited->Add(id());
+  // Propagate to the left argument. If the left argument cannot be -0, then
+  // the result of the subtract operation cannot be either.
+  if (range() == NULL || range()->CanBeMinusZero()) {
+    return left();
+  }
+  return NULL;
+}
+
+
+HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
+  visited->Add(id());
+  // Propagate to the left argument. If the left argument cannot be -0, then
+  // the result of the add operation cannot be either.
+  if (range() == NULL || range()->CanBeMinusZero()) {
+    return left();
+  }
+  return NULL;
+}
+
+
+// Node-specific verification code is only included in debug mode.
+#ifdef DEBUG
+
+void HPhi::Verify() const {
+  ASSERT(OperandCount() == block()->predecessors()->length());
+  for (int i = 0; i < OperandCount(); ++i) {
+    HValue* value = OperandAt(i);
+    HBasicBlock* defining_block = value->block();
+    HBasicBlock* predecessor_block = block()->predecessors()->at(i);
+    ASSERT(defining_block == predecessor_block ||
+           defining_block->Dominates(predecessor_block));
+  }
+}
+
+
+void HSimulate::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasAstId());
+}
+
+
+void HBoundsCheck::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+
+void HCheckSmi::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+
+void HCheckNonSmi::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+
+void HCheckInstanceType::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+
+void HCheckMap::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+
+void HCheckFunction::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+
+void HCheckPrototypeMaps::Verify() const {
+  HInstruction::Verify();
+  ASSERT(HasNoUses());
+}
+
+#endif
+
+} }  // namespace v8::internal
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
new file mode 100644
index 0000000..cbbe8fc
--- /dev/null
+++ b/src/hydrogen-instructions.h
@@ -0,0 +1,2953 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_INSTRUCTIONS_H_
+#define V8_HYDROGEN_INSTRUCTIONS_H_
+
+#include "v8.h"
+#include "code-stubs.h"
+#include "string-stream.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class HBasicBlock;
+class HEnvironment;
+class HInstruction;
+class HLoopInformation;
+class HValue;
+class LInstruction;
+class LChunkBuilder;
+
+
+// Type hierarchy:
+//
+// HValue
+//   HInstruction
+//     HAccessArgumentsAt
+//     HApplyArguments
+//     HArgumentsElements
+//     HArgumentsLength
+//     HArgumentsObject
+//     HBinaryOperation
+//       HArithmeticBinaryOperation
+//         HAdd
+//         HDiv
+//         HMod
+//         HMul
+//         HSub
+//       HBitwiseBinaryOperation
+//         HBitAnd
+//         HBitOr
+//         HBitXor
+//         HSar
+//         HShl
+//         HShr
+//       HBoundsCheck
+//       HCompare
+//       HCompareJSObjectEq
+//       HInstanceOf
+//       HLoadKeyed
+//         HLoadKeyedFastElement
+//         HLoadKeyedGeneric
+//       HLoadNamedGeneric
+//       HPower
+//       HStoreNamed
+//         HStoreNamedField
+//         HStoreNamedGeneric
+//     HBlockEntry
+//     HCall
+//       HCallConstantFunction
+//       HCallFunction
+//       HCallGlobal
+//       HCallKeyed
+//       HCallKnownGlobal
+//       HCallNamed
+//       HCallNew
+//       HCallRuntime
+//     HCallStub
+//     HConstant
+//     HControlInstruction
+//       HDeoptimize
+//       HGoto
+//       HUnaryControlInstruction
+//         HBranch
+//         HCompareMapAndBranch
+//         HReturn
+//         HThrow
+//     HEnterInlined
+//     HFunctionLiteral
+//     HGlobalObject
+//     HGlobalReceiver
+//     HLeaveInlined
+//     HLoadGlobal
+//     HMaterializedLiteral
+//       HArrayLiteral
+//       HObjectLiteral
+//       HRegExpLiteral
+//     HOsrEntry
+//     HParameter
+//     HSimulate
+//     HStackCheck
+//     HStoreKeyed
+//       HStoreKeyedFastElement
+//       HStoreKeyedGeneric
+//     HUnaryOperation
+//       HArrayLength
+//       HBitNot
+//       HChange
+//       HCheckFunction
+//       HCheckInstanceType
+//       HCheckMap
+//       HCheckNonSmi
+//       HCheckPrototypeMaps
+//       HCheckSmi
+//       HDeleteProperty
+//       HLoadElements
+//       HTypeofIs
+//       HLoadNamedField
+//       HPushArgument
+//       HStoreGlobal
+//       HTypeof
+//       HUnaryMathOperation
+//       HUnaryPredicate
+//         HClassOfTest
+//         HHasCachedArrayIndex
+//         HHasInstanceType
+//         HIsNull
+//         HIsObject
+//         HIsSmi
+//       HValueOf
+//     HUnknownOSRValue
+//   HPhi
+
+#define HYDROGEN_ALL_INSTRUCTION_LIST(V)       \
+  V(ArithmeticBinaryOperation)                 \
+  V(BinaryOperation)                           \
+  V(BitwiseBinaryOperation)                    \
+  V(Call)                                      \
+  V(ControlInstruction)                        \
+  V(Instruction)                               \
+  V(LoadKeyed)                                 \
+  V(MaterializedLiteral)                       \
+  V(Phi)                                       \
+  V(StoreKeyed)                                \
+  V(StoreNamed)                                \
+  V(UnaryControlInstruction)                   \
+  V(UnaryOperation)                            \
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)  \
+  V(AccessArgumentsAt)                         \
+  V(Add)                                       \
+  V(ApplyArguments)                            \
+  V(ArgumentsElements)                         \
+  V(ArgumentsLength)                           \
+  V(ArgumentsObject)                           \
+  V(ArrayLength)                               \
+  V(ArrayLiteral)                              \
+  V(BitAnd)                                    \
+  V(BitNot)                                    \
+  V(BitOr)                                     \
+  V(BitXor)                                    \
+  V(BlockEntry)                                \
+  V(BoundsCheck)                               \
+  V(Branch)                                    \
+  V(CallConstantFunction)                      \
+  V(CallFunction)                              \
+  V(CallGlobal)                                \
+  V(CallKeyed)                                 \
+  V(CallKnownGlobal)                           \
+  V(CallNamed)                                 \
+  V(CallNew)                                   \
+  V(CallRuntime)                               \
+  V(CallStub)                                  \
+  V(Change)                                    \
+  V(CheckFunction)                             \
+  V(CheckInstanceType)                         \
+  V(CheckMap)                                  \
+  V(CheckNonSmi)                               \
+  V(CheckPrototypeMaps)                        \
+  V(CheckSmi)                                  \
+  V(Compare)                                   \
+  V(CompareJSObjectEq)                         \
+  V(CompareMapAndBranch)                       \
+  V(Constant)                                  \
+  V(DeleteProperty)                            \
+  V(Deoptimize)                                \
+  V(Div)                                       \
+  V(EnterInlined)                              \
+  V(FunctionLiteral)                           \
+  V(GlobalObject)                              \
+  V(GlobalReceiver)                            \
+  V(Goto)                                      \
+  V(InstanceOf)                                \
+  V(IsNull)                                    \
+  V(IsObject)                                  \
+  V(IsSmi)                                     \
+  V(HasInstanceType)                           \
+  V(HasCachedArrayIndex)                       \
+  V(ClassOfTest)                               \
+  V(LeaveInlined)                              \
+  V(LoadElements)                              \
+  V(LoadGlobal)                                \
+  V(LoadKeyedFastElement)                      \
+  V(LoadKeyedGeneric)                          \
+  V(LoadNamedField)                            \
+  V(LoadNamedGeneric)                          \
+  V(Mod)                                       \
+  V(Mul)                                       \
+  V(ObjectLiteral)                             \
+  V(OsrEntry)                                  \
+  V(Parameter)                                 \
+  V(Power)                                     \
+  V(PushArgument)                              \
+  V(RegExpLiteral)                             \
+  V(Return)                                    \
+  V(Sar)                                       \
+  V(Shl)                                       \
+  V(Shr)                                       \
+  V(Simulate)                                  \
+  V(StackCheck)                                \
+  V(StoreGlobal)                               \
+  V(StoreKeyedFastElement)                     \
+  V(StoreKeyedGeneric)                         \
+  V(StoreNamedField)                           \
+  V(StoreNamedGeneric)                         \
+  V(Sub)                                       \
+  V(Throw)                                     \
+  V(Typeof)                                    \
+  V(TypeofIs)                                  \
+  V(UnaryMathOperation)                        \
+  V(UnknownOSRValue)                           \
+  V(ValueOf)
+
+#define GVN_FLAG_LIST(V)                       \
+  V(Calls)                                     \
+  V(InobjectFields)                            \
+  V(BackingStoreFields)                        \
+  V(ArrayElements)                             \
+  V(GlobalVars)                                \
+  V(Maps)                                      \
+  V(ArrayLengths)                              \
+  V(OsrEntries)
+
+#define DECLARE_INSTRUCTION(type)                   \
+  virtual bool Is##type() const { return true; }    \
+  static H##type* cast(HValue* value) {             \
+    ASSERT(value->Is##type());                      \
+    return reinterpret_cast<H##type*>(value);       \
+  }                                                 \
+  Opcode opcode() const { return HValue::k##type; }
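+// For example, DECLARE_INSTRUCTION(Add) overrides IsAdd() to return true,
+// defines HAdd::cast() and makes opcode() return HValue::kAdd.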
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
+  virtual LInstruction* CompileToLithium(LChunkBuilder* builder); \
+  virtual const char* Mnemonic() const { return mnemonic; }       \
+  DECLARE_INSTRUCTION(type)
+
+
+template<int kSize>
+class HOperandVector : public EmbeddedVector<HValue*, kSize> {
+ public:
+  HOperandVector() : EmbeddedVector<HValue*, kSize>(NULL) { }
+};
+
+
+class Range: public ZoneObject {
+ public:
+  Range() : lower_(kMinInt),
+            upper_(kMaxInt),
+            next_(NULL),
+            can_be_minus_zero_(false) { }
+
+  Range(int32_t lower, int32_t upper)
+      : lower_(lower), upper_(upper), next_(NULL), can_be_minus_zero_(false) { }
+
+  bool IsInSmiRange() const {
+    return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
+  }
+  void KeepOrder();
+  void Verify() const;
+  int32_t upper() const { return upper_; }
+  int32_t lower() const { return lower_; }
+  Range* next() const { return next_; }
+  Range* CopyClearLower() const { return new Range(kMinInt, upper_); }
+  Range* CopyClearUpper() const { return new Range(lower_, kMaxInt); }
+  void ClearLower() { lower_ = kMinInt; }
+  void ClearUpper() { upper_ = kMaxInt; }
+  Range* Copy() const { return new Range(lower_, upper_); }
+  bool IsMostGeneric() const { return lower_ == kMinInt && upper_ == kMaxInt; }
+  int32_t Mask() const;
+  void set_can_be_minus_zero(bool b) { can_be_minus_zero_ = b; }
+  bool CanBeMinusZero() const { return CanBeZero() && can_be_minus_zero_; }
+  bool CanBeZero() const { return upper_ >= 0 && lower_ <= 0; }
+  bool CanBeNegative() const { return lower_ < 0; }
+  bool Includes(int value) const {
+    return lower_ <= value && upper_ >= value;
+  }
+
+  void Sar(int32_t value) {
+    int32_t bits = value & 0x1F;
+    lower_ = lower_ >> bits;
+    upper_ = upper_ >> bits;
+    set_can_be_minus_zero(false);
+  }
+
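+  // Shifting back detects overflow: if (lower_ << bits) >> bits no longer
+  // equals the original bound, precision was lost and the range widens to
+  // [kMinInt, kMaxInt].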
+  void Shl(int32_t value) {
+    int32_t bits = value & 0x1F;
+    int old_lower = lower_;
+    int old_upper = upper_;
+    lower_ = lower_ << bits;
+    upper_ = upper_ << bits;
+    if (old_lower != lower_ >> bits || old_upper != upper_ >> bits) {
+      upper_ = kMaxInt;
+      lower_ = kMinInt;
+    }
+    set_can_be_minus_zero(false);
+  }
+
+  // Adds a constant to the lower and upper bound of the range.
+  void AddConstant(int32_t value);
+
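+  // Narrows this range by 'other' while keeping a link back to it, so the
+  // refinement can later be undone (see RemoveLastAddedRange).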
+  void StackUpon(Range* other) {
+    Intersect(other);
+    next_ = other;
+  }
+
+  void Intersect(Range* other) {
+    upper_ = Min(upper_, other->upper_);
+    lower_ = Max(lower_, other->lower_);
+    bool b = CanBeMinusZero() && other->CanBeMinusZero();
+    set_can_be_minus_zero(b);
+  }
+
+  void Union(Range* other) {
+    upper_ = Max(upper_, other->upper_);
+    lower_ = Min(lower_, other->lower_);
+    bool b = CanBeMinusZero() || other->CanBeMinusZero();
+    set_can_be_minus_zero(b);
+  }
+
+  // Compute a new result range and return true if the operation
+  // can overflow.
+  bool AddAndCheckOverflow(Range* other);
+  bool SubAndCheckOverflow(Range* other);
+  bool MulAndCheckOverflow(Range* other);
+
+ private:
+  int32_t lower_;
+  int32_t upper_;
+  Range* next_;
+  bool can_be_minus_zero_;
+};
+
+
+class Representation {
+ public:
+  enum Kind {
+    kNone,
+    kTagged,
+    kDouble,
+    kInteger32,
+    kNumRepresentations
+  };
+
+  Representation() : kind_(kNone) { }
+
+  static Representation None() { return Representation(kNone); }
+  static Representation Tagged() { return Representation(kTagged); }
+  static Representation Integer32() { return Representation(kInteger32); }
+  static Representation Double() { return Representation(kDouble); }
+
+  bool Equals(const Representation& other) const {
+    return kind_ == other.kind_;
+  }
+
+  Kind kind() const { return kind_; }
+  bool IsNone() const { return kind_ == kNone; }
+  bool IsTagged() const { return kind_ == kTagged; }
+  bool IsInteger32() const { return kind_ == kInteger32; }
+  bool IsDouble() const { return kind_ == kDouble; }
+  bool IsSpecialization() const {
+    return kind_ == kInteger32 || kind_ == kDouble;
+  }
+  const char* Mnemonic() const;
+
+ private:
+  explicit Representation(Kind k) : kind_(k) { }
+
+  Kind kind_;
+};
+
+
+class HType {
+ public:
+  HType() : type_(kUninitialized) { }
+
+  static HType Tagged() { return HType(kTagged); }
+  static HType TaggedPrimitive() { return HType(kTaggedPrimitive); }
+  static HType TaggedNumber() { return HType(kTaggedNumber); }
+  static HType Smi() { return HType(kSmi); }
+  static HType HeapNumber() { return HType(kHeapNumber); }
+  static HType String() { return HType(kString); }
+  static HType Boolean() { return HType(kBoolean); }
+  static HType NonPrimitive() { return HType(kNonPrimitive); }
+  static HType JSArray() { return HType(kJSArray); }
+  static HType JSObject() { return HType(kJSObject); }
+  static HType Uninitialized() { return HType(kUninitialized); }
+
+  // Return the weakest (least precise) common type.
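+  // Every type's bit pattern includes all of its supertypes' bits, so the
+  // bitwise AND of two patterns yields their closest common supertype; for
+  // example, Smi (0x1d) combined with HeapNumber (0x2d) is TaggedNumber
+  // (0xd).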
+  HType Combine(HType other) {
+    return HType(static_cast<Type>(type_ & other.type_));
+  }
+
+  bool Equals(const HType& other) {
+    return type_ == other.type_;
+  }
+
+  bool IsSubtypeOf(const HType& other) {
+    return Combine(other).Equals(other);
+  }
+
+  bool IsTagged() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kTagged) == kTagged);
+  }
+
+  bool IsTaggedPrimitive() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kTaggedPrimitive) == kTaggedPrimitive);
+  }
+
+  bool IsTaggedNumber() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kTaggedNumber) == kTaggedNumber);
+  }
+
+  bool IsSmi() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kSmi) == kSmi);
+  }
+
+  bool IsHeapNumber() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kHeapNumber) == kHeapNumber);
+  }
+
+  bool IsString() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kString) == kString);
+  }
+
+  bool IsBoolean() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kBoolean) == kBoolean);
+  }
+
+  bool IsNonPrimitive() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kNonPrimitive) == kNonPrimitive);
+  }
+
+  bool IsJSArray() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kJSArray) == kJSArray);
+  }
+
+  bool IsJSObject() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kJSObject) == kJSObject);
+  }
+
+  bool IsUninitialized() {
+    return type_ == kUninitialized;
+  }
+
+  static HType TypeFromValue(Handle<Object> value);
+
+  const char* ToString();
+  const char* ToShortString();
+
+ private:
+  enum Type {
+    kTagged = 0x1,           // 0000 0000 0000 0001
+    kTaggedPrimitive = 0x5,  // 0000 0000 0000 0101
+    kTaggedNumber = 0xd,     // 0000 0000 0000 1101
+    kSmi = 0x1d,             // 0000 0000 0001 1101
+    kHeapNumber = 0x2d,      // 0000 0000 0010 1101
+    kString = 0x45,          // 0000 0000 0100 0101
+    kBoolean = 0x85,         // 0000 0000 1000 0101
+    kNonPrimitive = 0x101,   // 0000 0001 0000 0001
+    kJSObject = 0x301,       // 0000 0011 0000 0001
+    kJSArray = 0x701,        // 0000 0111 0000 0001
+    kUninitialized = 0x1fff  // 0001 1111 1111 1111
+  };
+
+  explicit HType(Type t) : type_(t) { }
+
+  Type type_;
+};
+
+
+class HValue: public ZoneObject {
+ public:
+  static const int kNoNumber = -1;
+
+  // There must be one corresponding kDependsOn flag for every kChanges flag,
+  // and the order of the kChanges flags must be exactly the same as that of
+  // the kDependsOn flags.
+  enum Flag {
+    // Declare global value numbering flags.
+  #define DECLARE_DO(type) kChanges##type, kDependsOn##type,
+    GVN_FLAG_LIST(DECLARE_DO)
+  #undef DECLARE_DO
+    kFlexibleRepresentation,
+    kUseGVN,
+    kCanOverflow,
+    kBailoutOnMinusZero,
+    kCanBeDivByZero,
+    kIsArguments,
+    kTruncatingToInt32,
+    kLastFlag = kTruncatingToInt32
+  };
+
+  STATIC_ASSERT(kLastFlag < kBitsPerInt);
+
+  static const int kChangesToDependsFlagsLeftShift = 1;
+
+  static int ChangesFlagsMask() {
+    int result = 0;
+    // Create changes mask.
+#define DECLARE_DO(type) result |= (1 << kChanges##type);
+  GVN_FLAG_LIST(DECLARE_DO)
+#undef DECLARE_DO
+    return result;
+  }
+
+  static int DependsFlagsMask() {
+    return ConvertChangesToDependsFlags(ChangesFlagsMask());
+  }
+
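+  // Each kDependsOn##type flag immediately follows its kChanges##type flag
+  // in the Flag enum, so shifting a changes mask left by one bit turns every
+  // changes flag into the matching depends flag.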
+  static int ConvertChangesToDependsFlags(int flags) {
+    return flags << kChangesToDependsFlagsLeftShift;
+  }
+
+  // A flag mask to mark an instruction as having arbitrary side effects.
+  static int AllSideEffects() {
+    return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
+  }
+
+  static HValue* cast(HValue* value) { return value; }
+
+  enum Opcode {
+    // Declare a unique enum value for each hydrogen instruction.
+  #define DECLARE_DO(type) k##type,
+    HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO)
+  #undef DECLARE_DO
+    kMaxInstructionClass
+  };
+
+  HValue() : block_(NULL),
+             id_(kNoNumber),
+             uses_(2),
+             type_(HType::Tagged()),
+             range_(NULL),
+             flags_(0) {}
+  virtual ~HValue() {}
+
+  HBasicBlock* block() const { return block_; }
+  void SetBlock(HBasicBlock* block);
+
+  int id() const { return id_; }
+  void set_id(int id) { id_ = id; }
+
+  const ZoneList<HValue*>* uses() const { return &uses_; }
+
+  virtual bool EmitAtUses() const { return false; }
+  Representation representation() const { return representation_; }
+  void ChangeRepresentation(Representation r) {
+    // Representation was already set and is allowed to be changed.
+    ASSERT(!representation_.IsNone());
+    ASSERT(!r.IsNone());
+    ASSERT(CheckFlag(kFlexibleRepresentation));
+    RepresentationChanged(r);
+    representation_ = r;
+  }
+
+  HType type() const { return type_; }
+  void set_type(HType type) {
+    ASSERT(uses_.length() == 0);
+    type_ = type;
+  }
+
+  // An operation needs to override this function iff:
+  //   1) it can produce an int32 output.
+  //   2) the true value of its output can potentially be minus zero.
+  // The implementation must set a flag so that it bails out in the case where
+  // it would otherwise output what should be a minus zero as an int32 zero.
+  // If the operation also exists in a form that takes int32 and outputs int32
+  // then the operation should return its input value so that we can propagate
+  // back.  There are two operations that need to propagate back to more than
+  // one input.  They are phi and binary add.  They always return NULL and
+  // expect the caller to take care of things.
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited) {
+    visited->Add(id());
+    return NULL;
+  }
+
+  bool HasSideEffects() const {
+    return (flags_ & AllSideEffects()) != 0;
+  }
+  bool IsDefinedAfter(HBasicBlock* other) const;
+
+  // Operands.
+  virtual int OperandCount() const { return 0; }
+  virtual HValue* OperandAt(int index) const {
+    UNREACHABLE();
+    return NULL;
+  }
+  void SetOperandAt(int index, HValue* value);
+
+  int LookupOperandIndex(int occurrence_index, HValue* op) const;
+  bool UsesMultipleTimes(HValue* op) const;
+
+  void ReplaceAndDelete(HValue* other);
+  void ReplaceValue(HValue* other);
+  void ReplaceAtUse(HValue* use, HValue* other);
+  void ReplaceFirstAtUse(HValue* use, HValue* other, Representation r);
+  bool HasNoUses() const { return uses_.is_empty(); }
+  void ClearOperands();
+  void Delete();
+
+  int flags() const { return flags_; }
+  void SetFlagMask(int mask) { flags_ |= mask; }
+  void SetFlag(Flag f) { SetFlagMask(1 << f); }
+  void ClearFlagMask(int mask) { flags_ &= ~mask; }
+  void ClearFlag(Flag f) { ClearFlagMask(1 << f); }
+  bool CheckFlag(Flag f) const { return CheckFlagMask(1 << f); }
+  bool CheckFlagMask(int mask) const { return (flags_ & mask) != 0; }
+
+  Range* range() const { return range_; }
+  bool HasRange() const { return range_ != NULL; }
+  void AddNewRange(Range* r);
+  void RemoveLastAddedRange();
+  void ComputeInitialRange();
+
+  // Representation helpers.
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::None();
+  }
+  virtual Representation InferredRepresentation() const {
+    return representation();
+  }
+
+  // This gives the instruction an opportunity to replace itself with an
+  // instruction that does the same in some better way.  To replace an
+  // instruction with a new one, first add the new instruction to the graph,
+  // then return it.  Return NULL to have the instruction deleted.
+  virtual HValue* Canonicalize() { return this; }
+
+  // Declare virtual type testers.
+#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
+  HYDROGEN_ALL_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  bool Equals(HValue* other) const;
+  virtual intptr_t Hashcode() const;
+
+  // Printing support.
+  virtual void PrintTo(StringStream* stream) const = 0;
+  void PrintNameTo(StringStream* stream);
+  static void PrintTypeTo(HType type, StringStream* stream);
+
+  virtual const char* Mnemonic() const = 0;
+  virtual Opcode opcode() const = 0;
+
+  // Updates the inferred type of this instruction and returns true if
+  // it has changed.
+  bool UpdateInferredType();
+
+  virtual HType CalculateInferredType() const;
+
+  // Helper for type conversions used by normal and phi instructions.
+  void InsertInputConversion(HInstruction* previous, int index, HType type);
+
+#ifdef DEBUG
+  virtual void Verify() const = 0;
+#endif
+
+ protected:
+  virtual bool DataEquals(HValue* other) const { return true; }
+  virtual void RepresentationChanged(Representation to) { }
+  virtual Range* InferRange();
+  virtual void DeleteFromGraph() = 0;
+  virtual void InternalSetOperandAt(int index, HValue* value) { UNREACHABLE(); }
+  void clear_block() {
+    ASSERT(block_ != NULL);
+    block_ = NULL;
+  }
+
+  void set_representation(Representation r) {
+    // Representation is set-once.
+    ASSERT(representation_.IsNone() && !r.IsNone());
+    representation_ = r;
+  }
+
+ private:
+  void InternalReplaceAtUse(HValue* use, HValue* other);
+  void RegisterUse(int index, HValue* new_value);
+
+  HBasicBlock* block_;
+
+  // The id of this instruction in the hydrogen graph, assigned when first
+  // added to the graph. Reflects creation order.
+  int id_;
+
+  Representation representation_;
+  ZoneList<HValue*> uses_;
+  HType type_;
+  Range* range_;
+  int flags_;
+
+  DISALLOW_COPY_AND_ASSIGN(HValue);
+};
+
+
+class HInstruction: public HValue {
+ public:
+  HInstruction* next() const { return next_; }
+  HInstruction* previous() const { return previous_; }
+
+  void PrintTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream) const {}
+
+  bool IsLinked() const { return block() != NULL; }
+  void Unlink();
+  void InsertBefore(HInstruction* next);
+  void InsertAfter(HInstruction* previous);
+
+  int position() const { return position_; }
+  bool has_position() const { return position_ != RelocInfo::kNoPosition; }
+  void set_position(int position) { position_ = position; }
+
+  virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  DECLARE_INSTRUCTION(Instruction)
+
+ protected:
+  HInstruction()
+      : next_(NULL),
+        previous_(NULL),
+        position_(RelocInfo::kNoPosition) {
+    SetFlag(kDependsOnOsrEntries);
+  }
+
+  virtual void DeleteFromGraph() { Unlink(); }
+
+ private:
+  void InitializeAsFirst(HBasicBlock* block) {
+    ASSERT(!IsLinked());
+    SetBlock(block);
+  }
+
+  HInstruction* next_;
+  HInstruction* previous_;
+  int position_;
+
+  friend class HBasicBlock;
+};
+
+
+class HBlockEntry: public HInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(BlockEntry, "block_entry")
+};
+
+
+class HControlInstruction: public HInstruction {
+ public:
+  virtual HBasicBlock* FirstSuccessor() const { return NULL; }
+  virtual HBasicBlock* SecondSuccessor() const { return NULL; }
+
+  DECLARE_INSTRUCTION(ControlInstruction)
+};
+
+
+class HDeoptimize: public HControlInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+};
+
+
+class HGoto: public HControlInstruction {
+ public:
+  explicit HGoto(HBasicBlock* destination)
+      : destination_(destination),
+        include_stack_check_(false) {}
+
+  virtual HBasicBlock* FirstSuccessor() const { return destination_; }
+  void set_include_stack_check(bool include_stack_check) {
+    include_stack_check_ = include_stack_check;
+  }
+  bool include_stack_check() const { return include_stack_check_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+
+ private:
+  HBasicBlock* destination_;
+  bool include_stack_check_;
+};
+
+
+class HUnaryControlInstruction: public HControlInstruction {
+ public:
+  explicit HUnaryControlInstruction(HValue* value) {
+    SetOperandAt(0, value);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  HValue* value() const { return OperandAt(0); }
+  virtual int OperandCount() const { return 1; }
+  virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
+  DECLARE_INSTRUCTION(UnaryControlInstruction)
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    operands_[index] = value;
+  }
+
+ private:
+  HOperandVector<1> operands_;
+};
+
+
+class HBranch: public HUnaryControlInstruction {
+ public:
+  HBranch(HBasicBlock* true_destination,
+          HBasicBlock* false_destination,
+          HValue* boolean_value)
+      : HUnaryControlInstruction(boolean_value),
+        true_destination_(true_destination),
+        false_destination_(false_destination) {
+    ASSERT(true_destination != NULL && false_destination != NULL);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::None();
+  }
+
+  virtual HBasicBlock* FirstSuccessor() const { return true_destination_; }
+  virtual HBasicBlock* SecondSuccessor() const { return false_destination_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+
+ private:
+  HBasicBlock* true_destination_;
+  HBasicBlock* false_destination_;
+};
+
+
+class HCompareMapAndBranch: public HUnaryControlInstruction {
+ public:
+  HCompareMapAndBranch(HValue* result,
+                       Handle<Map> map,
+                       HBasicBlock* true_destination,
+                       HBasicBlock* false_destination)
+      : HUnaryControlInstruction(result),
+        map_(map),
+        true_destination_(true_destination),
+        false_destination_(false_destination) {
+    ASSERT(true_destination != NULL);
+    ASSERT(false_destination != NULL);
+    ASSERT(!map.is_null());
+  }
+
+  virtual HBasicBlock* FirstSuccessor() const { return true_destination_; }
+  virtual HBasicBlock* SecondSuccessor() const { return false_destination_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<Map> map() const { return map_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMapAndBranch, "compare_map_and_branch")
+
+ private:
+  Handle<Map> map_;
+  HBasicBlock* true_destination_;
+  HBasicBlock* false_destination_;
+};
+
+
+class HReturn: public HUnaryControlInstruction {
+ public:
+  explicit HReturn(HValue* result) : HUnaryControlInstruction(result) { }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class HThrow: public HUnaryControlInstruction {
+ public:
+  explicit HThrow(HValue* value) : HUnaryControlInstruction(value) { }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class HUnaryOperation: public HInstruction {
+ public:
+  explicit HUnaryOperation(HValue* value) {
+    SetOperandAt(0, value);
+  }
+
+  HValue* value() const { return OperandAt(0); }
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual int OperandCount() const { return 1; }
+  virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
+  DECLARE_INSTRUCTION(UnaryOperation)
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    operands_[index] = value;
+  }
+
+ private:
+  HOperandVector<1> operands_;
+};
+
+
+class HChange: public HUnaryOperation {
+ public:
+  HChange(HValue* value,
+          Representation from,
+          Representation to)
+      : HUnaryOperation(value), from_(from), to_(to) {
+    ASSERT(!from.IsNone() && !to.IsNone());
+    ASSERT(!from.Equals(to));
+    set_representation(to);
+    SetFlag(kUseGVN);
+
+    if (from.IsInteger32() && to.IsTagged() && value->range() != NULL &&
+        value->range()->IsInSmiRange()) {
+      set_type(HType::Smi());
+    }
+  }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  Representation from() const { return from_; }
+  Representation to() const { return to_; }
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return from_;
+  }
+
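+  // The conversion may drop information (e.g. the fractional part) only if
+  // every use truncates its input to int32 anyway; one non-truncating use
+  // disables truncation.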
+  bool CanTruncateToInt32() const {
+    for (int i = 0; i < uses()->length(); ++i) {
+      if (!uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) return false;
+    }
+    return true;
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Change,
+                               CanTruncateToInt32() ? "truncate" : "change")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    if (!other->IsChange()) return false;
+    HChange* change = HChange::cast(other);
+    return value() == change->value()
+        && to().Equals(change->to())
+        && CanTruncateToInt32() == change->CanTruncateToInt32();
+  }
+
+ private:
+  Representation from_;
+  Representation to_;
+};
+
+
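+// A simulate captures the state of the environment (locals and expression
+// stack values) at a particular AST id so that the deoptimizer can rebuild
+// the corresponding unoptimized frame.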
+class HSimulate: public HInstruction {
+ public:
+  HSimulate(int ast_id, int pop_count, int environment_height)
+      : ast_id_(ast_id),
+        pop_count_(pop_count),
+        environment_height_(environment_height),
+        values_(2),
+        assigned_indexes_(2) {}
+  virtual ~HSimulate() {}
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  bool HasAstId() const { return ast_id_ != AstNode::kNoNumber; }
+  int ast_id() const { return ast_id_; }
+  void set_ast_id(int id) {
+    ASSERT(!HasAstId());
+    ast_id_ = id;
+  }
+
+  int environment_height() const { return environment_height_; }
+  int pop_count() const { return pop_count_; }
+  const ZoneList<HValue*>* values() const { return &values_; }
+  int GetAssignedIndexAt(int index) const {
+    ASSERT(HasAssignedIndexAt(index));
+    return assigned_indexes_[index];
+  }
+  bool HasAssignedIndexAt(int index) const {
+    return assigned_indexes_[index] != kNoIndex;
+  }
+  void AddAssignedValue(int index, HValue* value) {
+    AddValue(index, value);
+  }
+  void AddPushedValue(HValue* value) {
+    AddValue(kNoIndex, value);
+  }
+  virtual int OperandCount() const { return values_.length(); }
+  virtual HValue* OperandAt(int index) const { return values_[index]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Simulate, "simulate")
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    values_[index] = value;
+  }
+
+ private:
+  static const int kNoIndex = -1;
+  void AddValue(int index, HValue* value) {
+    assigned_indexes_.Add(index);
+    // Resize the list of pushed values.
+    values_.Add(NULL);
+    // Set the operand through the base method in HValue to make sure that the
+    // use lists are correctly updated.
+    SetOperandAt(values_.length() - 1, value);
+  }
+  int ast_id_;
+  int pop_count_;
+  int environment_height_;
+  ZoneList<HValue*> values_;
+  ZoneList<int> assigned_indexes_;
+};
+
+
+class HStackCheck: public HInstruction {
+ public:
+  HStackCheck() { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack_check")
+};
+
+
+class HEnterInlined: public HInstruction {
+ public:
+  HEnterInlined(Handle<JSFunction> closure, FunctionLiteral* function)
+      : closure_(closure), function_(function) {
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<JSFunction> closure() const { return closure_; }
+  FunctionLiteral* function() const { return function_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(EnterInlined, "enter_inlined")
+
+ private:
+  Handle<JSFunction> closure_;
+  FunctionLiteral* function_;
+};
+
+
+class HLeaveInlined: public HInstruction {
+ public:
+  HLeaveInlined() {}
+
+  DECLARE_CONCRETE_INSTRUCTION(LeaveInlined, "leave_inlined")
+};
+
+
+class HPushArgument: public HUnaryOperation {
+ public:
+  explicit HPushArgument(HValue* value)
+      : HUnaryOperation(value), argument_index_(-1) {
+    set_representation(Representation::Tagged());
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  HValue* argument() const { return OperandAt(0); }
+  int argument_index() const { return argument_index_; }
+  void set_argument_index(int index) {
+    ASSERT(argument_index_ == -1 || index == argument_index_);
+    argument_index_ = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push_argument")
+
+ private:
+  int argument_index_;
+};
+
+
+class HGlobalObject: public HInstruction {
+ public:
+  HGlobalObject() {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnCalls);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global_object")
+};
+
+
+class HGlobalReceiver: public HInstruction {
+ public:
+  HGlobalReceiver() {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnCalls);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global_receiver")
+};
+
+
+class HCall: public HInstruction {
+ public:
+  // Construct a call with uninitialized arguments. The argument count
+  // includes the receiver.
+  explicit HCall(int count);
+
+  virtual HType CalculateInferredType() const { return HType::Tagged(); }
+
+  // TODO(3190496): This needs a cleanup. We don't want the arguments
+  // to be operands of the call instruction. This results in bad code quality.
+  virtual int argument_count() const { return arguments_.length(); }
+  virtual int OperandCount() const { return argument_count(); }
+  virtual HValue* OperandAt(int index) const { return arguments_[index]; }
+  virtual HPushArgument* PushArgumentAt(int index) const {
+    return HPushArgument::cast(OperandAt(index));
+  }
+  virtual HValue* ArgumentAt(int index) const {
+    return PushArgumentAt(index)->argument();
+  }
+  virtual void SetArgumentAt(int index, HPushArgument* push_argument);
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_INSTRUCTION(Call)
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    arguments_[index] = value;
+  }
+
+  int argument_count_;
+  Vector<HValue*> arguments_;
+};
+
+
+class HCallConstantFunction: public HCall {
+ public:
+  HCallConstantFunction(Handle<JSFunction> function, int argument_count)
+      : HCall(argument_count), function_(function) { }
+
+  Handle<JSFunction> function() const { return function_; }
+  bool IsApplyFunction() const {
+    return function_->code() == Builtins::builtin(Builtins::FunctionApply);
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call_constant_function")
+
+ private:
+  Handle<JSFunction> function_;
+};
+
+
+class HCallKeyed: public HCall {
+ public:
+  HCallKeyed(HValue* key, int argument_count)
+      : HCall(argument_count + 1) {
+    SetOperandAt(0, key);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  // TODO(3190496): This is a hack to get an additional operand that
+  // is not an argument to work with the current setup. This _needs_ a cleanup.
+  // (see HCall)
+  virtual void PrintDataTo(StringStream* stream) const;
+  HValue* key() const { return OperandAt(0); }
+  virtual int argument_count() const { return arguments_.length() - 1; }
+  virtual int OperandCount() const { return arguments_.length(); }
+  virtual HValue* OperandAt(int index) const { return arguments_[index]; }
+  virtual HPushArgument* PushArgumentAt(int index) const {
+    return HPushArgument::cast(OperandAt(index + 1));
+  }
+  virtual void SetArgumentAt(int index, HPushArgument* push_argument) {
+    HCall::SetArgumentAt(index + 1, push_argument);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call_keyed")
+};
+
+
+class HCallNamed: public HCall {
+ public:
+  HCallNamed(Handle<String> name, int argument_count)
+      : HCall(argument_count), name_(name) { }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return name_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call_named")
+
+ private:
+  Handle<String> name_;
+};
+
+
+class HCallFunction: public HCall {
+ public:
+  explicit HCallFunction(int argument_count) : HCall(argument_count) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call_function")
+};
+
+
+class HCallGlobal: public HCall {
+ public:
+  HCallGlobal(Handle<String> name, int argument_count)
+      : HCall(argument_count), name_(name) { }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return name_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call_global")
+
+ private:
+  Handle<String> name_;
+};
+
+
+class HCallKnownGlobal: public HCall {
+ public:
+  HCallKnownGlobal(Handle<JSFunction> target,
+                   int argument_count)
+      : HCall(argument_count), target_(target) { }
+
+  Handle<JSFunction> target() const { return target_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call_known_global")
+
+ private:
+  Handle<JSFunction> target_;
+};
+
+
+class HCallNew: public HCall {
+ public:
+  explicit HCallNew(int argument_count) : HCall(argument_count) { }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  HValue* constructor() const { return ArgumentAt(0); }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call_new")
+};
+
+
+class HCallRuntime: public HCall {
+ public:
+  HCallRuntime(Handle<String> name,
+               Runtime::Function* c_function,
+               int argument_count)
+      : HCall(argument_count), c_function_(c_function), name_(name) { }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Runtime::Function* function() const { return c_function_; }
+  Handle<String> name() const { return name_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call_runtime")
+
+ private:
+  Runtime::Function* c_function_;
+  Handle<String> name_;
+};
+
+
+class HArrayLength: public HUnaryOperation {
+ public:
+  explicit HArrayLength(HValue* value) : HUnaryOperation(value) {
+    // The length of an array is stored as a tagged value in the array
+    // object. It is guaranteed to be a 32-bit integer, but it can be
+    // represented as either a smi or heap number.
+    set_representation(Representation::Tagged());
+    SetFlag(kDependsOnArrayLengths);
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLength, "array_length")
+};
+
+
+class HBitNot: public HUnaryOperation {
+ public:
+  explicit HBitNot(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+    SetFlag(kTruncatingToInt32);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Integer32();
+  }
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(BitNot, "bit_not")
+};
+
+
+class HUnaryMathOperation: public HUnaryOperation {
+ public:
+  HUnaryMathOperation(HValue* value, BuiltinFunctionId op)
+      : HUnaryOperation(value), op_(op) {
+    switch (op) {
+      case kMathFloor:
+      case kMathRound:
+      case kMathCeil:
+        set_representation(Representation::Integer32());
+        break;
+      case kMathAbs:
+        set_representation(Representation::Tagged());
+        SetFlag(kFlexibleRepresentation);
+        break;
+      case kMathSqrt:
+      case kMathPowHalf:
+      case kMathLog:
+      case kMathSin:
+      case kMathCos:
+        set_representation(Representation::Double());
+        break;
+      default:
+        UNREACHABLE();
+    }
+    SetFlag(kUseGVN);
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  virtual HType CalculateInferredType() const;
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    switch (op_) {
+      case kMathFloor:
+      case kMathRound:
+      case kMathCeil:
+      case kMathSqrt:
+      case kMathPowHalf:
+      case kMathLog:
+      case kMathSin:
+      case kMathCos:
+        return Representation::Double();
+      case kMathAbs:
+        return representation();
+      default:
+        return Representation::None();
+    }
+  }
+
+  virtual HValue* Canonicalize() {
+    // If the input is integer32 then we replace the floor instruction
+    // with its input.  This happens before the representation changes are
+    // introduced.
+    if (op() == kMathFloor) {
+      if (value()->representation().IsInteger32()) return value();
+    }
+    return this;
+  }
+
+  BuiltinFunctionId op() const { return op_; }
+  const char* OpName() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary_math_operation")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
+    return op_ == b->op();
+  }
+
+ private:
+  BuiltinFunctionId op_;
+};
+
+
+class HLoadElements: public HUnaryOperation {
+ public:
+  explicit HLoadElements(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnMaps);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class HCheckMap: public HUnaryOperation {
+ public:
+  HCheckMap(HValue* value, Handle<Map> map)
+      : HUnaryOperation(value), map_(map) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnMaps);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual HType CalculateInferredType() const;
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  Handle<Map> map() const { return map_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check_map")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HCheckMap* b = HCheckMap::cast(other);
+    return map_.is_identical_to(b->map());
+  }
+
+ private:
+  Handle<Map> map_;
+};
+
+
+class HCheckFunction: public HUnaryOperation {
+ public:
+  HCheckFunction(HValue* value, Handle<JSFunction> function)
+      : HUnaryOperation(value), target_(function) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual HType CalculateInferredType() const;
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  Handle<JSFunction> target() const { return target_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check_function")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HCheckFunction* b = HCheckFunction::cast(other);
+    return target_.is_identical_to(b->target());
+  }
+
+ private:
+  Handle<JSFunction> target_;
+};
+
+
+class HCheckInstanceType: public HUnaryOperation {
+ public:
+  // Check that the instance type is in the range [first, last] where
+  // both first and last are included.
+  HCheckInstanceType(HValue* value, InstanceType first, InstanceType last)
+      : HUnaryOperation(value), first_(first), last_(last) {
+    ASSERT(first <= last);
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  static HCheckInstanceType* NewIsJSObjectOrJSFunction(HValue* value);
+
+  InstanceType first() const { return first_; }
+  InstanceType last() const { return last_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check_instance_type")
+
+ protected:
+  // TODO(ager): It could be nice to allow the omission of instance
+  // type checks if we have already performed an instance type check
+  // with a larger range.
+  virtual bool DataEquals(HValue* other) const {
+    HCheckInstanceType* b = HCheckInstanceType::cast(other);
+    return (first_ == b->first()) && (last_ == b->last());
+  }
+
+ private:
+  InstanceType first_;
+  InstanceType last_;
+};
+
+
+class HCheckNonSmi: public HUnaryOperation {
+ public:
+  explicit HCheckNonSmi(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual HType CalculateInferredType() const;
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi")
+};
+
+
+class HCheckPrototypeMaps: public HUnaryOperation {
+ public:
+  HCheckPrototypeMaps(HValue* value,
+                      Handle<JSObject> holder,
+                      Handle<Map> receiver_map)
+      : HUnaryOperation(value),
+        holder_(holder),
+        receiver_map_(receiver_map) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnMaps);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  Handle<JSObject> holder() const { return holder_; }
+  Handle<Map> receiver_map() const { return receiver_map_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check_prototype_maps")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other);
+    return holder_.is_identical_to(b->holder()) &&
+        receiver_map_.is_identical_to(b->receiver_map());
+  }
+
+ private:
+  Handle<JSObject> holder_;
+  Handle<Map> receiver_map_;
+};
+
+
+class HCheckSmi: public HUnaryOperation {
+ public:
+  explicit HCheckSmi(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual HType CalculateInferredType() const;
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check_smi")
+};
+
+
+class HPhi: public HValue {
+ public:
+  explicit HPhi(int merged_index)
+      : inputs_(2),
+        merged_index_(merged_index),
+        phi_id_(-1) {
+    for (int i = 0; i < Representation::kNumRepresentations; i++) {
+      non_phi_uses_[i] = 0;
+      indirect_uses_[i] = 0;
+    }
+    ASSERT(merged_index >= 0);
+    set_representation(Representation::Tagged());
+    SetFlag(kFlexibleRepresentation);
+  }
+
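+  // Representation inference joins the input representations: any tagged
+  // input forces Tagged, otherwise Double takes precedence over Integer32,
+  // and None is used when no input has a representation yet.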
+  virtual Representation InferredRepresentation() const {
+    bool double_occurred = false;
+    bool int32_occurred = false;
+    for (int i = 0; i < OperandCount(); ++i) {
+      HValue* value = OperandAt(i);
+      if (value->representation().IsDouble()) double_occurred = true;
+      if (value->representation().IsInteger32()) int32_occurred = true;
+      if (value->representation().IsTagged()) return Representation::Tagged();
+    }
+
+    if (double_occurred) return Representation::Double();
+    if (int32_occurred) return Representation::Integer32();
+    return Representation::None();
+  }
+
+  virtual Range* InferRange();
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return representation();
+  }
+  virtual HType CalculateInferredType() const;
+  virtual int OperandCount() const { return inputs_.length(); }
+  virtual HValue* OperandAt(int index) const { return inputs_[index]; }
+  HValue* GetRedundantReplacement() const;
+  void AddInput(HValue* value);
+
+  bool HasReceiverOperand();
+
+  int merged_index() const { return merged_index_; }
+
+  virtual const char* Mnemonic() const { return "phi"; }
+
+  virtual void PrintTo(StringStream* stream) const;
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  DECLARE_INSTRUCTION(Phi)
+
+  void InitRealUses(int id);
+  void AddNonPhiUsesFrom(HPhi* other);
+  void AddIndirectUsesTo(int* use_count);
+
+  int tagged_non_phi_uses() const {
+    return non_phi_uses_[Representation::kTagged];
+  }
+  int int32_non_phi_uses() const {
+    return non_phi_uses_[Representation::kInteger32];
+  }
+  int double_non_phi_uses() const {
+    return non_phi_uses_[Representation::kDouble];
+  }
+  int tagged_indirect_uses() const {
+    return indirect_uses_[Representation::kTagged];
+  }
+  int int32_indirect_uses() const {
+    return indirect_uses_[Representation::kInteger32];
+  }
+  int double_indirect_uses() const {
+    return indirect_uses_[Representation::kDouble];
+  }
+  int phi_id() { return phi_id_; }
+
+ protected:
+  virtual void DeleteFromGraph();
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    inputs_[index] = value;
+  }
+
+ private:
+  ZoneList<HValue*> inputs_;
+  int merged_index_;
+
+  int non_phi_uses_[Representation::kNumRepresentations];
+  int indirect_uses_[Representation::kNumRepresentations];
+  int phi_id_;
+};
+
+
+class HArgumentsObject: public HInstruction {
+ public:
+  HArgumentsObject() {
+    set_representation(Representation::Tagged());
+    SetFlag(kIsArguments);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject, "arguments-object")
+};
+
+
+class HConstant: public HInstruction {
+ public:
+  HConstant(Handle<Object> handle, Representation r);
+
+  Handle<Object> handle() const { return handle_; }
+
+  virtual bool EmitAtUses() const { return !representation().IsDouble(); }
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual HType CalculateInferredType() const;
+  bool IsInteger() const { return handle_->IsSmi(); }
+  HConstant* CopyToRepresentation(Representation r) const;
+  HConstant* CopyToTruncatedInt32() const;
+  bool HasInteger32Value() const { return has_int32_value_; }
+  int32_t Integer32Value() const {
+    ASSERT(HasInteger32Value());
+    return int32_value_;
+  }
+  bool HasDoubleValue() const { return has_double_value_; }
+  double DoubleValue() const {
+    ASSERT(HasDoubleValue());
+    return double_value_;
+  }
+  bool HasStringValue() const { return handle_->IsString(); }
+
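+  // The hash code is the constant's address, so it is only stable while
+  // allocation (and thus a moving GC) is disallowed; hence the ASSERT below.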
+  virtual intptr_t Hashcode() const {
+    ASSERT(!Heap::allow_allocation(false));
+    return reinterpret_cast<intptr_t>(*handle());
+  }
+
+#ifdef DEBUG
+  virtual void Verify() const { }
+#endif
+
+  DECLARE_CONCRETE_INSTRUCTION(Constant, "constant")
+
+ protected:
+  virtual Range* InferRange();
+
+  virtual bool DataEquals(HValue* other) const {
+    HConstant* other_constant = HConstant::cast(other);
+    return handle().is_identical_to(other_constant->handle());
+  }
+
+ private:
+  Handle<Object> handle_;
+  HType constant_type_;
+
+  // The following two values represent the int32 and the double value of the
+  // given constant if there is a lossless conversion between the constant
+  // and the specific representation.
+  bool has_int32_value_;
+  int32_t int32_value_;
+  bool has_double_value_;
+  double double_value_;
+};
+
+
+class HBinaryOperation: public HInstruction {
+ public:
+  HBinaryOperation(HValue* left, HValue* right) {
+    ASSERT(left != NULL && right != NULL);
+    SetOperandAt(0, left);
+    SetOperandAt(1, right);
+  }
+
+  HValue* left() const { return OperandAt(0); }
+  HValue* right() const { return OperandAt(1); }
+
+  // TODO(kasperl): Move these helpers to the IA-32 Lithium
+  // instruction sequence builder.
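+  // For commutative operations these helpers canonicalize the operand
+  // order: MostConstantOperand() returns the constant operand if there is
+  // one, and LeastConstantOperand() returns the other.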
+  HValue* LeastConstantOperand() const {
+    if (IsCommutative() && left()->IsConstant()) return right();
+    return left();
+  }
+  HValue* MostConstantOperand() const {
+    if (IsCommutative() && left()->IsConstant()) return left();
+    return right();
+  }
+
+  virtual bool IsCommutative() const { return false; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual int OperandCount() const { return operands_.length(); }
+  virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
+  DECLARE_INSTRUCTION(BinaryOperation)
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    operands_[index] = value;
+  }
+
+ private:
+  HOperandVector<2> operands_;
+};
+
+
+class HApplyArguments: public HInstruction {
+ public:
+  HApplyArguments(HValue* function,
+                  HValue* receiver,
+                  HValue* length,
+                  HValue* elements) {
+    set_representation(Representation::Tagged());
+    SetOperandAt(0, function);
+    SetOperandAt(1, receiver);
+    SetOperandAt(2, length);
+    SetOperandAt(3, elements);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    // The length is untagged; all other inputs are tagged.
+    return (index == 2)
+        ? Representation::Integer32()
+        : Representation::Tagged();
+  }
+
+  HValue* function() const { return OperandAt(0); }
+  HValue* receiver() const { return OperandAt(1); }
+  HValue* length() const { return OperandAt(2); }
+  HValue* elements() const { return OperandAt(3); }
+
+  virtual int OperandCount() const { return operands_.length(); }
+  virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply_arguments")
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    operands_[index] = value;
+  }
+
+ private:
+  HOperandVector<4> operands_;
+};
+
+
+class HArgumentsElements: public HInstruction {
+ public:
+  HArgumentsElements() {
+    // The value produced by this instruction is a pointer into the stack
+    // that looks like a smi because of its alignment.
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments_elements")
+};
+
+
+class HArgumentsLength: public HUnaryOperation {
+ public:
+  explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments_length")
+};
+
+
+class HAccessArgumentsAt: public HInstruction {
+ public:
+  HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetOperandAt(0, arguments);
+    SetOperandAt(1, length);
+    SetOperandAt(2, index);
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    // The arguments elements pointer is considered tagged.
+    return index == 0
+        ? Representation::Tagged()
+        : Representation::Integer32();
+  }
+
+  HValue* arguments() const { return operands_[0]; }
+  HValue* length() const { return operands_[1]; }
+  HValue* index() const { return operands_[2]; }
+
+  virtual int OperandCount() const { return operands_.length(); }
+  virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access_arguments_at")
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    operands_[index] = value;
+  }
+
+ private:
+  HOperandVector<3> operands_;
+};
+
+
+class HBoundsCheck: public HBinaryOperation {
+ public:
+  HBoundsCheck(HValue* index, HValue* length)
+      : HBinaryOperation(index, length) {
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Integer32();
+  }
+
+#ifdef DEBUG
+  virtual void Verify() const;
+#endif
+
+  HValue* index() const { return left(); }
+  HValue* length() const { return right(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds_check")
+};
+
+
+class HBitwiseBinaryOperation: public HBinaryOperation {
+ public:
+  HBitwiseBinaryOperation(HValue* left, HValue* right)
+      : HBinaryOperation(left, right) {
+    // Default to truncating, Integer32, UseGVN.
+    set_representation(Representation::Integer32());
+    SetFlag(kTruncatingToInt32);
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Integer32();
+  }
+
+  DECLARE_INSTRUCTION(BitwiseBinaryOperation)
+};
+
+
+class HArithmeticBinaryOperation: public HBinaryOperation {
+ public:
+  HArithmeticBinaryOperation(HValue* left, HValue* right)
+      : HBinaryOperation(left, right) {
+    set_representation(Representation::Tagged());
+    SetFlag(kFlexibleRepresentation);
+    SetFlagMask(AllSideEffects());
+  }
+
+  virtual void RepresentationChanged(Representation to) {
+    if (!to.IsTagged()) {
+      ClearFlagMask(AllSideEffects());
+      SetFlag(kUseGVN);
+    }
+  }
+
+  virtual HType CalculateInferredType() const;
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return representation();
+  }
+  virtual Representation InferredRepresentation() const {
+    if (left()->representation().Equals(right()->representation())) {
+      return left()->representation();
+    }
+    return HValue::InferredRepresentation();
+  }
+
+  DECLARE_INSTRUCTION(ArithmeticBinaryOperation)
+};
+
+
+class HCompare: public HBinaryOperation {
+ public:
+  HCompare(HValue* left, HValue* right, Token::Value token)
+      : HBinaryOperation(left, right), token_(token) {
+    ASSERT(Token::IsCompareOp(token));
+    set_representation(Representation::Tagged());
+    SetFlagMask(AllSideEffects());
+  }
+
+  void SetInputRepresentation(Representation r);
+  virtual bool EmitAtUses() const { return uses()->length() <= 1; }
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return input_representation_;
+  }
+  Representation GetInputRepresentation() const {
+    return input_representation_;
+  }
+  Token::Value token() const { return token_; }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  virtual HType CalculateInferredType() const;
+
+  virtual intptr_t Hashcode() const {
+    return HValue::Hashcode() * 7 + token_;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Compare, "compare")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HCompare* comp = HCompare::cast(other);
+    return token_ == comp->token();
+  }
+
+ private:
+  Representation input_representation_;
+  Token::Value token_;
+};
+
+
+class HCompareJSObjectEq: public HBinaryOperation {
+ public:
+  HCompareJSObjectEq(HValue* left, HValue* right)
+      : HBinaryOperation(left, right) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  virtual bool EmitAtUses() const { return uses()->length() <= 1; }
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareJSObjectEq, "compare-js-object-eq")
+};
+
+
+class HUnaryPredicate: public HUnaryOperation {
+ public:
+  explicit HUnaryPredicate(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+  virtual bool EmitAtUses() const { return uses()->length() <= 1; }
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual HType CalculateInferredType() const;
+};
+
+
+class HIsNull: public HUnaryPredicate {
+ public:
+  HIsNull(HValue* value, bool is_strict)
+      : HUnaryPredicate(value), is_strict_(is_strict) { }
+
+  bool is_strict() const { return is_strict_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNull, "is_null")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HIsNull* b = HIsNull::cast(other);
+    return is_strict_ == b->is_strict();
+  }
+
+ private:
+  bool is_strict_;
+};
+
+
+class HIsObject: public HUnaryPredicate {
+ public:
+  explicit HIsObject(HValue* value) : HUnaryPredicate(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObject, "is_object")
+};
+
+
+class HIsSmi: public HUnaryPredicate {
+ public:
+  explicit HIsSmi(HValue* value) : HUnaryPredicate(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is_smi")
+};
+
+
+class HHasInstanceType: public HUnaryPredicate {
+ public:
+  HHasInstanceType(HValue* value, InstanceType type)
+      : HUnaryPredicate(value), from_(type), to_(type) { }
+  HHasInstanceType(HValue* value, InstanceType from, InstanceType to)
+      : HUnaryPredicate(value), from_(from), to_(to) {
+    ASSERT(to == LAST_TYPE);  // Others not implemented yet in backend.
+  }
+
+  InstanceType from() { return from_; }
+  InstanceType to() { return to_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has_instance_type")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HHasInstanceType* b = HHasInstanceType::cast(other);
+    return (from_ == b->from()) && (to_ == b->to());
+  }
+
+ private:
+  InstanceType from_;
+  InstanceType to_;  // Inclusive range, not all combinations work.
+};
+
+
+class HHasCachedArrayIndex: public HUnaryPredicate {
+ public:
+  explicit HHasCachedArrayIndex(HValue* value) : HUnaryPredicate(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has_cached_array_index")
+};
+
+
+class HClassOfTest: public HUnaryPredicate {
+ public:
+  HClassOfTest(HValue* value, Handle<String> class_name)
+      : HUnaryPredicate(value), class_name_(class_name) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class_of_test")
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> class_name() const { return class_name_; }
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HClassOfTest* b = HClassOfTest::cast(other);
+    return class_name_.is_identical_to(b->class_name_);
+  }
+
+ private:
+  Handle<String> class_name_;
+};
+
+
+class HTypeofIs: public HUnaryPredicate {
+ public:
+  HTypeofIs(HValue* value, Handle<String> type_literal)
+      : HUnaryPredicate(value), type_literal_(type_literal) { }
+
+  Handle<String> type_literal() { return type_literal_; }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof_is")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HTypeofIs* b = HTypeofIs::cast(other);
+    return type_literal_.is_identical_to(b->type_literal_);
+  }
+
+ private:
+  Handle<String> type_literal_;
+};
+
+
+class HInstanceOf: public HBinaryOperation {
+ public:
+  HInstanceOf(HValue* left, HValue* right) : HBinaryOperation(left, right) {
+    set_representation(Representation::Tagged());
+    SetFlagMask(AllSideEffects());
+  }
+
+  virtual bool EmitAtUses() const { return uses()->length() <= 1; }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance_of")
+};
+
+
+class HPower: public HBinaryOperation {
+ public:
+  HPower(HValue* left, HValue* right)
+      : HBinaryOperation(left, right) {
+    set_representation(Representation::Double());
+    SetFlag(kUseGVN);
+  }
+
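+  // The base (index 0) must be a double; the exponent (index 1) has no
+  // required representation.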
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return (index == 1) ? Representation::None() : Representation::Double();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+};
+
+
+class HAdd: public HArithmeticBinaryOperation {
+ public:
+  HAdd(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+    SetFlag(kCanOverflow);
+  }
+
+  // Addition is only commutative when the operands are untagged; adding two
+  // tagged values might be a String concatenation, which is not commutative.
+  virtual bool IsCommutative() const {
+    return !representation().IsTagged();
+  }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Add, "add")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HSub: public HArithmeticBinaryOperation {
+ public:
+  HSub(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+    SetFlag(kCanOverflow);
+  }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  DECLARE_CONCRETE_INSTRUCTION(Sub, "sub")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HMul: public HArithmeticBinaryOperation {
+ public:
+  HMul(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+    SetFlag(kCanOverflow);
+  }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  // Only commutative if it is certain that we are not multiplying two
+  // objects.
+  virtual bool IsCommutative() const {
+    return !representation().IsTagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Mul, "mul")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HMod: public HArithmeticBinaryOperation {
+ public:
+  HMod(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+    SetFlag(kCanBeDivByZero);
+  }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  DECLARE_CONCRETE_INSTRUCTION(Mod, "mod")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HDiv: public HArithmeticBinaryOperation {
+ public:
+  HDiv(HValue* left, HValue* right) : HArithmeticBinaryOperation(left, right) {
+    SetFlag(kCanBeDivByZero);
+    SetFlag(kCanOverflow);
+  }
+
+  virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
+
+  DECLARE_CONCRETE_INSTRUCTION(Div, "div")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HBitAnd: public HBitwiseBinaryOperation {
+ public:
+  HBitAnd(HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(left, right) { }
+
+  virtual bool IsCommutative() const { return true; }
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(BitAnd, "bit_and")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HBitXor: public HBitwiseBinaryOperation {
+ public:
+  HBitXor(HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(left, right) { }
+
+  virtual bool IsCommutative() const { return true; }
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(BitXor, "bit_xor")
+};
+
+
+class HBitOr: public HBitwiseBinaryOperation {
+ public:
+  HBitOr(HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(left, right) { }
+
+  virtual bool IsCommutative() const { return true; }
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(BitOr, "bit_or")
+
+ protected:
+  virtual Range* InferRange();
+};
+
+
+class HShl: public HBitwiseBinaryOperation {
+ public:
+  HShl(HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(left, right) { }
+
+  virtual Range* InferRange();
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Shl, "shl")
+};
+
+
+class HShr: public HBitwiseBinaryOperation {
+ public:
+  HShr(HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(left, right) { }
+
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Shr, "shr")
+};
+
+
+class HSar: public HBitwiseBinaryOperation {
+ public:
+  HSar(HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(left, right) { }
+
+  virtual Range* InferRange();
+  virtual HType CalculateInferredType() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Sar, "sar")
+};
+
+
+class HOsrEntry: public HInstruction {
+ public:
+  explicit HOsrEntry(int ast_id) : ast_id_(ast_id) {
+    SetFlag(kChangesOsrEntries);
+  }
+
+  int ast_id() const { return ast_id_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr_entry")
+
+ private:
+  int ast_id_;
+};
+
+
+class HParameter: public HInstruction {
+ public:
+  explicit HParameter(unsigned index) : index_(index) {
+    set_representation(Representation::Tagged());
+  }
+
+  unsigned index() const { return index_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+
+ private:
+  unsigned index_;
+};
+
+
+class HCallStub: public HInstruction {
+ public:
+  HCallStub(CodeStub::Major major_key, int argument_count)
+      : major_key_(major_key),
+        argument_count_(argument_count),
+        transcendental_type_(TranscendentalCache::kNumberOfCaches) {
+    set_representation(Representation::Tagged());
+    SetFlagMask(AllSideEffects());
+  }
+
+  CodeStub::Major major_key() { return major_key_; }
+  int argument_count() { return argument_count_; }
+
+  void set_transcendental_type(TranscendentalCache::Type transcendental_type) {
+    transcendental_type_ = transcendental_type;
+  }
+  TranscendentalCache::Type transcendental_type() {
+    return transcendental_type_;
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call_stub")
+
+ private:
+  CodeStub::Major major_key_;
+  int argument_count_;
+  TranscendentalCache::Type transcendental_type_;
+};
+
+
+class HUnknownOSRValue: public HInstruction {
+ public:
+  HUnknownOSRValue() { set_representation(Representation::Tagged()); }
+
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown_osr_value")
+};
+
+
+class HLoadGlobal: public HInstruction {
+ public:
+  HLoadGlobal(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
+      : cell_(cell), check_hole_value_(check_hole_value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnGlobalVars);
+  }
+
+  Handle<JSGlobalPropertyCell> cell() const { return cell_; }
+  bool check_hole_value() const { return check_hole_value_; }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  virtual intptr_t Hashcode() const {
+    ASSERT(!Heap::allow_allocation(false));
+    return reinterpret_cast<intptr_t>(*cell_);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load_global")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HLoadGlobal* b = HLoadGlobal::cast(other);
+    return cell_.is_identical_to(b->cell());
+  }
+
+ private:
+  Handle<JSGlobalPropertyCell> cell_;
+  bool check_hole_value_;
+};
+
+
+class HStoreGlobal: public HUnaryOperation {
+ public:
+  HStoreGlobal(HValue* value, Handle<JSGlobalPropertyCell> cell)
+      : HUnaryOperation(value), cell_(cell) {
+    SetFlag(kChangesGlobalVars);
+  }
+
+  Handle<JSGlobalPropertyCell> cell() const { return cell_; }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store_global")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HStoreGlobal* b = HStoreGlobal::cast(other);
+    return cell_.is_identical_to(b->cell());
+  }
+
+ private:
+  Handle<JSGlobalPropertyCell> cell_;
+};
+
+
+class HLoadNamedField: public HUnaryOperation {
+ public:
+  HLoadNamedField(HValue* object, bool is_in_object, int offset)
+      : HUnaryOperation(object),
+        is_in_object_(is_in_object),
+        offset_(offset) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    if (is_in_object) {
+      SetFlag(kDependsOnInobjectFields);
+    } else {
+      SetFlag(kDependsOnBackingStoreFields);
+    }
+  }
+
+  HValue* object() const { return OperandAt(0); }
+  bool is_in_object() const { return is_in_object_; }
+  int offset() const { return offset_; }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load_named_field")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HLoadNamedField* b = HLoadNamedField::cast(other);
+    return is_in_object_ == b->is_in_object_ && offset_ == b->offset_;
+  }
+
+ private:
+  bool is_in_object_;
+  int offset_;
+};
+
+
+class HLoadNamedGeneric: public HUnaryOperation {
+ public:
+  HLoadNamedGeneric(HValue* object, Handle<Object> name)
+      : HUnaryOperation(object), name_(name) {
+    set_representation(Representation::Tagged());
+    SetFlagMask(AllSideEffects());
+  }
+
+  HValue* object() const { return OperandAt(0); }
+  Handle<Object> name() const { return name_; }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load_named_generic")
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HLoadNamedGeneric* b = HLoadNamedGeneric::cast(other);
+    return name_.is_identical_to(b->name_);
+  }
+
+ private:
+  Handle<Object> name_;
+};
+
+
+class HLoadKeyed: public HBinaryOperation {
+ public:
+  HLoadKeyed(HValue* obj, HValue* key) : HBinaryOperation(obj, key) {
+    set_representation(Representation::Tagged());
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  HValue* object() const { return OperandAt(0); }
+  HValue* key() const { return OperandAt(1); }
+
+  DECLARE_INSTRUCTION(LoadKeyed)
+};
+
+
+class HLoadKeyedFastElement: public HLoadKeyed {
+ public:
+  HLoadKeyedFastElement(HValue* obj, HValue* key) : HLoadKeyed(obj, key) {
+    SetFlag(kDependsOnArrayElements);
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    // The key is supposed to be Integer32.
+    return (index == 1) ? Representation::Integer32()
+        : Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement,
+                               "load_keyed_fast_element")
+};
+
+
+class HLoadKeyedGeneric: public HLoadKeyed {
+ public:
+  HLoadKeyedGeneric(HValue* obj, HValue* key) : HLoadKeyed(obj, key) {
+    SetFlagMask(AllSideEffects());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load_keyed_generic")
+};
+
+
+class HStoreNamed: public HBinaryOperation {
+ public:
+  HStoreNamed(HValue* obj, Handle<Object> name, HValue* val)
+      : HBinaryOperation(obj, val), name_(name) {
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  HValue* object() const { return OperandAt(0); }
+  Handle<Object> name() const { return name_; }
+  HValue* value() const { return OperandAt(1); }
+  void set_value(HValue* value) { SetOperandAt(1, value); }
+
+  DECLARE_INSTRUCTION(StoreNamed)
+
+ protected:
+  virtual bool DataEquals(HValue* other) const {
+    HStoreNamed* b = HStoreNamed::cast(other);
+    return name_.is_identical_to(b->name_);
+  }
+
+ private:
+  Handle<Object> name_;
+};
+
+
+class HStoreNamedField: public HStoreNamed {
+ public:
+  HStoreNamedField(HValue* obj,
+                   Handle<Object> name,
+                   HValue* val,
+                   bool in_object,
+                   int offset)
+      : HStoreNamed(obj, name, val),
+        is_in_object_(in_object),
+        offset_(offset) {
+    if (is_in_object_) {
+      SetFlag(kChangesInobjectFields);
+    } else {
+      SetFlag(kChangesBackingStoreFields);
+    }
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store_named_field")
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  bool is_in_object() const { return is_in_object_; }
+  int offset() const { return offset_; }
+  Handle<Map> transition() const { return transition_; }
+  void set_transition(Handle<Map> map) { transition_ = map; }
+
+ private:
+  bool is_in_object_;
+  int offset_;
+  Handle<Map> transition_;
+};
+
+
+class HStoreNamedGeneric: public HStoreNamed {
+ public:
+  HStoreNamedGeneric(HValue* obj, Handle<Object> name, HValue* val)
+      : HStoreNamed(obj, name, val) {
+    SetFlagMask(AllSideEffects());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store_named_generic")
+};
+
+
+class HStoreKeyed: public HInstruction {
+ public:
+  HStoreKeyed(HValue* obj, HValue* key, HValue* val) {
+    SetOperandAt(0, obj);
+    SetOperandAt(1, key);
+    SetOperandAt(2, val);
+  }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual int OperandCount() const { return operands_.length(); }
+  virtual HValue* OperandAt(int index) const { return operands_[index]; }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  HValue* object() const { return OperandAt(0); }
+  HValue* key() const { return OperandAt(1); }
+  HValue* value() const { return OperandAt(2); }
+
+  DECLARE_INSTRUCTION(StoreKeyed)
+
+ protected:
+  virtual void InternalSetOperandAt(int index, HValue* value) {
+    operands_[index] = value;
+  }
+
+ private:
+  HOperandVector<3> operands_;
+};
+
+
+class HStoreKeyedFastElement: public HStoreKeyed {
+ public:
+  HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val)
+      : HStoreKeyed(obj, key, val) {
+    SetFlag(kChangesArrayElements);
+  }
+
+  bool NeedsWriteBarrier() const {
+    return !value()->type().IsSmi();
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    // The key is supposed to be Integer32.
+    return (index == 1) ? Representation::Integer32()
+        : Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+                               "store_keyed_fast_element")
+};
+
+
+class HStoreKeyedGeneric: public HStoreKeyed {
+ public:
+  HStoreKeyedGeneric(HValue* obj, HValue* key, HValue* val)
+      : HStoreKeyed(obj, key, val) {
+    SetFlagMask(AllSideEffects());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
+};
+
+
+class HMaterializedLiteral: public HInstruction {
+ public:
+  HMaterializedLiteral(int index, int depth)
+      : literal_index_(index), depth_(depth) {
+    set_representation(Representation::Tagged());
+  }
+
+  int literal_index() const { return literal_index_; }
+  int depth() const { return depth_; }
+
+  DECLARE_INSTRUCTION(MaterializedLiteral)
+
+ private:
+  int literal_index_;
+  int depth_;
+};
+
+
+class HArrayLiteral: public HMaterializedLiteral {
+ public:
+  HArrayLiteral(Handle<FixedArray> constant_elements,
+                int length,
+                int literal_index,
+                int depth)
+      : HMaterializedLiteral(literal_index, depth),
+        length_(length),
+        constant_elements_(constant_elements) {}
+
+  Handle<FixedArray> constant_elements() const { return constant_elements_; }
+  int length() const { return length_; }
+
+  bool IsCopyOnWrite() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array_literal")
+
+ private:
+  int length_;
+  Handle<FixedArray> constant_elements_;
+};
+
+
+class HObjectLiteral: public HMaterializedLiteral {
+ public:
+  HObjectLiteral(Handle<FixedArray> constant_properties,
+                 bool fast_elements,
+                 int literal_index,
+                 int depth)
+      : HMaterializedLiteral(literal_index, depth),
+        constant_properties_(constant_properties),
+        fast_elements_(fast_elements) {}
+
+  Handle<FixedArray> constant_properties() const {
+    return constant_properties_;
+  }
+  bool fast_elements() const { return fast_elements_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object_literal")
+
+ private:
+  Handle<FixedArray> constant_properties_;
+  bool fast_elements_;
+};
+
+
+class HRegExpLiteral: public HMaterializedLiteral {
+ public:
+  HRegExpLiteral(Handle<String> pattern,
+                 Handle<String> flags,
+                 int literal_index)
+      : HMaterializedLiteral(literal_index, 0),
+        pattern_(pattern),
+        flags_(flags) { }
+
+  Handle<String> pattern() { return pattern_; }
+  Handle<String> flags() { return flags_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp_literal")
+
+ private:
+  Handle<String> pattern_;
+  Handle<String> flags_;
+};
+
+
+class HFunctionLiteral: public HInstruction {
+ public:
+  HFunctionLiteral(Handle<SharedFunctionInfo> shared, bool pretenure)
+      : shared_info_(shared), pretenure_(pretenure) {
+    set_representation(Representation::Tagged());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function_literal")
+
+  Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+  bool pretenure() const { return pretenure_; }
+
+ private:
+  Handle<SharedFunctionInfo> shared_info_;
+  bool pretenure_;
+};
+
+
+class HTypeof: public HUnaryOperation {
+ public:
+  explicit HTypeof(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class HValueOf: public HUnaryOperation {
+ public:
+  explicit HValueOf(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value_of")
+};
+
+
+class HDeleteProperty: public HBinaryOperation {
+ public:
+  HDeleteProperty(HValue* obj, HValue* key)
+      : HBinaryOperation(obj, key) {
+    set_representation(Representation::Tagged());
+    SetFlagMask(AllSideEffects());
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete_property")
+
+  HValue* object() const { return left(); }
+  HValue* key() const { return right(); }
+};
+
+#undef DECLARE_INSTRUCTION
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} }  // namespace v8::internal
+
+#endif  // V8_HYDROGEN_INSTRUCTIONS_H_
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
new file mode 100644
index 0000000..e34acd6
--- /dev/null
+++ b/src/hydrogen.cc
@@ -0,0 +1,5686 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "hydrogen.h"
+
+#include "codegen.h"
+#include "data-flow.h"
+#include "full-codegen.h"
+#include "hashmap.h"
+#include "lithium-allocator.h"
+#include "parser.h"
+#include "scopes.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-codegen-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-codegen-arm.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+HBasicBlock::HBasicBlock(HGraph* graph)
+    : block_id_(graph->GetNextBlockID()),
+      graph_(graph),
+      phis_(4),
+      first_(NULL),
+      last_(NULL),
+      end_(NULL),
+      loop_information_(NULL),
+      predecessors_(2),
+      dominator_(NULL),
+      dominated_blocks_(4),
+      last_environment_(NULL),
+      argument_count_(-1),
+      first_instruction_index_(-1),
+      last_instruction_index_(-1),
+      deleted_phis_(4),
+      is_inline_return_target_(false) {
+}
+
+
+void HBasicBlock::AttachLoopInformation() {
+  ASSERT(!IsLoopHeader());
+  loop_information_ = new HLoopInformation(this);
+}
+
+
+void HBasicBlock::DetachLoopInformation() {
+  ASSERT(IsLoopHeader());
+  loop_information_ = NULL;
+}
+
+
+void HBasicBlock::AddPhi(HPhi* phi) {
+  ASSERT(!IsStartBlock());
+  phis_.Add(phi);
+  phi->SetBlock(this);
+}
+
+
+void HBasicBlock::RemovePhi(HPhi* phi) {
+  ASSERT(phi->block() == this);
+  ASSERT(phis_.Contains(phi));
+  ASSERT(phi->HasNoUses());
+  phi->ClearOperands();
+  phis_.RemoveElement(phi);
+  phi->SetBlock(NULL);
+}
+
+
+void HBasicBlock::AddInstruction(HInstruction* instr) {
+  ASSERT(!IsStartBlock() || !IsFinished());
+  ASSERT(!instr->IsLinked());
+  ASSERT(!IsFinished());
+  if (first_ == NULL) {
+    HBlockEntry* entry = new HBlockEntry();
+    entry->InitializeAsFirst(this);
+    first_ = entry;
+  }
+  instr->InsertAfter(GetLastInstruction());
+}
+
+
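+// Returns the last instruction before the control-flow end, or the tail of
+// the instruction list if the block is not yet finished. The last_ pointer
+// caches the search and is advanced lazily.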
+HInstruction* HBasicBlock::GetLastInstruction() {
+  if (end_ != NULL) return end_->previous();
+  if (first_ == NULL) return NULL;
+  if (last_ == NULL) last_ = first_;
+  while (last_->next() != NULL) last_ = last_->next();
+  return last_;
+}
+
+
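+// Builds an HSimulate from the current environment: pushed values are
+// recorded in stack order, assigned variables with their current values,
+// and the environment's push/pop/assignment history is then cleared.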
+HSimulate* HBasicBlock::CreateSimulate(int id) {
+  ASSERT(HasEnvironment());
+  HEnvironment* environment = last_environment();
+  ASSERT(id == AstNode::kNoNumber ||
+         environment->closure()->shared()->VerifyBailoutId(id));
+
+  int push_count = environment->push_count();
+  int pop_count = environment->pop_count();
+
+  int length = environment->values()->length();
+  HSimulate* instr = new HSimulate(id, pop_count, length);
+  for (int i = push_count - 1; i >= 0; --i) {
+    instr->AddPushedValue(environment->ExpressionStackAt(i));
+  }
+  for (int i = 0; i < environment->assigned_variables()->length(); ++i) {
+    int index = environment->assigned_variables()->at(i);
+    instr->AddAssignedValue(index, environment->Lookup(index));
+  }
+  environment->ClearHistory();
+  return instr;
+}
+
+
+void HBasicBlock::Finish(HControlInstruction* end) {
+  ASSERT(!IsFinished());
+  AddInstruction(end);
+  end_ = end;
+  if (end->FirstSuccessor() != NULL) {
+    end->FirstSuccessor()->RegisterPredecessor(this);
+    if (end->SecondSuccessor() != NULL) {
+      end->SecondSuccessor()->RegisterPredecessor(this);
+    }
+  }
+}
+
+
+void HBasicBlock::Goto(HBasicBlock* block, bool include_stack_check) {
+  AddSimulate(AstNode::kNoNumber);
+  HGoto* instr = new HGoto(block);
+  instr->set_include_stack_check(include_stack_check);
+  Finish(instr);
+}
+
+
+void HBasicBlock::SetInitialEnvironment(HEnvironment* env) {
+  ASSERT(!HasEnvironment());
+  ASSERT(first() == NULL);
+  UpdateEnvironment(env);
+}
+
+
+void HBasicBlock::SetJoinId(int id) {
+  int length = predecessors_.length();
+  ASSERT(length > 0);
+  for (int i = 0; i < length; i++) {
+    HBasicBlock* predecessor = predecessors_[i];
+    ASSERT(predecessor->end()->IsGoto());
+    HSimulate* simulate = HSimulate::cast(predecessor->GetLastInstruction());
+    // We only need to verify the ID once.
+    ASSERT(i != 0 ||
+           predecessor->last_environment()->closure()->shared()
+               ->VerifyBailoutId(id));
+    simulate->set_ast_id(id);
+  }
+}
+
+
+bool HBasicBlock::Dominates(HBasicBlock* other) const {
+  HBasicBlock* current = other->dominator();
+  while (current != NULL) {
+    if (current == this) return true;
+    current = current->dominator();
+  }
+  return false;
+}
+
+
+void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
+  ASSERT(IsLoopHeader());
+
+  SetJoinId(stmt->EntryId());
+  if (predecessors()->length() == 1) {
+    // This is a degenerate loop.
+    DetachLoopInformation();
+    return;
+  }
+
+  // Only the first entry into the loop is from outside the loop. All other
+  // entries must be back edges.
+  for (int i = 1; i < predecessors()->length(); ++i) {
+    loop_information()->RegisterBackEdge(predecessors()->at(i));
+  }
+}
+
+
+void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
+  if (!predecessors_.is_empty()) {
+    // Only loop header blocks can have a predecessor added after
+    // instructions have been added to the block (they have phis for all
+    // values in the environment; these phis may be eliminated later).
+    ASSERT(IsLoopHeader() || first_ == NULL);
+    HEnvironment* incoming_env = pred->last_environment();
+    if (IsLoopHeader()) {
+      ASSERT(phis()->length() == incoming_env->values()->length());
+      for (int i = 0; i < phis_.length(); ++i) {
+        phis_[i]->AddInput(incoming_env->values()->at(i));
+      }
+    } else {
+      last_environment()->AddIncomingEdge(this, pred->last_environment());
+    }
+  } else if (!HasEnvironment() && !IsFinished()) {
+    ASSERT(!IsLoopHeader());
+    SetInitialEnvironment(pred->last_environment()->Copy());
+  }
+
+  predecessors_.Add(pred);
+}
+
+
+void HBasicBlock::AddDominatedBlock(HBasicBlock* block) {
+  ASSERT(!dominated_blocks_.Contains(block));
+  // Keep the list of dominated blocks sorted by block id so that a
+  // predecessor always appears in the list before its successor.
+  int index = 0;
+  while (index < dominated_blocks_.length() &&
+         dominated_blocks_[index]->block_id() < block->block_id()) {
+    ++index;
+  }
+  dominated_blocks_.InsertAt(index, block);
+}
+
+
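+// Standard dominator-intersection step: walk the two candidate chains
+// upwards until they meet (relying on a block's id being larger than its
+// dominator's), then reparent this block if its dominator changed.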
+void HBasicBlock::AssignCommonDominator(HBasicBlock* other) {
+  if (dominator_ == NULL) {
+    dominator_ = other;
+    other->AddDominatedBlock(this);
+  } else if (other->dominator() != NULL) {
+    HBasicBlock* first = dominator_;
+    HBasicBlock* second = other;
+
+    while (first != second) {
+      if (first->block_id() > second->block_id()) {
+        first = first->dominator();
+      } else {
+        second = second->dominator();
+      }
+      ASSERT(first != NULL && second != NULL);
+    }
+
+    if (dominator_ != first) {
+      ASSERT(dominator_->dominated_blocks_.Contains(this));
+      dominator_->dominated_blocks_.RemoveElement(this);
+      dominator_ = first;
+      first->AddDominatedBlock(this);
+    }
+  }
+}
+
+
+int HBasicBlock::PredecessorIndexOf(HBasicBlock* predecessor) const {
+  for (int i = 0; i < predecessors_.length(); ++i) {
+    if (predecessors_[i] == predecessor) return i;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+#ifdef DEBUG
+void HBasicBlock::Verify() {
+  // Check that every block is finished.
+  ASSERT(IsFinished());
+  ASSERT(block_id() >= 0);
+
+  // Verify that all blocks targeting a branch target have the same boolean
+  // value on top of their expression stack.
+  if (!cond().is_null()) {
+    ASSERT(predecessors()->length() > 0);
+    for (int i = 1; i < predecessors()->length(); i++) {
+      HBasicBlock* pred = predecessors()->at(i);
+      HValue* top = pred->last_environment()->Top();
+      ASSERT(top->IsConstant());
+      Object* a = *HConstant::cast(top)->handle();
+      Object* b = *cond();
+      ASSERT(a == b);
+    }
+  }
+}
+#endif
+
+
+void HLoopInformation::RegisterBackEdge(HBasicBlock* block) {
+  this->back_edges_.Add(block);
+  AddBlock(block);
+}
+
+
+HBasicBlock* HLoopInformation::GetLastBackEdge() const {
+  int max_id = -1;
+  HBasicBlock* result = NULL;
+  for (int i = 0; i < back_edges_.length(); ++i) {
+    HBasicBlock* cur = back_edges_[i];
+    if (cur->block_id() > max_id) {
+      max_id = cur->block_id();
+      result = cur;
+    }
+  }
+  return result;
+}
+
+
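+// Adds a block, and transitively its predecessors, to this loop. A block
+// that already belongs to an inner loop is represented by that loop's
+// header instead of being added directly.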
+void HLoopInformation::AddBlock(HBasicBlock* block) {
+  if (block == loop_header()) return;
+  if (block->parent_loop_header() == loop_header()) return;
+  if (block->parent_loop_header() != NULL) {
+    AddBlock(block->parent_loop_header());
+  } else {
+    block->set_parent_loop_header(loop_header());
+    blocks_.Add(block);
+    for (int i = 0; i < block->predecessors()->length(); ++i) {
+      AddBlock(block->predecessors()->at(i));
+    }
+  }
+}
+
+
+#ifdef DEBUG
+
+// Checks reachability of the blocks in this graph and stores a bit in
+// the BitVector "reachable()" for every block that can be reached
+// from the start block of the graph. If "dont_visit" is non-null, the given
+// block is treated as if it were not part of the graph. "visited_count()"
+// returns the number of reachable blocks.
+class ReachabilityAnalyzer BASE_EMBEDDED {
+ public:
+  ReachabilityAnalyzer(HBasicBlock* entry_block,
+                       int block_count,
+                       HBasicBlock* dont_visit)
+      : visited_count_(0),
+        stack_(16),
+        reachable_(block_count),
+        dont_visit_(dont_visit) {
+    PushBlock(entry_block);
+    Analyze();
+  }
+
+  int visited_count() const { return visited_count_; }
+  const BitVector* reachable() const { return &reachable_; }
+
+ private:
+  void PushBlock(HBasicBlock* block) {
+    if (block != NULL && block != dont_visit_ &&
+        !reachable_.Contains(block->block_id())) {
+      reachable_.Add(block->block_id());
+      stack_.Add(block);
+      visited_count_++;
+    }
+  }
+
+  void Analyze() {
+    while (!stack_.is_empty()) {
+      HControlInstruction* end = stack_.RemoveLast()->end();
+      PushBlock(end->FirstSuccessor());
+      PushBlock(end->SecondSuccessor());
+    }
+  }
+
+  int visited_count_;
+  ZoneList<HBasicBlock*> stack_;
+  BitVector reachable_;
+  HBasicBlock* dont_visit_;
+};
+
+
+void HGraph::Verify() const {
+  for (int i = 0; i < blocks_.length(); i++) {
+    HBasicBlock* block = blocks_.at(i);
+
+    block->Verify();
+
+    // Check that every block contains at least one node and that only the last
+    // node is a control instruction.
+    HInstruction* current = block->first();
+    ASSERT(current != NULL && current->IsBlockEntry());
+    while (current != NULL) {
+      ASSERT((current->next() == NULL) == current->IsControlInstruction());
+      ASSERT(current->block() == block);
+      current->Verify();
+      current = current->next();
+    }
+
+    // Check that successors are correctly set.
+    HBasicBlock* first = block->end()->FirstSuccessor();
+    HBasicBlock* second = block->end()->SecondSuccessor();
+    ASSERT(second == NULL || first != NULL);
+
+    // Check that the predecessor array is correct.
+    if (first != NULL) {
+      ASSERT(first->predecessors()->Contains(block));
+      if (second != NULL) {
+        ASSERT(second->predecessors()->Contains(block));
+      }
+    }
+
+    // Check that phis have correct arguments.
+    for (int j = 0; j < block->phis()->length(); j++) {
+      HPhi* phi = block->phis()->at(j);
+      phi->Verify();
+    }
+
+    // Check that all join blocks have predecessors that end with an
+    // unconditional goto and agree on their environment node id.
+    if (block->predecessors()->length() >= 2) {
+      int id = block->predecessors()->first()->last_environment()->ast_id();
+      for (int k = 0; k < block->predecessors()->length(); k++) {
+        HBasicBlock* predecessor = block->predecessors()->at(k);
+        ASSERT(predecessor->end()->IsGoto());
+        ASSERT(predecessor->last_environment()->ast_id() == id);
+      }
+    }
+  }
+
+  // Check the special property of the first block: it has no predecessors.
+  ASSERT(blocks_.at(0)->predecessors()->is_empty());
+
+  // Check that the graph is fully connected.
+  ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
+  ASSERT(analyzer.visited_count() == blocks_.length());
+
+  // Check that the entry block's dominator is NULL.
+  ASSERT(entry_block_->dominator() == NULL);
+
+  // Check dominators.
+  for (int i = 0; i < blocks_.length(); ++i) {
+    HBasicBlock* block = blocks_.at(i);
+    if (block->dominator() == NULL) {
+      // Only the start block may have no dominator assigned.
+      ASSERT(i == 0);
+    } else {
+      // Assert that the block is unreachable if its dominator is excluded
+      // from the traversal.
+      ReachabilityAnalyzer dominator_analyzer(entry_block_,
+                                              blocks_.length(),
+                                              block->dominator());
+      ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
+    }
+  }
+}
+
+#endif
+
+
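+// Lazily materializes a shared constant: created on first request, inserted
+// after the canonical undefined constant, and cached in the SetOncePointer.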
+HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
+                               Object* value) {
+  if (!pointer->is_set()) {
+    HConstant* constant = new HConstant(Handle<Object>(value),
+                                        Representation::Tagged());
+    constant->InsertAfter(GetConstantUndefined());
+    pointer->set(constant);
+  }
+  return pointer->get();
+}
+
+
+HConstant* HGraph::GetConstant1() {
+  return GetConstant(&constant_1_, Smi::FromInt(1));
+}
+
+
+HConstant* HGraph::GetConstantMinus1() {
+  return GetConstant(&constant_minus1_, Smi::FromInt(-1));
+}
+
+
+HConstant* HGraph::GetConstantTrue() {
+  return GetConstant(&constant_true_, Heap::true_value());
+}
+
+
+HConstant* HGraph::GetConstantFalse() {
+  return GetConstant(&constant_false_, Heap::false_value());
+}
+
+
+void HSubgraph::AppendOptional(HSubgraph* graph,
+                               bool on_true_branch,
+                               HValue* boolean_value) {
+  ASSERT(HasExit() && graph->HasExit());
+  HBasicBlock* other_block = graph_->CreateBasicBlock();
+  HBasicBlock* join_block = graph_->CreateBasicBlock();
+
+  HBasicBlock* true_branch = other_block;
+  HBasicBlock* false_branch = graph->entry_block();
+  if (on_true_branch) {
+    true_branch = graph->entry_block();
+    false_branch = other_block;
+  }
+
+  exit_block_->Finish(new HBranch(true_branch, false_branch, boolean_value));
+  other_block->Goto(join_block);
+  graph->exit_block()->Goto(join_block);
+  exit_block_ = join_block;
+}
+
+
+void HSubgraph::AppendJoin(HSubgraph* then_graph,
+                           HSubgraph* else_graph,
+                           AstNode* node) {
+  if (then_graph->HasExit() && else_graph->HasExit()) {
+    // We need to merge, create new merge block.
+    HBasicBlock* join_block = graph_->CreateBasicBlock();
+    then_graph->exit_block()->Goto(join_block);
+    else_graph->exit_block()->Goto(join_block);
+    join_block->SetJoinId(node->id());
+    exit_block_ = join_block;
+  } else if (then_graph->HasExit()) {
+    exit_block_ = then_graph->exit_block_;
+  } else if (else_graph->HasExit()) {
+    exit_block_ = else_graph->exit_block_;
+  } else {
+    exit_block_ = NULL;
+  }
+}
+
+
+void HSubgraph::ResolveContinue(IterationStatement* statement) {
+  HBasicBlock* continue_block = BundleContinue(statement);
+  if (continue_block != NULL) {
+    exit_block_ = JoinBlocks(exit_block(),
+                             continue_block,
+                             statement->ContinueId());
+  }
+}
+
+
+HBasicBlock* HSubgraph::BundleBreak(BreakableStatement* statement) {
+  return BundleBreakContinue(statement, false, statement->ExitId());
+}
+
+
+HBasicBlock* HSubgraph::BundleContinue(IterationStatement* statement) {
+  return BundleBreakContinue(statement, true, statement->ContinueId());
+}
+
+
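+// Collects all unresolved break (or continue) records that target the given
+// statement: each recorded block is wired to a fresh join block, which is
+// returned (NULL when there were no matching records).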
+HBasicBlock* HSubgraph::BundleBreakContinue(BreakableStatement* statement,
+                                            bool is_continue,
+                                            int join_id) {
+  HBasicBlock* result = NULL;
+  const ZoneList<BreakContinueInfo*>* infos = break_continue_info();
+  for (int i = 0; i < infos->length(); ++i) {
+    BreakContinueInfo* info = infos->at(i);
+    if (info->is_continue() == is_continue &&
+        info->target() == statement &&
+        !info->IsResolved()) {
+      if (result == NULL) {
+        result = graph_->CreateBasicBlock();
+      }
+      info->block()->Goto(result);
+      info->Resolve();
+    }
+  }
+
+  if (result != NULL) result->SetJoinId(join_id);
+
+  return result;
+}
+
+
+HBasicBlock* HSubgraph::JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id) {
+  if (a == NULL) return b;
+  if (b == NULL) return a;
+  HBasicBlock* target = graph_->CreateBasicBlock();
+  a->Goto(target);
+  b->Goto(target);
+  target->SetJoinId(id);
+  return target;
+}
+
+
+void HSubgraph::AppendEndless(HSubgraph* body, IterationStatement* statement) {
+  ConnectExitTo(body->entry_block());
+  body->ResolveContinue(statement);
+  body->ConnectExitTo(body->entry_block(), true);
+  exit_block_ = body->BundleBreak(statement);
+  body->entry_block()->PostProcessLoopHeader(statement);
+}
+
+
+void HSubgraph::AppendDoWhile(HSubgraph* body,
+                              IterationStatement* statement,
+                              HSubgraph* go_back,
+                              HSubgraph* exit) {
+  ConnectExitTo(body->entry_block());
+  go_back->ConnectExitTo(body->entry_block(), true);
+
+  HBasicBlock* break_block = body->BundleBreak(statement);
+  exit_block_ =
+      JoinBlocks(exit->exit_block(), break_block, statement->ExitId());
+  body->entry_block()->PostProcessLoopHeader(statement);
+}
+
+
+void HSubgraph::AppendWhile(HSubgraph* condition,
+                            HSubgraph* body,
+                            IterationStatement* statement,
+                            HSubgraph* continue_subgraph,
+                            HSubgraph* exit) {
+  ConnectExitTo(condition->entry_block());
+
+  HBasicBlock* break_block = body->BundleBreak(statement);
+  exit_block_ =
+      JoinBlocks(exit->exit_block(), break_block, statement->ExitId());
+
+  if (continue_subgraph != NULL) {
+    body->ConnectExitTo(continue_subgraph->entry_block(), true);
+    continue_subgraph->entry_block()->SetJoinId(statement->EntryId());
+    exit_block_ = JoinBlocks(exit_block_,
+                             continue_subgraph->exit_block(),
+                             statement->ExitId());
+  } else {
+    body->ConnectExitTo(condition->entry_block(), true);
+  }
+  condition->entry_block()->PostProcessLoopHeader(statement);
+}
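+
+// Rough sketch of the wiring above (illustrative): the current exit feeds
+// the condition's entry, and the body's exit forms the back edge (the
+// 'true' argument apparently marking the goto as carrying a stack check;
+// see the stack check eliminator later in this file). Without a continue
+// subgraph the back edge returns straight to the condition; with one, the
+// back edge targets it instead and its exit, like any bundled break block,
+// is folded into the loop exit.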
+
+
+void HSubgraph::Append(HSubgraph* next, BreakableStatement* stmt) {
+  exit_block_->Goto(next->entry_block());
+  exit_block_ = next->exit_block_;
+
+  if (stmt != NULL) {
+    next->entry_block()->SetJoinId(stmt->EntryId());
+    HBasicBlock* break_block = next->BundleBreak(stmt);
+    exit_block_ = JoinBlocks(exit_block(), break_block, stmt->ExitId());
+  }
+}
+
+
+void HSubgraph::FinishExit(HControlInstruction* instruction) {
+  ASSERT(HasExit());
+  exit_block_->Finish(instruction);
+  exit_block_->ClearEnvironment();
+  exit_block_ = NULL;
+}
+
+
+void HSubgraph::FinishBreakContinue(BreakableStatement* target,
+                                    bool is_continue) {
+  ASSERT(!exit_block_->IsFinished());
+  BreakContinueInfo* info = new BreakContinueInfo(target, exit_block_,
+                                                  is_continue);
+  break_continue_info_.Add(info);
+  exit_block_ = NULL;
+}
+
+
+HGraph::HGraph(CompilationInfo* info)
+    : HSubgraph(this),
+      next_block_id_(0),
+      info_(info),
+      blocks_(8),
+      values_(16),
+      phi_list_(NULL) {
+  start_environment_ = new HEnvironment(NULL, info->scope(), info->closure());
+  start_environment_->set_ast_id(info->function()->id());
+}
+
+
+Handle<Code> HGraph::Compile() {
+  int values = GetMaximumValueID();
+  if (values > LAllocator::max_initial_value_ids()) {
+    if (FLAG_trace_bailout) PrintF("Function is too big\n");
+    return Handle<Code>::null();
+  }
+
+  LAllocator allocator(values, this);
+  LChunkBuilder builder(this, &allocator);
+  LChunk* chunk = builder.Build();
+  if (chunk == NULL) return Handle<Code>::null();
+
+  if (!FLAG_alloc_lithium) return Handle<Code>::null();
+
+  allocator.Allocate(chunk);
+
+  if (!FLAG_use_lithium) return Handle<Code>::null();
+
+  MacroAssembler assembler(NULL, 0);
+  LCodeGen generator(chunk, &assembler, info());
+
+  if (FLAG_eliminate_empty_blocks) {
+    chunk->MarkEmptyBlocks();
+  }
+
+  if (generator.GenerateCode()) {
+    if (FLAG_trace_codegen) {
+      PrintF("Crankshaft Compiler - ");
+    }
+    CodeGenerator::MakeCodePrologue(info());
+    Code::Flags flags =
+        Code::ComputeFlags(Code::OPTIMIZED_FUNCTION, NOT_IN_LOOP);
+    Handle<Code> code =
+        CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
+    generator.FinishCode(code);
+    CodeGenerator::PrintCode(code, info());
+    return code;
+  }
+  return Handle<Code>::null();
+}
+
+
+HBasicBlock* HGraph::CreateBasicBlock() {
+  HBasicBlock* result = new HBasicBlock(this);
+  blocks_.Add(result);
+  return result;
+}
+
+
+void HGraph::Canonicalize() {
+  HPhase phase("Canonicalize", this);
+  if (FLAG_use_canonicalizing) {
+    for (int i = 0; i < blocks()->length(); ++i) {
+      HBasicBlock* b = blocks()->at(i);
+      for (HInstruction* insn = b->first(); insn != NULL; insn = insn->next()) {
+        HValue* value = insn->Canonicalize();
+        if (value != insn) {
+          if (value != NULL) {
+            insn->ReplaceAndDelete(value);
+          } else {
+            insn->Delete();
+          }
+        }
+      }
+    }
+  }
+}
+
+
+void HGraph::OrderBlocks() {
+  HPhase phase("Block ordering");
+  BitVector visited(blocks_.length());
+
+  ZoneList<HBasicBlock*> reverse_result(8);
+  HBasicBlock* start = blocks_[0];
+  Postorder(start, &visited, &reverse_result, NULL);
+
+  blocks_.Clear();
+  int index = 0;
+  for (int i = reverse_result.length() - 1; i >= 0; --i) {
+    HBasicBlock* b = reverse_result[i];
+    blocks_.Add(b);
+    b->set_block_id(index++);
+  }
+}
+
+
+void HGraph::PostorderLoopBlocks(HLoopInformation* loop,
+                                 BitVector* visited,
+                                 ZoneList<HBasicBlock*>* order,
+                                 HBasicBlock* loop_header) {
+  for (int i = 0; i < loop->blocks()->length(); ++i) {
+    HBasicBlock* b = loop->blocks()->at(i);
+    Postorder(b->end()->SecondSuccessor(), visited, order, loop_header);
+    Postorder(b->end()->FirstSuccessor(), visited, order, loop_header);
+    if (b->IsLoopHeader() && b != loop->loop_header()) {
+      PostorderLoopBlocks(b->loop_information(), visited, order, loop_header);
+    }
+  }
+}
+
+
+void HGraph::Postorder(HBasicBlock* block,
+                       BitVector* visited,
+                       ZoneList<HBasicBlock*>* order,
+                       HBasicBlock* loop_header) {
+  if (block == NULL || visited->Contains(block->block_id())) return;
+  if (block->parent_loop_header() != loop_header) return;
+  visited->Add(block->block_id());
+  if (block->IsLoopHeader()) {
+    PostorderLoopBlocks(block->loop_information(), visited, order, loop_header);
+    Postorder(block->end()->SecondSuccessor(), visited, order, block);
+    Postorder(block->end()->FirstSuccessor(), visited, order, block);
+  } else {
+    Postorder(block->end()->SecondSuccessor(), visited, order, loop_header);
+    Postorder(block->end()->FirstSuccessor(), visited, order, loop_header);
+  }
+  ASSERT(block->end()->FirstSuccessor() == NULL ||
+         order->Contains(block->end()->FirstSuccessor()) ||
+         block->end()->FirstSuccessor()->IsLoopHeader());
+  ASSERT(block->end()->SecondSuccessor() == NULL ||
+         order->Contains(block->end()->SecondSuccessor()) ||
+         block->end()->SecondSuccessor()->IsLoopHeader());
+  order->Add(block);
+}
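+
+// A rough sketch of the intent (not asserted by the code itself): loop
+// members are emitted via PostorderLoopBlocks before the loop header's own
+// successors, so when OrderBlocks reverses the postorder, each loop header
+// is followed by the blocks of its loop and every block precedes its
+// non-back-edge successors.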
+
+
+void HGraph::AssignDominators() {
+  HPhase phase("Assign dominators", this);
+  for (int i = 0; i < blocks_.length(); ++i) {
+    if (blocks_[i]->IsLoopHeader()) {
+      blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->first());
+    } else {
+      for (int j = 0; j < blocks_[i]->predecessors()->length(); ++j) {
+        blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->at(j));
+      }
+    }
+  }
+}
+
+
+void HGraph::EliminateRedundantPhis() {
+  HPhase phase("Phi elimination", this);
+  ZoneList<HValue*> uses_to_replace(2);
+
+  // Worklist of phis that can potentially be eliminated. Initialized
+  // with all phi nodes. When eliminating a phi node modifies another
+  // phi node, the modified phi is added back to the worklist.
+  ZoneList<HPhi*> worklist(blocks_.length());
+  for (int i = 0; i < blocks_.length(); ++i) {
+    worklist.AddAll(*blocks_[i]->phis());
+  }
+
+  while (!worklist.is_empty()) {
+    HPhi* phi = worklist.RemoveLast();
+    HBasicBlock* block = phi->block();
+
+    // Skip phi node if it was already replaced.
+    if (block == NULL) continue;
+
+    // Get replacement value if phi is redundant.
+    HValue* value = phi->GetRedundantReplacement();
+
+    if (value != NULL) {
+      // Iterate through uses finding the ones that should be
+      // replaced.
+      const ZoneList<HValue*>* uses = phi->uses();
+      for (int i = 0; i < uses->length(); ++i) {
+        HValue* use = uses->at(i);
+        if (!use->block()->IsStartBlock()) {
+          uses_to_replace.Add(use);
+        }
+      }
+      // Replace the uses and add any modified phis to the worklist.
+      for (int i = 0; i < uses_to_replace.length(); ++i) {
+        HValue* use = uses_to_replace[i];
+        phi->ReplaceAtUse(use, value);
+        if (use->IsPhi()) worklist.Add(HPhi::cast(use));
+      }
+      uses_to_replace.Rewind(0);
+      block->RemovePhi(phi);
+    } else if (phi->HasNoUses() &&
+               !phi->HasReceiverOperand() &&
+               FLAG_eliminate_dead_phis) {
+      // We can't eliminate phis that have the receiver as an operand:
+      // if an error is thrown, we need the correct receiver value in
+      // the environment to construct a correct stack trace.
+      block->RemovePhi(phi);
+      block->RecordDeletedPhi(phi->merged_index());
+    }
+  }
+}
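+
+// Worked example (illustrative): if a join block receives the same value v
+// on both incoming edges, its phi p = phi(v, v) is redundant and
+// GetRedundantReplacement() returns v, so all uses of p are rewritten to v.
+// If another phi q = phi(p, w) used p, the rewrite turns it into
+// q = phi(v, w), and q goes back on the worklist because it may now be
+// redundant itself (for instance when w == v).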
+
+
+bool HGraph::CollectPhis() {
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
+  phi_list_ = new ZoneList<HPhi*>(blocks->length());
+  for (int i = 0; i < blocks->length(); ++i) {
+    for (int j = 0; j < blocks->at(i)->phis()->length(); j++) {
+      HPhi* phi = blocks->at(i)->phis()->at(j);
+      phi_list_->Add(phi);
+      // We don't support phi uses of arguments for now.
+      if (phi->CheckFlag(HValue::kIsArguments)) return false;
+    }
+  }
+  return true;
+}
+
+
+void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
+  BitVector in_worklist(GetMaximumValueID());
+  for (int i = 0; i < worklist->length(); ++i) {
+    ASSERT(!in_worklist.Contains(worklist->at(i)->id()));
+    in_worklist.Add(worklist->at(i)->id());
+  }
+
+  while (!worklist->is_empty()) {
+    HValue* current = worklist->RemoveLast();
+    in_worklist.Remove(current->id());
+    if (current->UpdateInferredType()) {
+      for (int j = 0; j < current->uses()->length(); j++) {
+        HValue* use = current->uses()->at(j);
+        if (!in_worklist.Contains(use->id())) {
+          in_worklist.Add(use->id());
+          worklist->Add(use);
+        }
+      }
+    }
+  }
+}
+
+
+class HRangeAnalysis BASE_EMBEDDED {
+ public:
+  explicit HRangeAnalysis(HGraph* graph) : graph_(graph), changed_ranges_(16) {}
+
+  void Analyze();
+
+ private:
+  void TraceRange(const char* msg, ...);
+  void Analyze(HBasicBlock* block);
+  void InferControlFlowRange(HBranch* branch, HBasicBlock* dest);
+  void InferControlFlowRange(Token::Value op, HValue* value, HValue* other);
+  void InferPhiRange(HPhi* phi);
+  void InferRange(HValue* value);
+  void RollBackTo(int index);
+  void AddRange(HValue* value, Range* range);
+
+  HGraph* graph_;
+  ZoneList<HValue*> changed_ranges_;
+};
+
+
+void HRangeAnalysis::TraceRange(const char* msg, ...) {
+  if (FLAG_trace_range) {
+    va_list arguments;
+    va_start(arguments, msg);
+    OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
+void HRangeAnalysis::Analyze() {
+  HPhase phase("Range analysis", graph_);
+  Analyze(graph_->blocks()->at(0));
+}
+
+
+void HRangeAnalysis::Analyze(HBasicBlock* block) {
+  TraceRange("Analyzing block B%d\n", block->block_id());
+
+  int last_changed_range = changed_ranges_.length() - 1;
+
+  // Infer range based on control flow.
+  if (block->predecessors()->length() == 1) {
+    HBasicBlock* pred = block->predecessors()->first();
+    if (pred->end()->IsBranch()) {
+      InferControlFlowRange(HBranch::cast(pred->end()), block);
+    }
+  }
+
+  // Process phi instructions.
+  for (int i = 0; i < block->phis()->length(); ++i) {
+    HPhi* phi = block->phis()->at(i);
+    InferPhiRange(phi);
+  }
+
+  // Go through all instructions of the current block.
+  HInstruction* instr = block->first();
+  while (instr != block->end()) {
+    InferRange(instr);
+    instr = instr->next();
+  }
+
+  // Continue analysis in all dominated blocks.
+  for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
+    Analyze(block->dominated_blocks()->at(i));
+  }
+
+  RollBackTo(last_changed_range);
+}
+
+
+void HRangeAnalysis::InferControlFlowRange(HBranch* branch, HBasicBlock* dest) {
+  ASSERT(branch->FirstSuccessor() == dest || branch->SecondSuccessor() == dest);
+  ASSERT(branch->FirstSuccessor() != dest || branch->SecondSuccessor() != dest);
+
+  if (branch->value()->IsCompare()) {
+    HCompare* compare = HCompare::cast(branch->value());
+    Token::Value op = compare->token();
+    if (branch->SecondSuccessor() == dest) {
+      op = Token::NegateCompareOp(op);
+    }
+    Token::Value inverted_op = Token::InvertCompareOp(op);
+    InferControlFlowRange(op, compare->left(), compare->right());
+    InferControlFlowRange(inverted_op, compare->right(), compare->left());
+  }
+}
+
+
+// We know that value [op] other. Use this information to update the range of
+// value.
+void HRangeAnalysis::InferControlFlowRange(Token::Value op,
+                                           HValue* value,
+                                           HValue* other) {
+  Range* range = other->range();
+  if (range == NULL) range = new Range();
+  Range* new_range = NULL;
+
+  TraceRange("Control flow range infer %d %s %d\n",
+             value->id(),
+             Token::Name(op),
+             other->id());
+
+  if (op == Token::EQ || op == Token::EQ_STRICT) {
+    // The same range has to apply for value.
+    new_range = range->Copy();
+  } else if (op == Token::LT || op == Token::LTE) {
+    new_range = range->CopyClearLower();
+    if (op == Token::LT) {
+      new_range->AddConstant(-1);
+    }
+  } else if (op == Token::GT || op == Token::GTE) {
+    new_range = range->CopyClearUpper();
+    if (op == Token::GT) {
+      new_range->AddConstant(1);
+    }
+  }
+
+  if (new_range != NULL && !new_range->IsMostGeneric()) {
+    AddRange(value, new_range);
+  }
+}
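+
+// Worked example (illustrative): suppose the branch condition is x < y and
+// y's range is known to be [0, 10]. On the edge where the condition holds,
+// x's new range is derived from y's by clearing the lower bound
+// (CopyClearLower) and subtracting one (AddConstant(-1)), i.e. x <= 9. The
+// inverted comparison y > x then narrows y against x's range from the
+// other side in the same way.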
+
+
+void HRangeAnalysis::InferPhiRange(HPhi* phi) {
+  // TODO(twuerthinger): Infer loop phi ranges.
+  InferRange(phi);
+}
+
+
+void HRangeAnalysis::InferRange(HValue* value) {
+  ASSERT(!value->HasRange());
+  if (!value->representation().IsNone()) {
+    value->ComputeInitialRange();
+    Range* range = value->range();
+    TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
+               value->id(),
+               value->Mnemonic(),
+               range->lower(),
+               range->upper());
+  }
+}
+
+
+void HRangeAnalysis::RollBackTo(int index) {
+  for (int i = index + 1; i < changed_ranges_.length(); ++i) {
+    changed_ranges_[i]->RemoveLastAddedRange();
+  }
+  changed_ranges_.Rewind(index + 1);
+}
+
+
+void HRangeAnalysis::AddRange(HValue* value, Range* range) {
+  Range* original_range = value->range();
+  value->AddNewRange(range);
+  changed_ranges_.Add(value);
+  Range* new_range = value->range();
+  TraceRange("Updated range of %d set to [%d,%d]\n",
+             value->id(),
+             new_range->lower(),
+             new_range->upper());
+  if (original_range != NULL) {
+    TraceRange("Original range was [%d,%d]\n",
+               original_range->lower(),
+               original_range->upper());
+  }
+  TraceRange("New information was [%d,%d]\n",
+             range->lower(),
+             range->upper());
+}
+
+
+void TraceGVN(const char* msg, ...) {
+  if (FLAG_trace_gvn) {
+    va_list arguments;
+    va_start(arguments, msg);
+    OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
+HValueMap::HValueMap(const HValueMap* other)
+    : array_size_(other->array_size_),
+      lists_size_(other->lists_size_),
+      count_(other->count_),
+      present_flags_(other->present_flags_),
+      array_(Zone::NewArray<HValueMapListElement>(other->array_size_)),
+      lists_(Zone::NewArray<HValueMapListElement>(other->lists_size_)),
+      free_list_head_(other->free_list_head_) {
+  memcpy(array_, other->array_, array_size_ * sizeof(HValueMapListElement));
+  memcpy(lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
+}
+
+
+void HValueMap::Kill(int flags) {
+  int depends_flags = HValue::ConvertChangesToDependsFlags(flags);
+  if ((present_flags_ & depends_flags) == 0) return;
+  present_flags_ = 0;
+  for (int i = 0; i < array_size_; ++i) {
+    HValue* value = array_[i].value;
+    if (value != NULL) {
+      // Clear list of collisions first, so we know if it becomes empty.
+      int kept = kNil;  // List of kept elements.
+      int next;
+      for (int current = array_[i].next; current != kNil; current = next) {
+        next = lists_[current].next;
+        if ((lists_[current].value->flags() & depends_flags) != 0) {
+          // Drop it.
+          count_--;
+          lists_[current].next = free_list_head_;
+          free_list_head_ = current;
+        } else {
+          // Keep it.
+          lists_[current].next = kept;
+          kept = current;
+          present_flags_ |= lists_[current].value->flags();
+        }
+      }
+      array_[i].next = kept;
+
+      // Now possibly drop directly indexed element.
+      if ((array_[i].value->flags() & depends_flags) != 0) {  // Drop it.
+        count_--;
+        int head = array_[i].next;
+        if (head == kNil) {
+          array_[i].value = NULL;
+        } else {
+          array_[i].value = lists_[head].value;
+          array_[i].next = lists_[head].next;
+          lists_[head].next = free_list_head_;
+          free_list_head_ = head;
+        }
+      } else {
+        present_flags_ |= array_[i].value->flags();  // Keep it.
+      }
+    }
+  }
+}
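+
+// For illustration: Kill() is the invalidation half of value numbering.
+// An instruction with side effects reports what it changes; converting
+// those bits to the matching depends-on bits and dropping every cached
+// value that carries one of them ensures that, e.g., a cached load is not
+// reused across a store that may have clobbered the location it read.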
+
+
+HValue* HValueMap::Lookup(HValue* value) const {
+  uint32_t hash = static_cast<uint32_t>(value->Hashcode());
+  uint32_t pos = Bound(hash);
+  if (array_[pos].value != NULL) {
+    if (array_[pos].value->Equals(value)) return array_[pos].value;
+    int next = array_[pos].next;
+    while (next != kNil) {
+      if (lists_[next].value->Equals(value)) return lists_[next].value;
+      next = lists_[next].next;
+    }
+  }
+  return NULL;
+}
+
+
+void HValueMap::Resize(int new_size) {
+  ASSERT(new_size > count_);
+  // Hashing the values into the new array produces no more collisions than
+  // the old hash map did, so we can reuse the existing lists_ array if we
+  // are careful.
+
+  // Make sure we have at least one free element.
+  if (free_list_head_ == kNil) {
+    ResizeLists(lists_size_ << 1);
+  }
+
+  HValueMapListElement* new_array =
+      Zone::NewArray<HValueMapListElement>(new_size);
+  memset(new_array, 0, sizeof(HValueMapListElement) * new_size);
+
+  HValueMapListElement* old_array = array_;
+  int old_size = array_size_;
+
+  int old_count = count_;
+  count_ = 0;
+  // Do not modify present_flags_.  It is currently correct.
+  array_size_ = new_size;
+  array_ = new_array;
+
+  if (old_array != NULL) {
+    // Iterate over all the elements in lists, rehashing them.
+    for (int i = 0; i < old_size; ++i) {
+      if (old_array[i].value != NULL) {
+        int current = old_array[i].next;
+        while (current != kNil) {
+          Insert(lists_[current].value);
+          int next = lists_[current].next;
+          lists_[current].next = free_list_head_;
+          free_list_head_ = current;
+          current = next;
+        }
+        // Rehash the directly stored value.
+        Insert(old_array[i].value);
+      }
+    }
+  }
+  USE(old_count);
+  ASSERT(count_ == old_count);
+}
+
+
+void HValueMap::ResizeLists(int new_size) {
+  ASSERT(new_size > lists_size_);
+
+  HValueMapListElement* new_lists =
+      Zone::NewArray<HValueMapListElement>(new_size);
+  memset(new_lists, 0, sizeof(HValueMapListElement) * new_size);
+
+  HValueMapListElement* old_lists = lists_;
+  int old_size = lists_size_;
+
+  lists_size_ = new_size;
+  lists_ = new_lists;
+
+  if (old_lists != NULL) {
+    memcpy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
+  }
+  for (int i = old_size; i < lists_size_; ++i) {
+    lists_[i].next = free_list_head_;
+    free_list_head_ = i;
+  }
+}
+
+
+void HValueMap::Insert(HValue* value) {
+  ASSERT(value != NULL);
+  // Resize when the hashtable is half full.
+  if (count_ >= array_size_ >> 1) Resize(array_size_ << 1);
+  ASSERT(count_ < array_size_);
+  count_++;
+  uint32_t pos = Bound(static_cast<uint32_t>(value->Hashcode()));
+  if (array_[pos].value == NULL) {
+    array_[pos].value = value;
+    array_[pos].next = kNil;
+  } else {
+    if (free_list_head_ == kNil) {
+      ResizeLists(lists_size_ << 1);
+    }
+    int new_element_pos = free_list_head_;
+    ASSERT(new_element_pos != kNil);
+    free_list_head_ = lists_[free_list_head_].next;
+    lists_[new_element_pos].value = value;
+    lists_[new_element_pos].next = array_[pos].next;
+    ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].value != NULL);
+    array_[pos].next = new_element_pos;
+  }
+}
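+
+// A sketch of the layout (from the code above): array_ is the directly
+// indexed table and lists_ is a pool of overflow nodes threaded through
+// 'next' indices, with kNil terminating chains and free_list_head_
+// pointing at recycled nodes. Inserting into an occupied slot takes a node
+// from the free list and links it at the front of that slot's chain.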
+
+
+class HStackCheckEliminator BASE_EMBEDDED {
+ public:
+  explicit HStackCheckEliminator(HGraph* graph) : graph_(graph) { }
+
+  void Process();
+
+ private:
+  void RemoveStackCheck(HBasicBlock* block);
+
+  HGraph* graph_;
+};
+
+
+void HStackCheckEliminator::Process() {
+  // For each loop block, walk the dominator tree from the backwards branch
+  // to the loop header. If a call instruction is encountered, the backwards
+  // branch is dominated by a call and the stack check in the backwards
+  // branch can be removed.
+  for (int i = 0; i < graph_->blocks()->length(); i++) {
+    HBasicBlock* block = graph_->blocks()->at(i);
+    if (block->IsLoopHeader()) {
+      HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
+      HBasicBlock* dominator = back_edge;
+      bool back_edge_dominated_by_call = false;
+      while (dominator != block && !back_edge_dominated_by_call) {
+        HInstruction* instr = dominator->first();
+        while (instr != NULL && !back_edge_dominated_by_call) {
+          if (instr->IsCall()) {
+            RemoveStackCheck(back_edge);
+            back_edge_dominated_by_call = true;
+          }
+          instr = instr->next();
+        }
+        dominator = dominator->dominator();
+      }
+    }
+  }
+}
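+
+// Example of the idea (illustrative): in a loop like
+//   while (p) { f(); ... }
+// the call to f() performs its own stack check, so the stack check carried
+// by the loop's backwards goto is redundant; RemoveStackCheck clears
+// include_stack_check on that goto.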
+
+
+void HStackCheckEliminator::RemoveStackCheck(HBasicBlock* block) {
+  HInstruction* instr = block->first();
+  while (instr != NULL) {
+    if (instr->IsGoto()) {
+      HGoto::cast(instr)->set_include_stack_check(false);
+      return;
+    }
+    instr = instr->next();
+  }
+}
+
+
+class HGlobalValueNumberer BASE_EMBEDDED {
+ public:
+  explicit HGlobalValueNumberer(HGraph* graph)
+      : graph_(graph),
+        block_side_effects_(graph_->blocks()->length()),
+        loop_side_effects_(graph_->blocks()->length()) {
+    ASSERT(Heap::allow_allocation(false));
+    block_side_effects_.AddBlock(0, graph_->blocks()->length());
+    loop_side_effects_.AddBlock(0, graph_->blocks()->length());
+  }
+  ~HGlobalValueNumberer() {
+    ASSERT(!Heap::allow_allocation(true));
+  }
+
+  void Analyze();
+
+ private:
+  void AnalyzeBlock(HBasicBlock* block, HValueMap* map);
+  void ComputeBlockSideEffects();
+  void LoopInvariantCodeMotion();
+  void ProcessLoopBlock(HBasicBlock* block,
+                        HBasicBlock* before_loop,
+                        int loop_kills);
+  bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
+
+  HGraph* graph_;
+
+  // A map of block IDs to their side effects.
+  ZoneList<int> block_side_effects_;
+
+  // A map of loop header block IDs to their loop's side effects.
+  ZoneList<int> loop_side_effects_;
+};
+
+
+void HGlobalValueNumberer::Analyze() {
+  ComputeBlockSideEffects();
+  if (FLAG_loop_invariant_code_motion) {
+    LoopInvariantCodeMotion();
+  }
+  HValueMap* map = new HValueMap();
+  AnalyzeBlock(graph_->blocks()->at(0), map);
+}
+
+
+void HGlobalValueNumberer::ComputeBlockSideEffects() {
+  for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
+    // Compute side effects for the block.
+    HBasicBlock* block = graph_->blocks()->at(i);
+    HInstruction* instr = block->first();
+    int id = block->block_id();
+    int side_effects = 0;
+    while (instr != NULL) {
+      side_effects |= (instr->flags() & HValue::ChangesFlagsMask());
+      instr = instr->next();
+    }
+    block_side_effects_[id] |= side_effects;
+
+    // Loop headers are part of their loop.
+    if (block->IsLoopHeader()) {
+      loop_side_effects_[id] |= side_effects;
+    }
+
+    // Propagate loop side effects upwards.
+    if (block->HasParentLoopHeader()) {
+      int header_id = block->parent_loop_header()->block_id();
+      loop_side_effects_[header_id] |=
+          block->IsLoopHeader() ? loop_side_effects_[id] : side_effects;
+    }
+  }
+}
+
+
+void HGlobalValueNumberer::LoopInvariantCodeMotion() {
+  for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
+    HBasicBlock* block = graph_->blocks()->at(i);
+    if (block->IsLoopHeader()) {
+      int side_effects = loop_side_effects_[block->block_id()];
+      TraceGVN("Try loop invariant motion for block B%d effects=0x%x\n",
+               block->block_id(),
+               side_effects);
+
+      HBasicBlock* last = block->loop_information()->GetLastBackEdge();
+      for (int j = block->block_id(); j <= last->block_id(); ++j) {
+        ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects);
+      }
+    }
+  }
+}
+
+
+void HGlobalValueNumberer::ProcessLoopBlock(HBasicBlock* block,
+                                            HBasicBlock* loop_header,
+                                            int loop_kills) {
+  HBasicBlock* pre_header = loop_header->predecessors()->at(0);
+  int depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
+  TraceGVN("Loop invariant motion for B%d depends_flags=0x%x\n",
+           block->block_id(),
+           depends_flags);
+  HInstruction* instr = block->first();
+  while (instr != NULL) {
+    HInstruction* next = instr->next();
+    if (instr->CheckFlag(HValue::kUseGVN) &&
+        (instr->flags() & depends_flags) == 0) {
+      TraceGVN("Checking instruction %d (%s)\n",
+               instr->id(),
+               instr->Mnemonic());
+      bool inputs_loop_invariant = true;
+      for (int i = 0; i < instr->OperandCount(); ++i) {
+        if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
+          inputs_loop_invariant = false;
+        }
+      }
+
+      if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
+        TraceGVN("Found loop invariant instruction %d\n", instr->id());
+        // Move the instruction out of the loop.
+        instr->Unlink();
+        instr->InsertBefore(pre_header->end());
+      }
+    }
+    instr = next;
+  }
+}
+
+
+// Only move instructions that postdominate the loop header (i.e. are
+// always executed inside the loop). This avoids unnecessary
+// deoptimizations that would otherwise come from assuming the loop is
+// executed at least once.
+// TODO(fschneider): Better type feedback should give us information
+// about code that was never executed.
+bool HGlobalValueNumberer::ShouldMove(HInstruction* instr,
+                                      HBasicBlock* loop_header) {
+  if (!instr->IsChange() &&
+      FLAG_aggressive_loop_invariant_motion) return true;
+  HBasicBlock* block = instr->block();
+  bool result = true;
+  if (block != loop_header) {
+    for (int i = 1; i < loop_header->predecessors()->length(); ++i) {
+      bool found = false;
+      HBasicBlock* pred = loop_header->predecessors()->at(i);
+      while (pred != loop_header) {
+        if (pred == block) found = true;
+        pred = pred->dominator();
+      }
+      if (!found) {
+        result = false;
+        break;
+      }
+    }
+  }
+  return result;
+}
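+
+// Illustrative example: in
+//   while (c) { if (rare) { x = a / b; } ... }
+// the division's block is not on the dominator chain from the back edge to
+// the loop header, so ShouldMove returns false (unless
+// FLAG_aggressive_loop_invariant_motion applies): hoisting it could cause
+// a deoptimization for a division that would never have executed.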
+
+
+void HGlobalValueNumberer::AnalyzeBlock(HBasicBlock* block, HValueMap* map) {
+  TraceGVN("Analyzing block B%d\n", block->block_id());
+
+  // If this is a loop header kill everything killed by the loop.
+  if (block->IsLoopHeader()) {
+    map->Kill(loop_side_effects_[block->block_id()]);
+  }
+
+  // Go through all instructions of the current block.
+  HInstruction* instr = block->first();
+  while (instr != NULL) {
+    HInstruction* next = instr->next();
+    int flags = (instr->flags() & HValue::ChangesFlagsMask());
+    if (flags != 0) {
+      ASSERT(!instr->CheckFlag(HValue::kUseGVN));
+      // Clear all instructions in the map that are affected by side effects.
+      map->Kill(flags);
+      TraceGVN("Instruction %d kills\n", instr->id());
+    } else if (instr->CheckFlag(HValue::kUseGVN)) {
+      HValue* other = map->Lookup(instr);
+      if (other != NULL) {
+        ASSERT(instr->Equals(other) && other->Equals(instr));
+        TraceGVN("Replacing value %d (%s) with value %d (%s)\n",
+                 instr->id(),
+                 instr->Mnemonic(),
+                 other->id(),
+                 other->Mnemonic());
+        instr->ReplaceValue(other);
+        instr->Delete();
+      } else {
+        map->Add(instr);
+      }
+    }
+    instr = next;
+  }
+
+  // Recursively continue analysis for all immediately dominated blocks.
+  int length = block->dominated_blocks()->length();
+  for (int i = 0; i < length; ++i) {
+    HBasicBlock* dominated = block->dominated_blocks()->at(i);
+    // No need to copy the map for the last child in the dominator tree.
+    HValueMap* successor_map = (i == length - 1) ? map : map->Copy();
+
+    // If the dominated block is not a successor of this block, we have to
+    // kill everything killed on any path between this block and the
+    // dominated block. Note that we rely on the block ordering.
+    bool is_successor = false;
+    int predecessor_count = dominated->predecessors()->length();
+    for (int j = 0; !is_successor && j < predecessor_count; ++j) {
+      is_successor = (dominated->predecessors()->at(j) == block);
+    }
+
+    if (!is_successor) {
+      int side_effects = 0;
+      for (int j = block->block_id() + 1; j < dominated->block_id(); ++j) {
+        side_effects |= block_side_effects_[j];
+      }
+      successor_map->Kill(side_effects);
+    }
+
+    AnalyzeBlock(dominated, successor_map);
+  }
+}
+
+
+class HInferRepresentation BASE_EMBEDDED {
+ public:
+  explicit HInferRepresentation(HGraph* graph)
+      : graph_(graph), worklist_(8), in_worklist_(graph->GetMaximumValueID()) {}
+
+  void Analyze();
+
+ private:
+  Representation TryChange(HValue* current);
+  void AddToWorklist(HValue* current);
+  void InferBasedOnInputs(HValue* current);
+  void AddDependantsToWorklist(HValue* current);
+  void InferBasedOnUses(HValue* current);
+
+  HGraph* graph_;
+  ZoneList<HValue*> worklist_;
+  BitVector in_worklist_;
+};
+
+
+void HInferRepresentation::AddToWorklist(HValue* current) {
+  if (current->representation().IsSpecialization()) return;
+  if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
+  if (in_worklist_.Contains(current->id())) return;
+  worklist_.Add(current);
+  in_worklist_.Add(current->id());
+}
+
+
+// This method tries to specialize the representation type of the value
+// given as a parameter. The value is asked to infer its representation
+// based on its inputs. If the inferred representation is more specialized,
+// it becomes the new representation of the node.
+void HInferRepresentation::InferBasedOnInputs(HValue* current) {
+  Representation r = current->representation();
+  if (r.IsSpecialization()) return;
+  ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
+  Representation inferred = current->InferredRepresentation();
+  if (inferred.IsSpecialization()) {
+    current->ChangeRepresentation(inferred);
+    AddDependantsToWorklist(current);
+  }
+}
+
+
+void HInferRepresentation::AddDependantsToWorklist(HValue* current) {
+  for (int i = 0; i < current->uses()->length(); ++i) {
+    AddToWorklist(current->uses()->at(i));
+  }
+  for (int i = 0; i < current->OperandCount(); ++i) {
+    AddToWorklist(current->OperandAt(i));
+  }
+}
+
+
+// This method calculates whether specializing the representation of the value
+// given as the parameter has a benefit in terms of fewer necessary type
+// conversions. If there is a benefit, then the representation of the value is
+// specialized.
+void HInferRepresentation::InferBasedOnUses(HValue* current) {
+  Representation r = current->representation();
+  if (r.IsSpecialization() || current->HasNoUses()) return;
+  ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
+  Representation new_rep = TryChange(current);
+  if (!new_rep.IsNone()) {
+    if (!current->representation().Equals(new_rep)) {
+      current->ChangeRepresentation(new_rep);
+      AddDependantsToWorklist(current);
+    }
+  }
+}
+
+
+Representation HInferRepresentation::TryChange(HValue* current) {
+  // Array of use counts for each representation.
+  int use_count[Representation::kNumRepresentations];
+  for (int i = 0; i < Representation::kNumRepresentations; i++) {
+    use_count[i] = 0;
+  }
+
+  for (int i = 0; i < current->uses()->length(); ++i) {
+    HValue* use = current->uses()->at(i);
+    int index = use->LookupOperandIndex(0, current);
+    Representation req_rep = use->RequiredInputRepresentation(index);
+    if (req_rep.IsNone()) continue;
+    if (use->IsPhi()) {
+      HPhi* phi = HPhi::cast(use);
+      phi->AddIndirectUsesTo(&use_count[0]);
+    }
+    use_count[req_rep.kind()]++;
+  }
+  int tagged_count = use_count[Representation::kTagged];
+  int double_count = use_count[Representation::kDouble];
+  int int32_count = use_count[Representation::kInteger32];
+  int non_tagged_count = double_count + int32_count;
+
+  // If a non-loop phi has tagged uses, don't convert it to untagged.
+  if (current->IsPhi() && !current->block()->IsLoopHeader()) {
+    if (tagged_count > 0) return Representation::None();
+  }
+
+  if (non_tagged_count >= tagged_count) {
+    // More untagged than tagged.
+    if (double_count > 0) {
+      // There is at least one use that requires a double, so guess that
+      // the correct representation is double.
+      return Representation::Double();
+    } else if (int32_count > 0) {
+      return Representation::Integer32();
+    }
+  }
+  return Representation::None();
+}
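+
+// Worked example (illustrative): a value with three Integer32 uses and one
+// Tagged use has non_tagged_count == 3 >= tagged_count == 1, and since
+// double_count == 0 and int32_count > 0 the result is Integer32. If the
+// value were a non-loop phi, the single tagged use would instead force an
+// early Representation::None().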
+
+
+void HInferRepresentation::Analyze() {
+  HPhase phase("Infer representations", graph_);
+
+  // (1) Initialize bit vectors and count real uses. Each phi
+  // gets a bit-vector of length <number of phis>.
+  const ZoneList<HPhi*>* phi_list = graph_->phi_list();
+  int num_phis = phi_list->length();
+  ScopedVector<BitVector*> connected_phis(num_phis);
+  for (int i = 0; i < num_phis; i++) {
+    phi_list->at(i)->InitRealUses(i);
+    connected_phis[i] = new BitVector(num_phis);
+    connected_phis[i]->Add(i);
+  }
+
+  // (2) Do a fixed point iteration to find the set of connected phis.
+  // A phi is connected to another phi if its value is used either
+  // directly or indirectly through a transitive closure of the def-use
+  // relation.
+  bool change = true;
+  while (change) {
+    change = false;
+    for (int i = 0; i < num_phis; i++) {
+      HPhi* phi = phi_list->at(i);
+      for (int j = 0; j < phi->uses()->length(); j++) {
+        HValue* use = phi->uses()->at(j);
+        if (use->IsPhi()) {
+          int phi_use = HPhi::cast(use)->phi_id();
+          if (connected_phis[i]->UnionIsChanged(*connected_phis[phi_use])) {
+            change = true;
+          }
+        }
+      }
+    }
+  }
+
+  // (3) Sum up the non-phi use counts of all connected phis.
+  // Don't include the non-phi uses of the phi itself.
+  for (int i = 0; i < num_phis; i++) {
+    HPhi* phi = phi_list->at(i);
+    for (BitVector::Iterator it(connected_phis.at(i));
+         !it.Done();
+         it.Advance()) {
+      int index = it.Current();
+      if (index != i) {
+        HPhi* it_use = phi_list->at(it.Current());
+        phi->AddNonPhiUsesFrom(it_use);
+      }
+    }
+  }
+
+  for (int i = 0; i < graph_->blocks()->length(); ++i) {
+    HBasicBlock* block = graph_->blocks()->at(i);
+    const ZoneList<HPhi*>* phis = block->phis();
+    for (int j = 0; j < phis->length(); ++j) {
+      AddToWorklist(phis->at(j));
+    }
+
+    HInstruction* current = block->first();
+    while (current != NULL) {
+      AddToWorklist(current);
+      current = current->next();
+    }
+  }
+
+  while (!worklist_.is_empty()) {
+    HValue* current = worklist_.RemoveLast();
+    in_worklist_.Remove(current->id());
+    InferBasedOnInputs(current);
+    InferBasedOnUses(current);
+  }
+}
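+
+// For illustration of steps (1)-(3): if p1 is only used by another phi
+// p2 = phi(p1, ...) and only p2 has arithmetic (non-phi) uses, the fixed
+// point makes p1's connected set contain p2, and step (3) credits p2's
+// non-phi uses to p1 as well, so both phis are later driven toward the
+// same representation.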
+
+
+void HGraph::InitializeInferredTypes() {
+  HPhase phase("Inferring types", this);
+  InitializeInferredTypes(0, this->blocks_.length() - 1);
+}
+
+
+void HGraph::InitializeInferredTypes(int from_inclusive, int to_inclusive) {
+  for (int i = from_inclusive; i <= to_inclusive; ++i) {
+    HBasicBlock* block = blocks_[i];
+
+    const ZoneList<HPhi*>* phis = block->phis();
+    for (int j = 0; j < phis->length(); j++) {
+      phis->at(j)->UpdateInferredType();
+    }
+
+    HInstruction* current = block->first();
+    while (current != NULL) {
+      current->UpdateInferredType();
+      current = current->next();
+    }
+
+    if (block->IsLoopHeader()) {
+      HBasicBlock* last_back_edge =
+          block->loop_information()->GetLastBackEdge();
+      InitializeInferredTypes(i + 1, last_back_edge->block_id());
+      // Skip all blocks already processed by the recursive call.
+      i = last_back_edge->block_id();
+      // Update phis of the loop header now after the whole loop body is
+      // guaranteed to be processed.
+      ZoneList<HValue*> worklist(block->phis()->length());
+      for (int j = 0; j < block->phis()->length(); ++j) {
+        worklist.Add(block->phis()->at(j));
+      }
+      InferTypes(&worklist);
+    }
+  }
+}
+
+
+void HGraph::PropagateMinusZeroChecks(HValue* value, BitVector* visited) {
+  HValue* current = value;
+  while (current != NULL) {
+    if (visited->Contains(current->id())) return;
+
+    // For phis, we must propagate the check to all of its inputs.
+    if (current->IsPhi()) {
+      visited->Add(current->id());
+      HPhi* phi = HPhi::cast(current);
+      for (int i = 0; i < phi->OperandCount(); ++i) {
+        PropagateMinusZeroChecks(phi->OperandAt(i), visited);
+      }
+      break;
+    }
+
+    // For multiplication and division, we must propagate to the left and
+    // the right side.
+    if (current->IsMul()) {
+      HMul* mul = HMul::cast(current);
+      mul->EnsureAndPropagateNotMinusZero(visited);
+      PropagateMinusZeroChecks(mul->left(), visited);
+      PropagateMinusZeroChecks(mul->right(), visited);
+    } else if (current->IsDiv()) {
+      HDiv* div = HDiv::cast(current);
+      div->EnsureAndPropagateNotMinusZero(visited);
+      PropagateMinusZeroChecks(div->left(), visited);
+      PropagateMinusZeroChecks(div->right(), visited);
+    }
+
+    current = current->EnsureAndPropagateNotMinusZero(visited);
+  }
+}
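+
+// For illustration: the classic -0 hazard is x = a * b with a == 0 and
+// b < 0, which yields -0. When such an int32 value flows into a tagged or
+// double context, the check is propagated upward through the defining
+// multiplications and divisions (and through every input of a phi), so
+// those operations, roughly, get flagged to watch for a -0 result rather
+// than truncate it away.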
+
+
+void HGraph::InsertRepresentationChangeForUse(HValue* value,
+                                              HValue* use,
+                                              Representation to,
+                                              bool is_truncating) {
+  // Propagate flags for negative zero checks upwards from conversions
+  // int32-to-tagged and int32-to-double.
+  Representation from = value->representation();
+  if (from.IsInteger32()) {
+    ASSERT(to.IsTagged() || to.IsDouble());
+    BitVector visited(GetMaximumValueID());
+    PropagateMinusZeroChecks(value, &visited);
+  }
+
+  // Insert the representation change right before its use. For phi-uses we
+  // insert at the end of the corresponding predecessor.
+  HBasicBlock* insert_block = use->block();
+  if (use->IsPhi()) {
+    int index = 0;
+    while (use->OperandAt(index) != value) ++index;
+    insert_block = insert_block->predecessors()->at(index);
+  }
+
+  HInstruction* next = (insert_block == use->block())
+      ? HInstruction::cast(use)
+      : insert_block->end();
+
+  // For constants we try to make the representation change at compile
+  // time. When a representation change is not possible without loss of
+  // information, we treat constants like normal instructions and insert
+  // the change instructions for them.
+  HInstruction* new_value = NULL;
+  if (value->IsConstant()) {
+    HConstant* constant = HConstant::cast(value);
+    // Try to create a new copy of the constant with the new representation.
+    new_value = is_truncating
+        ? constant->CopyToTruncatedInt32()
+        : constant->CopyToRepresentation(to);
+  }
+
+  if (new_value == NULL) {
+    new_value = new HChange(value, value->representation(), to);
+  }
+
+  new_value->InsertBefore(next);
+  value->ReplaceFirstAtUse(use, new_value, to);
+}
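+
+// For illustration: converting a small integer constant from tagged to
+// Integer32 succeeds at compile time via CopyToRepresentation, so no
+// HChange is emitted. A constant like 0.5 cannot be converted to Integer32
+// without loss, so unless the use is truncating (CopyToTruncatedInt32) a
+// runtime HChange is inserted before the use instead.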
+
+
+int CompareConversionUses(HValue* a,
+                          HValue* b,
+                          Representation a_rep,
+                          Representation b_rep) {
+  if (a_rep.kind() > b_rep.kind()) {
+    // Make sure specializations are separated in the result array.
+    return 1;
+  }
+  // Put truncating conversions before non-truncating conversions.
+  bool a_truncate = a->CheckFlag(HValue::kTruncatingToInt32);
+  bool b_truncate = b->CheckFlag(HValue::kTruncatingToInt32);
+  if (a_truncate != b_truncate) {
+    return a_truncate ? -1 : 1;
+  }
+  // Sort by increasing block ID.
+  return a->block()->block_id() - b->block()->block_id();
+}
+
+
+void HGraph::InsertRepresentationChanges(HValue* current) {
+  Representation r = current->representation();
+  if (r.IsNone()) return;
+  if (current->uses()->length() == 0) return;
+
+  // Collect the representation changes in a sorted list.  This allows
+  // us to avoid duplicate changes without searching the list.
+  ZoneList<HValue*> to_convert(2);
+  ZoneList<Representation> to_convert_reps(2);
+  for (int i = 0; i < current->uses()->length(); ++i) {
+    HValue* use = current->uses()->at(i);
+    // The occurrence index is the index within the operand array of "use"
+    // at which "current" is used. While iterating through the use array we
+    // also have to step through the different occurrence indices.
+    int occurrence_index = 0;
+    if (use->UsesMultipleTimes(current)) {
+      occurrence_index = current->uses()->CountOccurrences(use, 0, i - 1);
+      if (FLAG_trace_representation) {
+        PrintF("Instruction %d is used multiple times at %d; occurrence=%d\n",
+               current->id(),
+               use->id(),
+               occurrence_index);
+      }
+    }
+    int operand_index = use->LookupOperandIndex(occurrence_index, current);
+    Representation req = use->RequiredInputRepresentation(operand_index);
+    if (req.IsNone() || req.Equals(r)) continue;
+    int index = 0;
+    while (to_convert.length() > index &&
+           CompareConversionUses(to_convert[index],
+                                 use,
+                                 to_convert_reps[index],
+                                 req) < 0) {
+      ++index;
+    }
+    if (FLAG_trace_representation) {
+      PrintF("Inserting a representation change to %s of %d for use at %d\n",
+             req.Mnemonic(),
+             current->id(),
+             use->id());
+    }
+    to_convert.InsertAt(index, use);
+    to_convert_reps.InsertAt(index, req);
+  }
+
+  for (int i = 0; i < to_convert.length(); ++i) {
+    HValue* use = to_convert[i];
+    Representation r_to = to_convert_reps[i];
+    bool is_truncating = use->CheckFlag(HValue::kTruncatingToInt32);
+    InsertRepresentationChangeForUse(current, use, r_to, is_truncating);
+  }
+
+  if (current->uses()->is_empty()) {
+    ASSERT(current->IsConstant());
+    current->Delete();
+  }
+}
+
+
+void HGraph::InsertRepresentationChanges() {
+  HPhase phase("Insert representation changes", this);
+
+  // Compute truncation flag for phis: Initially assume that all
+  // int32-phis allow truncation and iteratively remove the ones that
+  // are used in an operation that does not allow a truncating
+  // conversion.
+  // TODO(fschneider): Replace this with a worklist-based iteration.
+  for (int i = 0; i < phi_list()->length(); i++) {
+    HPhi* phi = phi_list()->at(i);
+    if (phi->representation().IsInteger32()) {
+      phi->SetFlag(HValue::kTruncatingToInt32);
+    }
+  }
+  bool change = true;
+  while (change) {
+    change = false;
+    for (int i = 0; i < phi_list()->length(); i++) {
+      HPhi* phi = phi_list()->at(i);
+      if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
+      for (int j = 0; j < phi->uses()->length(); j++) {
+        HValue* use = phi->uses()->at(j);
+        if (!use->CheckFlag(HValue::kTruncatingToInt32)) {
+          phi->ClearFlag(HValue::kTruncatingToInt32);
+          change = true;
+          break;
+        }
+      }
+    }
+  }
+
+  for (int i = 0; i < blocks_.length(); ++i) {
+    // Process phi instructions first.
+    for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
+      HPhi* phi = blocks_[i]->phis()->at(j);
+      InsertRepresentationChanges(phi);
+    }
+
+    // Process normal instructions.
+    HInstruction* current = blocks_[i]->first();
+    while (current != NULL) {
+      InsertRepresentationChanges(current);
+      current = current->next();
+    }
+  }
+}
+
+
+// Implementation of utility classes to represent an expression's context in
+// the AST.
+AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
+    : owner_(owner), kind_(kind), outer_(owner->ast_context()) {
+  owner->set_ast_context(this);  // Push.
+#ifdef DEBUG
+  original_count_ = owner->environment()->total_count();
+#endif
+}
+
+
+AstContext::~AstContext() {
+  owner_->set_ast_context(outer_);  // Pop.
+}
+
+
+EffectContext::~EffectContext() {
+  ASSERT(owner()->HasStackOverflow() ||
+         !owner()->subgraph()->HasExit() ||
+         owner()->environment()->total_count() == original_count_);
+}
+
+
+ValueContext::~ValueContext() {
+  ASSERT(owner()->HasStackOverflow() ||
+         !owner()->subgraph()->HasExit() ||
+         owner()->environment()->total_count() == original_count_ + 1);
+}
+
+
+void EffectContext::ReturnValue(HValue* value) {
+  // The value is simply ignored.
+}
+
+
+void ValueContext::ReturnValue(HValue* value) {
+  // The value is tracked in the bailout environment, and communicated
+  // through the environment as the result of the expression.
+  owner()->Push(value);
+}
+
+
+void TestContext::ReturnValue(HValue* value) {
+  BuildBranch(value);
+}
+
+
+void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+  owner()->AddInstruction(instr);
+  if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+}
+
+
+void ValueContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+  owner()->AddInstruction(instr);
+  owner()->Push(instr);
+  if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
+}
+
+
+void TestContext::ReturnInstruction(HInstruction* instr, int ast_id) {
+  HGraphBuilder* builder = owner();
+  builder->AddInstruction(instr);
+  // We expect a simulate after every expression with side effects, though
+  // this one isn't actually needed (and wouldn't work if it were targeted).
+  if (instr->HasSideEffects()) {
+    builder->Push(instr);
+    builder->AddSimulate(ast_id);
+    builder->Pop();
+  }
+  BuildBranch(instr);
+}
+
+
+void TestContext::BuildBranch(HValue* value) {
+  // We expect the graph to be in edge-split form: there is no edge that
+  // connects a branch node to a join node.  We conservatively ensure that
+  // property by always adding an empty block on the outgoing edges of this
+  // branch.
+  HGraphBuilder* builder = owner();
+  HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
+  HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
+  HBranch* branch = new HBranch(empty_true, empty_false, value);
+  builder->CurrentBlock()->Finish(branch);
+
+  HValue* const no_return_value = NULL;
+  HBasicBlock* true_target = if_true();
+  if (true_target->IsInlineReturnTarget()) {
+    empty_true->AddLeaveInlined(no_return_value, true_target);
+  } else {
+    empty_true->Goto(true_target);
+  }
+
+  HBasicBlock* false_target = if_false();
+  if (false_target->IsInlineReturnTarget()) {
+    empty_false->AddLeaveInlined(no_return_value, false_target);
+  } else {
+    empty_false->Goto(false_target);
+  }
+  builder->subgraph()->set_exit_block(NULL);
+}
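+
+// Sketch of the edge-split invariant (illustrative):
+//
+//            +-> [empty_true]  -> [true target]
+//   [branch]-+
+//            +-> [empty_false] -> [false target]
+//
+// No edge runs directly from a block with two successors into a block with
+// two predecessors; the empty blocks give later passes a place to insert
+// code on one specific edge.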
+
+
+// HGraphBuilder infrastructure for bailing out and checking bailouts.
+#define BAILOUT(reason)                         \
+  do {                                          \
+    Bailout(reason);                            \
+    return;                                     \
+  } while (false)
+
+
+#define CHECK_BAILOUT                           \
+  do {                                          \
+    if (HasStackOverflow()) return;             \
+  } while (false)
+
+
+#define VISIT_FOR_EFFECT(expr)                  \
+  do {                                          \
+    VisitForEffect(expr);                       \
+    if (HasStackOverflow()) return;             \
+  } while (false)
+
+
+#define VISIT_FOR_VALUE(expr)                   \
+  do {                                          \
+    VisitForValue(expr);                        \
+    if (HasStackOverflow()) return;             \
+  } while (false)
+
+
+#define VISIT_FOR_CONTROL(expr, true_block, false_block)        \
+  do {                                                          \
+    VisitForControl(expr, true_block, false_block);             \
+    if (HasStackOverflow()) return;                             \
+  } while (false)
+
+
+// 'thing' could be an expression, statement, or list of statements.
+#define ADD_TO_SUBGRAPH(graph, thing)       \
+  do {                                      \
+    AddToSubgraph(graph, thing);            \
+    if (HasStackOverflow()) return;         \
+  } while (false)
+
+
+class HGraphBuilder::SubgraphScope BASE_EMBEDDED {
+ public:
+  SubgraphScope(HGraphBuilder* builder, HSubgraph* new_subgraph)
+      : builder_(builder) {
+    old_subgraph_ = builder_->current_subgraph_;
+    subgraph_ = new_subgraph;
+    builder_->current_subgraph_ = subgraph_;
+  }
+
+  ~SubgraphScope() {
+    old_subgraph_->AddBreakContinueInfo(subgraph_);
+    builder_->current_subgraph_ = old_subgraph_;
+  }
+
+  HSubgraph* subgraph() const { return subgraph_; }
+
+ private:
+  HGraphBuilder* builder_;
+  HSubgraph* old_subgraph_;
+  HSubgraph* subgraph_;
+};
+
+
+void HGraphBuilder::Bailout(const char* reason) {
+  if (FLAG_trace_bailout) {
+    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+    PrintF("Bailout in HGraphBuilder: @\"%s\": %s\n", *debug_name, reason);
+  }
+  SetStackOverflow();
+}
+
+
+void HGraphBuilder::VisitForEffect(Expression* expr) {
+  EffectContext for_effect(this);
+  Visit(expr);
+}
+
+
+void HGraphBuilder::VisitForValue(Expression* expr) {
+  ValueContext for_value(this);
+  Visit(expr);
+}
+
+
+void HGraphBuilder::VisitForControl(Expression* expr,
+                                    HBasicBlock* true_block,
+                                    HBasicBlock* false_block) {
+  TestContext for_test(this, true_block, false_block);
+  Visit(expr);
+}
+
+
+HValue* HGraphBuilder::VisitArgument(Expression* expr) {
+  VisitForValue(expr);
+  if (HasStackOverflow() || !subgraph()->HasExit()) return NULL;
+  return environment()->Top();
+}
+
+
+void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
+  for (int i = 0; i < arguments->length(); i++) {
+    VisitArgument(arguments->at(i));
+    if (HasStackOverflow() || !current_subgraph_->HasExit()) return;
+  }
+}
+
+
+HGraph* HGraphBuilder::CreateGraph(CompilationInfo* info) {
+  ASSERT(current_subgraph_ == NULL);
+  graph_ = new HGraph(info);
+
+  {
+    HPhase phase("Block building");
+    graph_->Initialize(CreateBasicBlock(graph_->start_environment()));
+    current_subgraph_ = graph_;
+
+    Scope* scope = info->scope();
+    SetupScope(scope);
+    VisitDeclarations(scope->declarations());
+
+    AddInstruction(new HStackCheck());
+
+    ZoneList<Statement*>* stmts = info->function()->body();
+    HSubgraph* body = CreateGotoSubgraph(environment());
+    AddToSubgraph(body, stmts);
+    if (HasStackOverflow()) return NULL;
+    current_subgraph_->Append(body, NULL);
+    body->entry_block()->SetJoinId(info->function()->id());
+
+    if (graph_->HasExit()) {
+      graph_->FinishExit(new HReturn(graph_->GetConstantUndefined()));
+    }
+  }
+
+  graph_->OrderBlocks();
+  graph_->AssignDominators();
+  graph_->EliminateRedundantPhis();
+  if (!graph_->CollectPhis()) {
+    Bailout("Phi-use of arguments object");
+    return NULL;
+  }
+
+  HInferRepresentation rep(graph_);
+  rep.Analyze();
+
+  if (FLAG_use_range) {
+    HRangeAnalysis range_analysis(graph_);
+    range_analysis.Analyze();
+  }
+
+  graph_->InitializeInferredTypes();
+  graph_->Canonicalize();
+  graph_->InsertRepresentationChanges();
+
+  // Eliminate redundant stack checks on backwards branches.
+  HStackCheckEliminator sce(graph_);
+  sce.Process();
+
+  // Perform common subexpression elimination and loop-invariant code motion.
+  if (FLAG_use_gvn) {
+    HPhase phase("Global value numbering", graph_);
+    HGlobalValueNumberer gvn(graph_);
+    gvn.Analyze();
+  }
+
+  return graph_;
+}
+
+
+void HGraphBuilder::AddToSubgraph(HSubgraph* graph, Statement* stmt) {
+  SubgraphScope scope(this, graph);
+  Visit(stmt);
+}
+
+
+void HGraphBuilder::AddToSubgraph(HSubgraph* graph, Expression* expr) {
+  SubgraphScope scope(this, graph);
+  VisitForValue(expr);
+}
+
+
+void HGraphBuilder::AddToSubgraph(HSubgraph* graph,
+                                  ZoneList<Statement*>* stmts) {
+  SubgraphScope scope(this, graph);
+  VisitStatements(stmts);
+}
+
+
+HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
+  ASSERT(current_subgraph_->HasExit());
+  current_subgraph_->exit_block()->AddInstruction(instr);
+  return instr;
+}
+
+
+void HGraphBuilder::AddSimulate(int id) {
+  ASSERT(current_subgraph_->HasExit());
+  current_subgraph_->exit_block()->AddSimulate(id);
+}
+
+
+void HGraphBuilder::AddPhi(HPhi* instr) {
+  ASSERT(current_subgraph_->HasExit());
+  current_subgraph_->exit_block()->AddPhi(instr);
+}
+
+
+void HGraphBuilder::PushAndAdd(HInstruction* instr) {
+  Push(instr);
+  AddInstruction(instr);
+}
+
+
+void HGraphBuilder::PushArgumentsForStubCall(int argument_count) {
+  const int kMaxStubArguments = 4;
+  ASSERT_GE(kMaxStubArguments, argument_count);
+  // Push the arguments on the stack.
+  HValue* arguments[kMaxStubArguments];
+  for (int i = argument_count - 1; i >= 0; i--) {
+    arguments[i] = Pop();
+  }
+  for (int i = 0; i < argument_count; i++) {
+    AddInstruction(new HPushArgument(arguments[i]));
+  }
+}
+
+
+void HGraphBuilder::ProcessCall(HCall* call) {
+  for (int i = call->argument_count() - 1; i >= 0; --i) {
+    HValue* value = Pop();
+    HPushArgument* push = new HPushArgument(value);
+    call->SetArgumentAt(i, push);
+  }
+
+  for (int i = 0; i < call->argument_count(); ++i) {
+    AddInstruction(call->PushArgumentAt(i));
+  }
+}
+
+
+void HGraphBuilder::SetupScope(Scope* scope) {
+  // We don't yet handle the function name for named function expressions.
+  if (scope->function() != NULL) BAILOUT("named function expression");
+
+  // We can't handle heap-allocated locals.
+  if (scope->num_heap_slots() > 0) BAILOUT("heap allocated locals");
+
+  HConstant* undefined_constant =
+      new HConstant(Factory::undefined_value(), Representation::Tagged());
+  AddInstruction(undefined_constant);
+  graph_->set_undefined_constant(undefined_constant);
+
+  // Set the initial values of parameters including "this".  "This" has
+  // parameter index 0.
+  int count = scope->num_parameters() + 1;
+  for (int i = 0; i < count; ++i) {
+    HInstruction* parameter = AddInstruction(new HParameter(i));
+    environment()->Bind(i, parameter);
+  }
+
+  // Set the initial values of stack-allocated locals.
+  for (int i = count; i < environment()->values()->length(); ++i) {
+    environment()->Bind(i, undefined_constant);
+  }
+
+  // Handle the arguments and arguments shadow variables specially (they do
+  // not have declarations).
+  if (scope->arguments() != NULL) {
+    HArgumentsObject* object = new HArgumentsObject;
+    AddInstruction(object);
+    graph()->SetArgumentsObject(object);
+    environment()->Bind(scope->arguments(), object);
+    environment()->Bind(scope->arguments_shadow(), object);
+  }
+}
+
+
+void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
+  for (int i = 0; i < statements->length(); i++) {
+    Visit(statements->at(i));
+    if (HasStackOverflow() || !current_subgraph_->HasExit()) break;
+  }
+}
+
+
+HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
+  HBasicBlock* b = graph()->CreateBasicBlock();
+  b->SetInitialEnvironment(env);
+  return b;
+}
+
+
+HSubgraph* HGraphBuilder::CreateInlinedSubgraph(HEnvironment* outer,
+                                                Handle<JSFunction> target,
+                                                FunctionLiteral* function) {
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner =
+      outer->CopyForInlining(target, function, true, undefined);
+  HSubgraph* subgraph = new HSubgraph(graph());
+  subgraph->Initialize(CreateBasicBlock(inner));
+  return subgraph;
+}
+
+
+HSubgraph* HGraphBuilder::CreateGotoSubgraph(HEnvironment* env) {
+  HSubgraph* subgraph = new HSubgraph(graph());
+  HEnvironment* new_env = env->CopyWithoutHistory();
+  subgraph->Initialize(CreateBasicBlock(new_env));
+  return subgraph;
+}
+
+
+HSubgraph* HGraphBuilder::CreateEmptySubgraph() {
+  HSubgraph* subgraph = new HSubgraph(graph());
+  subgraph->Initialize(graph()->CreateBasicBlock());
+  return subgraph;
+}
+
+
+HSubgraph* HGraphBuilder::CreateBranchSubgraph(HEnvironment* env) {
+  HSubgraph* subgraph = new HSubgraph(graph());
+  HEnvironment* new_env = env->Copy();
+  subgraph->Initialize(CreateBasicBlock(new_env));
+  return subgraph;
+}
+
+
+HSubgraph* HGraphBuilder::CreateLoopHeaderSubgraph(HEnvironment* env) {
+  HSubgraph* subgraph = new HSubgraph(graph());
+  HBasicBlock* block = graph()->CreateBasicBlock();
+  HEnvironment* new_env = env->CopyAsLoopHeader(block);
+  block->SetInitialEnvironment(new_env);
+  subgraph->Initialize(block);
+  subgraph->entry_block()->AttachLoopInformation();
+  return subgraph;
+}
+
+
+void HGraphBuilder::VisitBlock(Block* stmt) {
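+  // A labeled block can be the target of breaks, so it gets a subgraph of
+  // its own.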
+  if (stmt->labels() != NULL) {
+    HSubgraph* block_graph = CreateGotoSubgraph(environment());
+    ADD_TO_SUBGRAPH(block_graph, stmt->statements());
+    current_subgraph_->Append(block_graph, stmt);
+  } else {
+    VisitStatements(stmt->statements());
+  }
+}
+
+
+void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+  VisitForEffect(stmt->expression());
+}
+
+
+void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+}
+
+
+void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
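+  // For a constant condition, build only the arm that can be reached.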
+  if (stmt->condition()->ToBooleanIsTrue()) {
+    AddSimulate(stmt->ThenId());
+    Visit(stmt->then_statement());
+  } else if (stmt->condition()->ToBooleanIsFalse()) {
+    AddSimulate(stmt->ElseId());
+    Visit(stmt->else_statement());
+  } else {
+    HSubgraph* then_graph = CreateEmptySubgraph();
+    HSubgraph* else_graph = CreateEmptySubgraph();
+    VISIT_FOR_CONTROL(stmt->condition(),
+                      then_graph->entry_block(),
+                      else_graph->entry_block());
+
+    then_graph->entry_block()->SetJoinId(stmt->ThenId());
+    ADD_TO_SUBGRAPH(then_graph, stmt->then_statement());
+
+    else_graph->entry_block()->SetJoinId(stmt->ElseId());
+    ADD_TO_SUBGRAPH(else_graph, stmt->else_statement());
+
+    current_subgraph_->AppendJoin(then_graph, else_graph, stmt);
+  }
+}
+
+
+void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+  current_subgraph_->FinishBreakContinue(stmt->target(), true);
+}
+
+
+void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+  current_subgraph_->FinishBreakContinue(stmt->target(), false);
+}
+
+
+void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+  AstContext* context = call_context();
+  if (context == NULL) {
+    // Not a return from an inlined function, so emit an actual return.
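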
+    VISIT_FOR_VALUE(stmt->expression());
+    HValue* result = environment()->Pop();
+    subgraph()->FinishExit(new HReturn(result));
+  } else {
+    // Return from an inlined function, visit the subexpression in the
+    // expression context of the call.
+    if (context->IsTest()) {
+      TestContext* test = TestContext::cast(context);
+      VisitForControl(stmt->expression(),
+                      test->if_true(),
+                      test->if_false());
+    } else {
+      HValue* return_value = NULL;
+      if (context->IsEffect()) {
+        VISIT_FOR_EFFECT(stmt->expression());
+        return_value = graph()->GetConstantUndefined();
+      } else {
+        ASSERT(context->IsValue());
+        VISIT_FOR_VALUE(stmt->expression());
+        return_value = environment()->Pop();
+      }
+      subgraph()->exit_block()->AddLeaveInlined(return_value,
+                                                function_return_);
+      subgraph()->set_exit_block(NULL);
+    }
+  }
+}
+
+
+void HGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
+  BAILOUT("WithEnterStatement");
+}
+
+
+void HGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
+  BAILOUT("WithExitStatement");
+}
+
+
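+// Builds, inside the given subgraph, the strict-equality comparison of the
+// switch value against the clause label.  Returns NULL on stack overflow.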
+HCompare* HGraphBuilder::BuildSwitchCompare(HSubgraph* subgraph,
+                                            HValue* switch_value,
+                                            CaseClause* clause) {
+  AddToSubgraph(subgraph, clause->label());
+  if (HasStackOverflow()) return NULL;
+  HValue* clause_value = subgraph->environment()->Pop();
+  HCompare* compare = new HCompare(switch_value,
+                                   clause_value,
+                                   Token::EQ_STRICT);
+  compare->SetInputRepresentation(Representation::Integer32());
+  subgraph->exit_block()->AddInstruction(compare);
+  return compare;
+}
+
+
+void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+  VISIT_FOR_VALUE(stmt->tag());
+  // TODO(3168478): The simulate added for the tag should be enough.
+  AddSimulate(stmt->EntryId());
+  HValue* switch_value = Pop();
+
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  int num_clauses = clauses->length();
+  if (num_clauses == 0) return;
+  if (num_clauses > 128) BAILOUT("SwitchStatement: too many clauses");
+
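+  // Determine the prefix of clauses that are smi compares on smi literals;
+  // only this prefix gets comparison subgraphs below, and reaching the
+  // remaining clauses deoptimizes.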
+  int num_smi_clauses = num_clauses;
+  for (int i = 0; i < num_clauses; i++) {
+    CaseClause* clause = clauses->at(i);
+    if (clause->is_default()) continue;
+    clause->RecordTypeFeedback(oracle());
+    if (!clause->IsSmiCompare()) {
+      if (i == 0) BAILOUT("SwitchStatement: no smi compares");
+      // We will deoptimize if the first non-smi compare is reached.
+      num_smi_clauses = i;
+      break;
+    }
+    if (!clause->label()->IsSmiLiteral()) {
+      BAILOUT("SwitchStatement: non-literal switch label");
+    }
+  }
+
+  // The single exit block of the whole switch statement.
+  HBasicBlock* single_exit_block = graph_->CreateBasicBlock();
+
+  // Build a series of empty subgraphs for the comparisons.
+  // The default clause does not have a comparison subgraph.
+  ZoneList<HSubgraph*> compare_graphs(num_smi_clauses);
+  for (int i = 0; i < num_smi_clauses; i++) {
+    if (clauses->at(i)->is_default()) {
+      compare_graphs.Add(NULL);
+    } else {
+      compare_graphs.Add(CreateEmptySubgraph());
+    }
+  }
+
+  HSubgraph* prev_graph = current_subgraph_;
+  HCompare* prev_compare_inst = NULL;
+  for (int i = 0; i < num_smi_clauses; i++) {
+    CaseClause* clause = clauses->at(i);
+    if (clause->is_default()) continue;
+
+    // Finish the previous graph by connecting it to the current one.
+    HSubgraph* subgraph = compare_graphs.at(i);
+    if (prev_compare_inst == NULL) {
+      ASSERT(prev_graph == current_subgraph_);
+      prev_graph->exit_block()->Finish(new HGoto(subgraph->entry_block()));
+    } else {
+      HBasicBlock* empty = graph()->CreateBasicBlock();
+      prev_graph->exit_block()->Finish(new HBranch(empty,
+                                                   subgraph->entry_block(),
+                                                   prev_compare_inst));
+    }
+
+    // Build the instructions for the current subgraph.
+    ASSERT(clause->IsSmiCompare());
+    prev_compare_inst = BuildSwitchCompare(subgraph, switch_value, clause);
+    if (HasStackOverflow()) return;
+
+    prev_graph = subgraph;
+  }
+
+  // Finish the last comparison if there was at least one comparison.
+  // last_false_block is the (empty) false-block of the last comparison.  If
+  // there are no comparisons at all (a single default clause), it is just
+  // the last block of the current subgraph.
+  HBasicBlock* last_false_block = current_subgraph_->exit_block();
+  if (prev_graph != current_subgraph_) {
+    last_false_block = graph()->CreateBasicBlock();
+    HBasicBlock* empty = graph()->CreateBasicBlock();
+    prev_graph->exit_block()->Finish(new HBranch(empty,
+                                                 last_false_block,
+                                                 prev_compare_inst));
+  }
+
+  // If we have a non-smi compare clause, we deoptimize after trying
+  // all the previous compares.
+  if (num_smi_clauses < num_clauses) {
+    last_false_block->Finish(new HDeoptimize);
+  }
+
+  // Build statement blocks, connect them to their comparison block and
+  // to the previous statement block, if there is a fall-through.
+  HSubgraph* previous_subgraph = NULL;
+  for (int i = 0; i < num_clauses; i++) {
+    CaseClause* clause = clauses->at(i);
+    // The subgraph for the clause's statements is only created when it is
+    // reachable, either from the corresponding compare or as a fall-through
+    // from the previous statements.
+    HSubgraph* subgraph = NULL;
+
+    if (i < num_smi_clauses) {
+      if (clause->is_default()) {
+        if (!last_false_block->IsFinished()) {
+          // Default clause: Connect it to the last false block.
+          subgraph = CreateEmptySubgraph();
+          last_false_block->Finish(new HGoto(subgraph->entry_block()));
+        }
+      } else {
+        ASSERT(clause->IsSmiCompare());
+        // Connect with the corresponding comparison.
+        subgraph = CreateEmptySubgraph();
+        HBasicBlock* empty =
+            compare_graphs.at(i)->exit_block()->end()->FirstSuccessor();
+        empty->Finish(new HGoto(subgraph->entry_block()));
+      }
+    }
+
+    // Check for fall-through from previous statement block.
+    if (previous_subgraph != NULL && previous_subgraph->HasExit()) {
+      if (subgraph == NULL) subgraph = CreateEmptySubgraph();
+      previous_subgraph->exit_block()->
+          Finish(new HGoto(subgraph->entry_block()));
+    }
+
+    if (subgraph != NULL) {
+      ADD_TO_SUBGRAPH(subgraph, clause->statements());
+      HBasicBlock* break_block = subgraph->BundleBreak(stmt);
+      if (break_block != NULL) {
+        break_block->Finish(new HGoto(single_exit_block));
+      }
+    }
+
+    previous_subgraph = subgraph;
+  }
+
+  // If the last statement block has a fall-through, connect it to the
+  // single exit block.
+  if (previous_subgraph != NULL && previous_subgraph->HasExit()) {
+    previous_subgraph->exit_block()->Finish(new HGoto(single_exit_block));
+  }
+
+  // If there is no default clause, finish the last comparison's false
+  // target.
+  if (!last_false_block->IsFinished()) {
+    last_false_block->Finish(new HGoto(single_exit_block));
+  }
+
+  if (single_exit_block->HasPredecessor()) {
+    current_subgraph_->set_exit_block(single_exit_block);
+  } else {
+    current_subgraph_->set_exit_block(NULL);
+  }
+}
+
+
+bool HGraph::HasOsrEntryAt(IterationStatement* statement) {
+  return statement->OsrEntryId() == info()->osr_ast_id();
+}
+
+
+void HSubgraph::PreProcessOsrEntry(IterationStatement* statement) {
+  if (!graph()->HasOsrEntryAt(statement)) return;
+
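+  // Split control flow on a constant-true branch so that there is both a
+  // normal entry and an OSR entry into the loop; the two paths rejoin at
+  // the loop predecessor block.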
+  HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
+  HBasicBlock* osr_entry = graph()->CreateBasicBlock();
+  HValue* true_value = graph()->GetConstantTrue();
+  HBranch* branch = new HBranch(non_osr_entry, osr_entry, true_value);
+  exit_block()->Finish(branch);
+
+  HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
+  non_osr_entry->Goto(loop_predecessor);
+
+  int osr_entry_id = statement->OsrEntryId();
+  // We want the correct environment at the OsrEntry instruction.  Build
+  // it explicitly.  The expression stack should be empty.
+  int count = osr_entry->last_environment()->total_count();
+  ASSERT(count == (osr_entry->last_environment()->parameter_count() +
+                   osr_entry->last_environment()->local_count()));
+  for (int i = 0; i < count; ++i) {
+    HUnknownOSRValue* unknown = new HUnknownOSRValue;
+    osr_entry->AddInstruction(unknown);
+    osr_entry->last_environment()->Bind(i, unknown);
+  }
+
+  osr_entry->AddSimulate(osr_entry_id);
+  osr_entry->AddInstruction(new HOsrEntry(osr_entry_id));
+  osr_entry->Goto(loop_predecessor);
+  loop_predecessor->SetJoinId(statement->EntryId());
+  set_exit_block(loop_predecessor);
+}
+
+
+void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  ASSERT(subgraph()->HasExit());
+  subgraph()->PreProcessOsrEntry(stmt);
+
+  HSubgraph* body_graph = CreateLoopHeaderSubgraph(environment());
+  ADD_TO_SUBGRAPH(body_graph, stmt->body());
+  body_graph->ResolveContinue(stmt);
+
+  if (!body_graph->HasExit() || stmt->cond()->ToBooleanIsTrue()) {
+    current_subgraph_->AppendEndless(body_graph, stmt);
+  } else {
+    HSubgraph* go_back = CreateEmptySubgraph();
+    HSubgraph* exit = CreateEmptySubgraph();
+    {
+      SubgraphScope scope(this, body_graph);
+      VISIT_FOR_CONTROL(stmt->cond(),
+                        go_back->entry_block(),
+                        exit->entry_block());
+      go_back->entry_block()->SetJoinId(stmt->BackEdgeId());
+      exit->entry_block()->SetJoinId(stmt->ExitId());
+    }
+    current_subgraph_->AppendDoWhile(body_graph, stmt, go_back, exit);
+  }
+}
+
+
+bool HGraphBuilder::ShouldPeel(HSubgraph* cond, HSubgraph* body) {
+  return FLAG_use_peeling;
+}
+
+
+void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+  ASSERT(subgraph()->HasExit());
+  subgraph()->PreProcessOsrEntry(stmt);
+
+  HSubgraph* cond_graph = NULL;
+  HSubgraph* body_graph = NULL;
+  HSubgraph* exit_graph = NULL;
+
+  // If the condition is constant true, do not generate a condition subgraph.
+  if (stmt->cond()->ToBooleanIsTrue()) {
+    body_graph = CreateLoopHeaderSubgraph(environment());
+    ADD_TO_SUBGRAPH(body_graph, stmt->body());
+  } else {
+    cond_graph = CreateLoopHeaderSubgraph(environment());
+    body_graph = CreateEmptySubgraph();
+    exit_graph = CreateEmptySubgraph();
+    {
+      SubgraphScope scope(this, cond_graph);
+      VISIT_FOR_CONTROL(stmt->cond(),
+                        body_graph->entry_block(),
+                        exit_graph->entry_block());
+      body_graph->entry_block()->SetJoinId(stmt->BodyId());
+      exit_graph->entry_block()->SetJoinId(stmt->ExitId());
+    }
+    ADD_TO_SUBGRAPH(body_graph, stmt->body());
+  }
+
+  body_graph->ResolveContinue(stmt);
+
+  if (cond_graph != NULL) {
+    AppendPeeledWhile(stmt, cond_graph, body_graph, exit_graph);
+  } else {
+    // TODO(fschneider): Implement peeling for endless loops as well.
+    current_subgraph_->AppendEndless(body_graph, stmt);
+  }
+}
+
+
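+// Appends a while loop to the current subgraph, peeling one iteration when
+// allowed.  When peeling, the already-built condition and body act as the
+// first iteration and the statement is revisited to build the remaining
+// loop; peeled_statement_ guards against unbounded recursion.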
+void HGraphBuilder::AppendPeeledWhile(IterationStatement* stmt,
+                                      HSubgraph* cond_graph,
+                                      HSubgraph* body_graph,
+                                      HSubgraph* exit_graph) {
+  HSubgraph* loop = NULL;
+  if (body_graph->HasExit() && stmt != peeled_statement_ &&
+      ShouldPeel(cond_graph, body_graph)) {
+    // Save the last peeled iteration statement to prevent infinite recursion.
+    IterationStatement* outer_peeled_statement = peeled_statement_;
+    peeled_statement_ = stmt;
+    loop = CreateGotoSubgraph(body_graph->environment());
+    ADD_TO_SUBGRAPH(loop, stmt);
+    peeled_statement_ = outer_peeled_statement;
+  }
+  current_subgraph_->AppendWhile(cond_graph, body_graph, stmt, loop,
+                                 exit_graph);
+}
+
+
+void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
+  // Only visit the init statement in the peeled part of the loop.
+  if (stmt->init() != NULL && peeled_statement_ != stmt) {
+    Visit(stmt->init());
+    CHECK_BAILOUT;
+  }
+  ASSERT(subgraph()->HasExit());
+  subgraph()->PreProcessOsrEntry(stmt);
+
+  HSubgraph* cond_graph = NULL;
+  HSubgraph* body_graph = NULL;
+  HSubgraph* exit_graph = NULL;
+  if (stmt->cond() != NULL) {
+    cond_graph = CreateLoopHeaderSubgraph(environment());
+    body_graph = CreateEmptySubgraph();
+    exit_graph = CreateEmptySubgraph();
+    {
+      SubgraphScope scope(this, cond_graph);
+      VISIT_FOR_CONTROL(stmt->cond(),
+                        body_graph->entry_block(),
+                        exit_graph->entry_block());
+      body_graph->entry_block()->SetJoinId(stmt->BodyId());
+      exit_graph->entry_block()->SetJoinId(stmt->ExitId());
+    }
+  } else {
+    body_graph = CreateLoopHeaderSubgraph(environment());
+  }
+  ADD_TO_SUBGRAPH(body_graph, stmt->body());
+
+  HSubgraph* next_graph = NULL;
+  body_graph->ResolveContinue(stmt);
+
+  if (stmt->next() != NULL && body_graph->HasExit()) {
+    next_graph = CreateGotoSubgraph(body_graph->environment());
+    ADD_TO_SUBGRAPH(next_graph, stmt->next());
+    body_graph->Append(next_graph, NULL);
+    next_graph->entry_block()->SetJoinId(stmt->ContinueId());
+  }
+
+  if (cond_graph != NULL) {
+    AppendPeeledWhile(stmt, cond_graph, body_graph, exit_graph);
+  } else {
+    current_subgraph_->AppendEndless(body_graph, stmt);
+  }
+}
+
+
+void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+  BAILOUT("ForInStatement");
+}
+
+
+void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  BAILOUT("TryCatchStatement");
+}
+
+
+void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  BAILOUT("TryFinallyStatement");
+}
+
+
+void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  BAILOUT("DebuggerStatement");
+}
+
+
+void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Handle<SharedFunctionInfo> shared_info =
+      Compiler::BuildFunctionInfo(expr, graph_->info()->script());
+  CHECK_BAILOUT;
+  HFunctionLiteral* instr =
+      new HFunctionLiteral(shared_info, expr->pretenure());
+  ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  BAILOUT("SharedFunctionInfoLiteral");
+}
+
+
+void HGraphBuilder::VisitConditional(Conditional* expr) {
+  HSubgraph* then_graph = CreateEmptySubgraph();
+  HSubgraph* else_graph = CreateEmptySubgraph();
+  VISIT_FOR_CONTROL(expr->condition(),
+                    then_graph->entry_block(),
+                    else_graph->entry_block());
+
+  then_graph->entry_block()->SetJoinId(expr->ThenId());
+  ADD_TO_SUBGRAPH(then_graph, expr->then_expression());
+
+  else_graph->entry_block()->SetJoinId(expr->ElseId());
+  ADD_TO_SUBGRAPH(else_graph, expr->else_expression());
+
+  current_subgraph_->AppendJoin(then_graph, else_graph, expr);
+  ast_context()->ReturnValue(Pop());
+}
+
+
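+// Looks up the property cell for a global variable and bails out unless it
+// is a plain data property on the global object (and writable when the
+// lookup is for a store).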
+void HGraphBuilder::LookupGlobalPropertyCell(Variable* var,
+                                             LookupResult* lookup,
+                                             bool is_store) {
+  if (var->is_this()) {
+    BAILOUT("global this reference");
+  }
+  if (!graph()->info()->has_global_object()) {
+    BAILOUT("no global object to optimize VariableProxy");
+  }
+  Handle<GlobalObject> global(graph()->info()->global_object());
+  global->Lookup(*var->name(), lookup);
+  if (!lookup->IsProperty()) {
+    BAILOUT("global variable cell not yet introduced");
+  }
+  if (lookup->type() != NORMAL) {
+    BAILOUT("global variable has accessors");
+  }
+  if (is_store && lookup->IsReadOnly()) {
+    BAILOUT("read-only global variable");
+  }
+}
+
+
+void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+  Variable* variable = expr->AsVariable();
+  if (variable == NULL) {
+    BAILOUT("reference to rewritten variable");
+  } else if (variable->IsStackAllocated()) {
+    if (environment()->Lookup(variable)->CheckFlag(HValue::kIsArguments)) {
+      BAILOUT("unsupported context for arguments object");
+    }
+    ast_context()->ReturnValue(environment()->Lookup(variable));
+  } else if (variable->is_global()) {
+    LookupResult lookup;
+    LookupGlobalPropertyCell(variable, &lookup, false);
+    CHECK_BAILOUT;
+
+    Handle<GlobalObject> global(graph()->info()->global_object());
+    // TODO(3039103): Handle global property load through an IC call when
+    // access checks are enabled.
+    if (global->IsAccessCheckNeeded()) {
+      BAILOUT("global object requires access check");
+    }
+    Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
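+    // The cell may contain the hole for properties that are deletable or
+    // read-only, so the load has to check for it.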
+    bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+    HLoadGlobal* instr = new HLoadGlobal(cell, check_hole);
+    ast_context()->ReturnInstruction(instr, expr->id());
+  } else {
+    BAILOUT("reference to non-stack-allocated/non-global variable");
+  }
+}
+
+
+void HGraphBuilder::VisitLiteral(Literal* expr) {
+  HConstant* instr = new HConstant(expr->handle(), Representation::Tagged());
+  ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+  HRegExpLiteral* instr = new HRegExpLiteral(expr->pattern(),
+                                             expr->flags(),
+                                             expr->literal_index());
+  ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+  HObjectLiteral* literal = new HObjectLiteral(expr->constant_properties(),
+                                               expr->fast_elements(),
+                                               expr->literal_index(),
+                                               expr->depth());
+  // The object is expected in the bailout environment during computation
+  // of the property values and is the value of the entire expression.
+  PushAndAdd(literal);
+
+  expr->CalculateEmitStore();
+
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    Expression* value = property->value();
+
+    switch (property->kind()) {
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+        // Fall through.
+      case ObjectLiteral::Property::COMPUTED:
+        if (key->handle()->IsSymbol()) {
+          if (property->emit_store()) {
+            VISIT_FOR_VALUE(value);
+            HValue* value = Pop();
+            Handle<String> name = Handle<String>::cast(key->handle());
+            AddInstruction(new HStoreNamedGeneric(literal, name, value));
+            AddSimulate(key->id());
+          } else {
+            VISIT_FOR_EFFECT(value);
+          }
+          break;
+        }
+        // Fall through.
+      case ObjectLiteral::Property::PROTOTYPE:
+      case ObjectLiteral::Property::SETTER:
+      case ObjectLiteral::Property::GETTER:
+        BAILOUT("Object literal with complex property");
+      default: UNREACHABLE();
+    }
+  }
+  ast_context()->ReturnValue(Pop());
+}
+
+
+void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+
+  HArrayLiteral* literal = new HArrayLiteral(expr->constant_elements(),
+                                             length,
+                                             expr->literal_index(),
+                                             expr->depth());
+  // The array is expected in the bailout environment during computation
+  // of the property values and is the value of the entire expression.
+  PushAndAdd(literal);
+
+  HLoadElements* elements = NULL;
+
+  for (int i = 0; i < length; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+    VISIT_FOR_VALUE(subexpr);
+    HValue* value = Pop();
+    if (!Smi::IsValid(i)) BAILOUT("Non-smi key in array literal");
+
+    // Load the elements array before the first store.
+    if (elements == NULL) {
+      elements = new HLoadElements(literal);
+      AddInstruction(elements);
+    }
+
+    HValue* key = AddInstruction(new HConstant(Handle<Object>(Smi::FromInt(i)),
+                                               Representation::Integer32()));
+    AddInstruction(new HStoreKeyedFastElement(elements, key, value));
+    AddSimulate(expr->GetIdForElement(i));
+  }
+  ast_context()->ReturnValue(Pop());
+}
+
+
+void HGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+  BAILOUT("CatchExtensionObject");
+}
+
+
+HBasicBlock* HGraphBuilder::BuildTypeSwitch(ZoneMapList* maps,
+                                            ZoneList<HSubgraph*>* subgraphs,
+                                            HValue* receiver,
+                                            int join_id) {
+  ASSERT(subgraphs->length() == (maps->length() + 1));
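+  // There is one subgraph per map plus a final subgraph for the generic
+  // (or deoptimizing) default case.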
+
+  // Build map compare subgraphs for all but the first map.
+  ZoneList<HSubgraph*> map_compare_subgraphs(maps->length() - 1);
+  for (int i = maps->length() - 1; i > 0; --i) {
+    HSubgraph* subgraph = CreateBranchSubgraph(environment());
+    SubgraphScope scope(this, subgraph);
+    HSubgraph* else_subgraph =
+        (i == (maps->length() - 1))
+        ? subgraphs->last()
+        : map_compare_subgraphs.last();
+    current_subgraph_->exit_block()->Finish(
+        new HCompareMapAndBranch(receiver,
+                                 maps->at(i),
+                                 subgraphs->at(i)->entry_block(),
+                                 else_subgraph->entry_block()));
+    map_compare_subgraphs.Add(subgraph);
+  }
+
+  // Generate first map check to end the current block.
+  AddInstruction(new HCheckNonSmi(receiver));
+  HSubgraph* else_subgraph =
+      (maps->length() == 1) ? subgraphs->at(1) : map_compare_subgraphs.last();
+  current_subgraph_->exit_block()->Finish(
+      new HCompareMapAndBranch(receiver,
+                               Handle<Map>(maps->first()),
+                               subgraphs->first()->entry_block(),
+                               else_subgraph->entry_block()));
+
+  // Join all the call subgraphs in a new basic block and make
+  // this basic block the current basic block.
+  HBasicBlock* join_block = graph_->CreateBasicBlock();
+  for (int i = 0; i < subgraphs->length(); ++i) {
+    if (subgraphs->at(i)->HasExit()) {
+      subgraphs->at(i)->exit_block()->Goto(join_block);
+    }
+  }
+
+  if (join_block->predecessors()->is_empty()) return NULL;
+  join_block->SetJoinId(join_id);
+  return join_block;
+}
+
+
+// Sets the lookup result and returns true if the store can be inlined.
+static bool ComputeStoredField(Handle<Map> type,
+                               Handle<String> name,
+                               LookupResult* lookup) {
+  type->LookupInDescriptors(NULL, *name, lookup);
+  if (!lookup->IsPropertyOrTransition()) return false;
+  if (lookup->type() == FIELD) return true;
+  return (lookup->type() == MAP_TRANSITION) &&
+      (type->unused_property_fields() > 0);
+}
+
+
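+// Computes the field index for the store.  A negative index denotes an
+// in-object property, counted back from the end of the object; non-negative
+// indices refer to the out-of-object properties array.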
+static int ComputeStoredFieldIndex(Handle<Map> type,
+                                   Handle<String> name,
+                                   LookupResult* lookup) {
+  ASSERT(lookup->type() == FIELD || lookup->type() == MAP_TRANSITION);
+  if (lookup->type() == FIELD) {
+    return lookup->GetLocalFieldIndexFromMap(*type);
+  } else {
+    Map* transition = lookup->GetTransitionMapFromMap(*type);
+    return transition->PropertyIndexFor(*name) - type->inobject_properties();
+  }
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
+                                                  Handle<String> name,
+                                                  HValue* value,
+                                                  Handle<Map> type,
+                                                  LookupResult* lookup,
+                                                  bool smi_and_map_check) {
+  if (smi_and_map_check) {
+    AddInstruction(new HCheckNonSmi(object));
+    AddInstruction(new HCheckMap(object, type));
+  }
+
+  int index = ComputeStoredFieldIndex(type, name, lookup);
+  bool is_in_object = index < 0;
+  int offset = index * kPointerSize;
+  if (index < 0) {
+    // Negative property indices are in-object properties, indexed
+    // from the end of the fixed part of the object.
+    offset += type->instance_size();
+  } else {
+    offset += FixedArray::kHeaderSize;
+  }
+  HStoreNamedField* instr =
+      new HStoreNamedField(object, name, value, is_in_object, offset);
+  if (lookup->type() == MAP_TRANSITION) {
+    Handle<Map> transition(lookup->GetTransitionMapFromMap(*type));
+    instr->set_transition(transition);
+    // TODO(fschneider): Record the new map type of the object in the IR to
+    // enable elimination of redundant checks after the transition store.
+    instr->SetFlag(HValue::kChangesMaps);
+  }
+  return instr;
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
+                                                    Handle<String> name,
+                                                    HValue* value) {
+  return new HStoreNamedGeneric(object, name, value);
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
+                                             HValue* value,
+                                             Expression* expr) {
+  Property* prop = (expr->AsProperty() != NULL)
+      ? expr->AsProperty()
+      : expr->AsAssignment()->target()->AsProperty();
+  Literal* key = prop->key()->AsLiteral();
+  Handle<String> name = Handle<String>::cast(key->handle());
+  ASSERT(!name.is_null());
+
+  LookupResult lookup;
+  ZoneMapList* types = expr->GetReceiverTypes();
+  bool is_monomorphic = expr->IsMonomorphic() &&
+      ComputeStoredField(types->first(), name, &lookup);
+
+  return is_monomorphic
+      ? BuildStoreNamedField(object, name, value, types->first(), &lookup,
+                             true)  // Needs smi and map check.
+      : BuildStoreNamedGeneric(object, name, value);
+}
+
+
+void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
+                                                     HValue* object,
+                                                     HValue* value,
+                                                     ZoneMapList* types,
+                                                     Handle<String> name) {
+  int number_of_types = Min(types->length(), kMaxStorePolymorphism);
+  ZoneMapList maps(number_of_types);
+  ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
+  bool needs_generic = (types->length() > kMaxStorePolymorphism);
+
+  // Build subgraphs for each of the specific maps.
+  //
+  // TODO(ager): We should recognize when the prototype chains for
+  // different maps are identical. In that case we can avoid
+  // repeatedly generating the same prototype map checks.
+  for (int i = 0; i < number_of_types; ++i) {
+    Handle<Map> map = types->at(i);
+    LookupResult lookup;
+    if (ComputeStoredField(map, name, &lookup)) {
+      maps.Add(map);
+      HSubgraph* subgraph = CreateBranchSubgraph(environment());
+      SubgraphScope scope(this, subgraph);
+      HInstruction* instr =
+          BuildStoreNamedField(object, name, value, map, &lookup, false);
+      Push(value);
+      instr->set_position(expr->position());
+      AddInstruction(instr);
+      subgraphs.Add(subgraph);
+    } else {
+      needs_generic = true;
+    }
+  }
+
+  // If none of the properties were named fields we generate a
+  // generic store.
+  if (maps.length() == 0) {
+    HInstruction* instr = new HStoreNamedGeneric(object, name, value);
+    Push(value);
+    instr->set_position(expr->position());
+    AddInstruction(instr);
+    if (instr->HasSideEffects()) AddSimulate(expr->id());
+  } else {
+    // Build subgraph for generic store through IC.
+    {
+      HSubgraph* subgraph = CreateBranchSubgraph(environment());
+      SubgraphScope scope(this, subgraph);
+      if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
+        subgraph->FinishExit(new HDeoptimize());
+      } else {
+        HInstruction* instr = new HStoreNamedGeneric(object, name, value);
+        Push(value);
+        instr->set_position(expr->position());
+        AddInstruction(instr);
+      }
+      subgraphs.Add(subgraph);
+    }
+
+    HBasicBlock* new_exit_block =
+        BuildTypeSwitch(&maps, &subgraphs, object, expr->AssignmentId());
+    subgraph()->set_exit_block(new_exit_block);
+  }
+
+  if (subgraph()->HasExit()) ast_context()->ReturnValue(Pop());
+}
+
+
+void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
+  Property* prop = expr->target()->AsProperty();
+  ASSERT(prop != NULL);
+  expr->RecordTypeFeedback(oracle());
+  VISIT_FOR_VALUE(prop->obj());
+
+  HValue* value = NULL;
+  HInstruction* instr = NULL;
+
+  if (prop->key()->IsPropertyName()) {
+    // Named store.
+    VISIT_FOR_VALUE(expr->value());
+    value = Pop();
+    HValue* object = Pop();
+
+    Literal* key = prop->key()->AsLiteral();
+    Handle<String> name = Handle<String>::cast(key->handle());
+    ASSERT(!name.is_null());
+
+    ZoneMapList* types = expr->GetReceiverTypes();
+    LookupResult lookup;
+
+    if (expr->IsMonomorphic()) {
+      instr = BuildStoreNamed(object, value, expr);
+
+    } else if (types != NULL && types->length() > 1) {
+      HandlePolymorphicStoreNamedField(expr, object, value, types, name);
+      return;
+
+    } else {
+      instr = new HStoreNamedGeneric(object, name, value);
+    }
+
+  } else {
+    // Keyed store.
+    VISIT_FOR_VALUE(prop->key());
+    VISIT_FOR_VALUE(expr->value());
+    value = Pop();
+    HValue* key = Pop();
+    HValue* object = Pop();
+
+    bool is_fast_elements = expr->IsMonomorphic() &&
+        expr->GetMonomorphicReceiverType()->has_fast_elements();
+
+    instr = is_fast_elements
+        ? BuildStoreKeyedFastElement(object, key, value, expr)
+        : BuildStoreKeyedGeneric(object, key, value);
+  }
+
+  Push(value);
+  instr->set_position(expr->position());
+  AddInstruction(instr);
+  if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
+  ast_context()->ReturnValue(Pop());
+}
+
+
+// Because not every expression has a position and there is no common
+// superclass of Assignment and CountOperation, we cannot just pass the
+// owning expression; the position and ast_id are passed separately instead.
+void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
+                                                   HValue* value,
+                                                   int position,
+                                                   int ast_id) {
+  LookupResult lookup;
+  LookupGlobalPropertyCell(var, &lookup, true);
+  CHECK_BAILOUT;
+
+  Handle<GlobalObject> global(graph()->info()->global_object());
+  Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+  HInstruction* instr = new HStoreGlobal(value, cell);
+  instr->set_position(position);
+  AddInstruction(instr);
+  if (instr->HasSideEffects()) AddSimulate(ast_id);
+}
+
+
+void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
+  Expression* target = expr->target();
+  VariableProxy* proxy = target->AsVariableProxy();
+  Variable* var = proxy->AsVariable();
+  Property* prop = target->AsProperty();
+  ASSERT(var == NULL || prop == NULL);
+
+  // We have a second position recorded in the FullCodeGenerator to have
+  // type feedback for the binary operation.
+  BinaryOperation* operation = expr->binary_operation();
+  operation->RecordTypeFeedback(oracle());
+
+  if (var != NULL) {
+    if (!var->is_global() && !var->IsStackAllocated()) {
+      BAILOUT("non-stack/non-global in compound assignment");
+    }
+
+    VISIT_FOR_VALUE(operation);
+
+    if (var->is_global()) {
+      HandleGlobalVariableAssignment(var,
+                                     Top(),
+                                     expr->position(),
+                                     expr->AssignmentId());
+    } else {
+      Bind(var, Top());
+    }
+    ast_context()->ReturnValue(Pop());
+
+  } else if (prop != NULL) {
+    prop->RecordTypeFeedback(oracle());
+
+    if (prop->key()->IsPropertyName()) {
+      // Named property.
+      VISIT_FOR_VALUE(prop->obj());
+      HValue* obj = Top();
+
+      HInstruction* load = NULL;
+      if (prop->IsMonomorphic()) {
+        Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+        Handle<Map> map = prop->GetReceiverTypes()->first();
+        load = BuildLoadNamed(obj, prop, map, name);
+      } else {
+        load = BuildLoadNamedGeneric(obj, prop);
+      }
+      PushAndAdd(load);
+      if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
+
+      VISIT_FOR_VALUE(expr->value());
+      HValue* right = Pop();
+      HValue* left = Pop();
+
+      HInstruction* instr = BuildBinaryOperation(operation, left, right);
+      PushAndAdd(instr);
+      if (instr->HasSideEffects()) AddSimulate(operation->id());
+
+      HInstruction* store = BuildStoreNamed(obj, instr, prop);
+      AddInstruction(store);
+      // Drop the simulated receiver and value.  Return the value.
+      Drop(2);
+      Push(instr);
+      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+      ast_context()->ReturnValue(Pop());
+
+    } else {
+      // Keyed property.
+      VISIT_FOR_VALUE(prop->obj());
+      VISIT_FOR_VALUE(prop->key());
+      HValue* obj = environment()->ExpressionStackAt(1);
+      HValue* key = environment()->ExpressionStackAt(0);
+
+      bool is_fast_elements = prop->IsMonomorphic() &&
+          prop->GetMonomorphicReceiverType()->has_fast_elements();
+
+      HInstruction* load = is_fast_elements
+          ? BuildLoadKeyedFastElement(obj, key, prop)
+          : BuildLoadKeyedGeneric(obj, key);
+      PushAndAdd(load);
+      if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
+
+      VISIT_FOR_VALUE(expr->value());
+      HValue* right = Pop();
+      HValue* left = Pop();
+
+      HInstruction* instr = BuildBinaryOperation(operation, left, right);
+      PushAndAdd(instr);
+      if (instr->HasSideEffects()) AddSimulate(operation->id());
+
+      HInstruction* store = is_fast_elements
+          ? BuildStoreKeyedFastElement(obj, key, instr, prop)
+          : BuildStoreKeyedGeneric(obj, key, instr);
+      AddInstruction(store);
+      // Drop the simulated receiver, key, and value.  Return the value.
+      Drop(3);
+      Push(instr);
+      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+      ast_context()->ReturnValue(Pop());
+    }
+
+  } else {
+    BAILOUT("invalid lhs in compound assignment");
+  }
+}
+
+
+void HGraphBuilder::VisitAssignment(Assignment* expr) {
+  VariableProxy* proxy = expr->target()->AsVariableProxy();
+  Variable* var = proxy->AsVariable();
+  Property* prop = expr->target()->AsProperty();
+  ASSERT(var == NULL || prop == NULL);
+
+  if (expr->is_compound()) {
+    HandleCompoundAssignment(expr);
+    return;
+  }
+
+  if (var != NULL) {
+    if (proxy->IsArguments()) BAILOUT("assignment to arguments");
+
+    // Handle the assignment.
+    if (var->is_global()) {
+      VISIT_FOR_VALUE(expr->value());
+      HandleGlobalVariableAssignment(var,
+                                     Top(),
+                                     expr->position(),
+                                     expr->AssignmentId());
+    } else {
+      // We allow references to the arguments object only in assignments
+      // to local variables to make sure that the arguments object does
+      // not escape and is not modified.
+      VariableProxy* rhs = expr->value()->AsVariableProxy();
+      if (rhs != NULL &&
+          rhs->var()->IsStackAllocated() &&
+          environment()->Lookup(rhs->var())->CheckFlag(HValue::kIsArguments)) {
+        Push(environment()->Lookup(rhs->var()));
+      } else {
+        VISIT_FOR_VALUE(expr->value());
+      }
+      Bind(proxy->var(), Top());
+    }
+    // Return the value.
+    ast_context()->ReturnValue(Pop());
+
+  } else if (prop != NULL) {
+    HandlePropertyAssignment(expr);
+  } else {
+    BAILOUT("unsupported invalid lhs");
+  }
+}
+
+
+void HGraphBuilder::VisitThrow(Throw* expr) {
+  // We don't optimize functions with invalid left-hand sides in
+  // assignments, count operations, or for-in.  Consequently, throw can
+  // currently only occur in an effect context.
+  ASSERT(ast_context()->IsEffect());
+  VISIT_FOR_VALUE(expr->exception());
+
+  HValue* value = environment()->Pop();
+  HControlInstruction* instr = new HThrow(value);
+  instr->set_position(expr->position());
+  current_subgraph_->FinishExit(instr);
+}
+
+
+void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
+                                                    HValue* object,
+                                                    ZoneMapList* types,
+                                                    Handle<String> name) {
+  int number_of_types = Min(types->length(), kMaxLoadPolymorphism);
+  ZoneMapList maps(number_of_types);
+  ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
+  bool needs_generic = (types->length() > kMaxLoadPolymorphism);
+
+  // Build subgraphs for each of the specific maps.
+  //
+  // TODO(ager): We should recognize when the prototype chains for
+  // different maps are identical. In that case we can avoid
+  // repeatedly generating the same prototype map checks.
+  for (int i = 0; i < number_of_types; ++i) {
+    Handle<Map> map = types->at(i);
+    LookupResult lookup;
+    map->LookupInDescriptors(NULL, *name, &lookup);
+    if (lookup.IsProperty() && lookup.type() == FIELD) {
+      maps.Add(map);
+      HSubgraph* subgraph = CreateBranchSubgraph(environment());
+      SubgraphScope scope(this, subgraph);
+      HLoadNamedField* instr =
+          BuildLoadNamedField(object, expr, map, &lookup, false);
+      instr->set_position(expr->position());
+      instr->ClearFlag(HValue::kUseGVN);  // Don't do GVN on polymorphic loads.
+      PushAndAdd(instr);
+      subgraphs.Add(subgraph);
+    } else {
+      needs_generic = true;
+    }
+  }
+
+  // If none of the properties were named fields we generate a
+  // generic load.
+  if (maps.length() == 0) {
+    HInstruction* instr = BuildLoadNamedGeneric(object, expr);
+    instr->set_position(expr->position());
+    PushAndAdd(instr);
+    if (instr->HasSideEffects()) AddSimulate(expr->id());
+  } else {
+    // Build subgraph for generic load through IC.
+    {
+      HSubgraph* subgraph = CreateBranchSubgraph(environment());
+      SubgraphScope scope(this, subgraph);
+      if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
+        subgraph->FinishExit(new HDeoptimize());
+      } else {
+        HInstruction* instr = BuildLoadNamedGeneric(object, expr);
+        instr->set_position(expr->position());
+        PushAndAdd(instr);
+      }
+      subgraphs.Add(subgraph);
+    }
+
+    HBasicBlock* new_exit_block =
+        BuildTypeSwitch(&maps, &subgraphs, object, expr->id());
+    subgraph()->set_exit_block(new_exit_block);
+  }
+
+  if (subgraph()->HasExit()) ast_context()->ReturnValue(Pop());
+}
+
+
+HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
+                                                    Property* expr,
+                                                    Handle<Map> type,
+                                                    LookupResult* lookup,
+                                                    bool smi_and_map_check) {
+  if (smi_and_map_check) {
+    AddInstruction(new HCheckNonSmi(object));
+    AddInstruction(new HCheckMap(object, type));
+  }
+
+  int index = lookup->GetLocalFieldIndexFromMap(*type);
+  if (index < 0) {
+    // Negative property indices are in-object properties, indexed
+    // from the end of the fixed part of the object.
+    int offset = (index * kPointerSize) + type->instance_size();
+    return new HLoadNamedField(object, true, offset);
+  } else {
+    // Non-negative property indices are in the properties array.
+    int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
+    return new HLoadNamedField(object, false, offset);
+  }
+}
+
+
+HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* obj,
+                                                   Property* expr) {
+  ASSERT(expr->key()->IsPropertyName());
+  Handle<Object> name = expr->key()->AsLiteral()->handle();
+  return new HLoadNamedGeneric(obj, name);
+}
+
+
+HInstruction* HGraphBuilder::BuildLoadNamed(HValue* obj,
+                                            Property* expr,
+                                            Handle<Map> map,
+                                            Handle<String> name) {
+  LookupResult lookup;
+  map->LookupInDescriptors(NULL, *name, &lookup);
+  if (lookup.IsProperty() && lookup.type() == FIELD) {
+    return BuildLoadNamedField(obj,
+                               expr,
+                               map,
+                               &lookup,
+                               true);
+  } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
+    AddInstruction(new HCheckNonSmi(obj));
+    AddInstruction(new HCheckMap(obj, map));
+    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
+    return new HConstant(function, Representation::Tagged());
+  } else {
+    return BuildLoadNamedGeneric(obj, expr);
+  }
+}
+
+
+HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
+                                                   HValue* key) {
+  return new HLoadKeyedGeneric(object, key);
+}
+
+
+HInstruction* HGraphBuilder::BuildLoadKeyedFastElement(HValue* object,
+                                                       HValue* key,
+                                                       Property* expr) {
+  ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
+  AddInstruction(new HCheckNonSmi(object));
+  Handle<Map> map = expr->GetMonomorphicReceiverType();
+  ASSERT(map->has_fast_elements());
+  AddInstruction(new HCheckMap(object, map));
+  HInstruction* elements = AddInstruction(new HLoadElements(object));
+  HInstruction* length = AddInstruction(new HArrayLength(elements));
+  AddInstruction(new HBoundsCheck(key, length));
+  return new HLoadKeyedFastElement(elements, key);
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
+                                                    HValue* key,
+                                                    HValue* value) {
+  return new HStoreKeyedGeneric(object, key, value);
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreKeyedFastElement(HValue* object,
+                                                        HValue* key,
+                                                        HValue* val,
+                                                        Expression* expr) {
+  ASSERT(expr->IsMonomorphic());
+  AddInstruction(new HCheckNonSmi(object));
+  Handle<Map> map = expr->GetMonomorphicReceiverType();
+  ASSERT(map->has_fast_elements());
+  AddInstruction(new HCheckMap(object, map));
+  HInstruction* elements = AddInstruction(new HLoadElements(object));
+  AddInstruction(new HCheckMap(elements, Factory::fixed_array_map()));
+  bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
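+  // For JSArrays the length comes from the array itself, since it can be
+  // smaller than the backing store; otherwise the backing store's length
+  // is used.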
+  HInstruction* length = NULL;
+  if (is_array) {
+    length = AddInstruction(new HArrayLength(object));
+  } else {
+    length = AddInstruction(new HArrayLength(elements));
+  }
+  AddInstruction(new HBoundsCheck(key, length));
+  return new HStoreKeyedFastElement(elements, key, val);
+}
+
+
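+// Tries to compile a property access on the (non-materialized) arguments
+// object directly: arguments.length and indexed arguments accesses are
+// supported.  Returns false if this is not such an access.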
+bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
+  VariableProxy* proxy = expr->obj()->AsVariableProxy();
+  if (proxy == NULL) return false;
+  if (!proxy->var()->IsStackAllocated()) return false;
+  if (!environment()->Lookup(proxy->var())->CheckFlag(HValue::kIsArguments)) {
+    return false;
+  }
+
+  HInstruction* result = NULL;
+  if (expr->key()->IsPropertyName()) {
+    Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
+    if (!name->IsEqualTo(CStrVector("length"))) return false;
+    HInstruction* elements = AddInstruction(new HArgumentsElements);
+    result = new HArgumentsLength(elements);
+  } else {
+    VisitForValue(expr->key());
+    if (HasStackOverflow()) return false;
+    HValue* key = Pop();
+    HInstruction* elements = AddInstruction(new HArgumentsElements);
+    HInstruction* length = AddInstruction(new HArgumentsLength(elements));
+    AddInstruction(new HBoundsCheck(key, length));
+    result = new HAccessArgumentsAt(elements, length, key);
+  }
+  ast_context()->ReturnInstruction(result, expr->id());
+  return true;
+}
+
+
+void HGraphBuilder::VisitProperty(Property* expr) {
+  expr->RecordTypeFeedback(oracle());
+
+  if (TryArgumentsAccess(expr)) return;
+  CHECK_BAILOUT;
+
+  VISIT_FOR_VALUE(expr->obj());
+
+  HInstruction* instr = NULL;
+  if (expr->IsArrayLength()) {
+    HValue* array = Pop();
+    AddInstruction(new HCheckNonSmi(array));
+    instr = new HArrayLength(array);
+
+  } else if (expr->key()->IsPropertyName()) {
+    Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
+    ZoneMapList* types = expr->GetReceiverTypes();
+
+    HValue* obj = Pop();
+    if (expr->IsMonomorphic()) {
+      instr = BuildLoadNamed(obj, expr, types->first(), name);
+    } else if (types != NULL && types->length() > 1) {
+      HandlePolymorphicLoadNamedField(expr, obj, types, name);
+      return;
+
+    } else {
+      instr = BuildLoadNamedGeneric(obj, expr);
+    }
+
+  } else {
+    VISIT_FOR_VALUE(expr->key());
+
+    HValue* key = Pop();
+    HValue* obj = Pop();
+
+    bool is_fast_elements = expr->IsMonomorphic() &&
+        expr->GetMonomorphicReceiverType()->has_fast_elements();
+
+    instr = is_fast_elements
+        ? BuildLoadKeyedFastElement(obj, key, expr)
+        : BuildLoadKeyedGeneric(obj, key);
+  }
+  instr->set_position(expr->position());
+  ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::AddCheckConstantFunction(Call* expr,
+                                             HValue* receiver,
+                                             Handle<Map> receiver_map,
+                                             bool smi_and_map_check) {
+  // Constant functions have the nice property that the map will change if they
+  // are overwritten.  Therefore it is enough to check the map of the holder and
+  // its prototypes.
+  if (smi_and_map_check) {
+    AddInstruction(new HCheckNonSmi(receiver));
+    AddInstruction(new HCheckMap(receiver, receiver_map));
+  }
+  if (!expr->holder().is_null()) {
+    AddInstruction(new HCheckPrototypeMaps(receiver,
+                                           expr->holder(),
+                                           receiver_map));
+  }
+}
+
+
+void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
+                                               HValue* receiver,
+                                               ZoneMapList* types,
+                                               Handle<String> name) {
+  int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
+  int number_of_types = Min(types->length(), kMaxCallPolymorphism);
+  ZoneMapList maps(number_of_types);
+  ZoneList<HSubgraph*> subgraphs(number_of_types + 1);
+  bool needs_generic = (types->length() > kMaxCallPolymorphism);
+
+  // Build subgraphs for each of the specific maps.
+  //
+  // TODO(ager): We should recognize when the prototype chains for different
+  // maps are identical. In that case we can avoid repeatedly generating the
+  // same prototype map checks.
+  for (int i = 0; i < number_of_types; ++i) {
+    Handle<Map> map = types->at(i);
+    if (expr->ComputeTarget(map, name)) {
+      maps.Add(map);
+      HSubgraph* subgraph = CreateBranchSubgraph(environment());
+      SubgraphScope scope(this, subgraph);
+      AddCheckConstantFunction(expr, receiver, map, false);
+      if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
+        PrintF("Trying to inline the polymorphic call to %s\n",
+               *name->ToCString());
+      }
+      if (!FLAG_polymorphic_inlining || !TryInline(expr)) {
+        // Check for a bailout, since the inlining attempt may have bailed
+        // out during hydrogen processing.
+        CHECK_BAILOUT;
+        HCall* call = new HCallConstantFunction(expr->target(), argument_count);
+        call->set_position(expr->position());
+        ProcessCall(call);
+        PushAndAdd(call);
+      }
+      subgraphs.Add(subgraph);
+    } else {
+      needs_generic = true;
+    }
+  }
+
+  // If we couldn't compute the target for any of the maps just perform an
+  // IC call.
+  if (maps.length() == 0) {
+    HCall* call = new HCallNamed(name, argument_count);
+    call->set_position(expr->position());
+    ProcessCall(call);
+    ast_context()->ReturnInstruction(call, expr->id());
+  } else {
+    // Build subgraph for generic call through IC.
+    {
+      HSubgraph* subgraph = CreateBranchSubgraph(environment());
+      SubgraphScope scope(this, subgraph);
+      if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
+        subgraph->FinishExit(new HDeoptimize());
+      } else {
+        HCall* call = new HCallNamed(name, argument_count);
+        call->set_position(expr->position());
+        ProcessCall(call);
+        PushAndAdd(call);
+      }
+      subgraphs.Add(subgraph);
+    }
+
+    HBasicBlock* new_exit_block =
+        BuildTypeSwitch(&maps, &subgraphs, receiver, expr->id());
+    subgraph()->set_exit_block(new_exit_block);
+    if (new_exit_block != NULL) ast_context()->ReturnValue(Pop());
+  }
+}
+
+
+void HGraphBuilder::TraceInline(Handle<JSFunction> target, bool result) {
+  SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
+  SmartPointer<char> caller =
+      graph()->info()->function()->debug_name()->ToCString();
+  if (result) {
+    PrintF("Inlined %s called from %s.\n", *callee, *caller);
+  } else {
+    PrintF("Do not inline %s called from %s.\n", *callee, *caller);
+  }
+}
+
+
+bool HGraphBuilder::TryInline(Call* expr) {
+  if (!FLAG_use_inlining) return false;
+
+  // Precondition: call is monomorphic and we have found a target with the
+  // appropriate arity.
+  Handle<JSFunction> target = expr->target();
+
+  // Do a quick check on source code length to avoid parsing large
+  // inlining candidates.
+  if (FLAG_limit_inlining && target->shared()->SourceSize() > kMaxSourceSize) {
+    if (FLAG_trace_inlining) TraceInline(target, false);
+    return false;
+  }
+
+  // Target must be inlineable.
+  if (!target->IsInlineable()) return false;
+
+  // No context change required.
+  CompilationInfo* outer_info = graph()->info();
+  if (target->context() != outer_info->closure()->context() ||
+      outer_info->scope()->contains_with() ||
+      outer_info->scope()->num_heap_slots() > 0) {
+    return false;
+  }
+
+  // Don't inline deeper than two calls.
+  HEnvironment* env = environment();
+  if (env->outer() != NULL && env->outer()->outer() != NULL) return false;
+
+  // Don't inline recursive functions.
+  if (target->shared() == outer_info->closure()->shared()) return false;
+
+  // We don't want to add more than a certain number of nodes from inlining.
+  if (FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) {
+    if (FLAG_trace_inlining) TraceInline(target, false);
+    return false;
+  }
+
+  int count_before = AstNode::Count();
+
+  // Parse and allocate variables.
+  Handle<SharedFunctionInfo> shared(target->shared());
+  CompilationInfo inner_info(shared);
+  if (!ParserApi::Parse(&inner_info) ||
+      !Scope::Analyze(&inner_info)) {
+    return false;
+  }
+  FunctionLiteral* function = inner_info.function();
+
+  // Count the number of AST nodes added by inlining this call.
+  int nodes_added = AstNode::Count() - count_before;
+  if (FLAG_limit_inlining && nodes_added > kMaxInlinedSize) {
+    if (FLAG_trace_inlining) TraceInline(target, false);
+    return false;
+  }
+
+  // Check if we can handle all declarations in the inlined functions.
+  VisitDeclarations(inner_info.scope()->declarations());
+  if (HasStackOverflow()) {
+    ClearStackOverflow();
+    return false;
+  }
+
+  // Don't inline functions that use the arguments object or that have a
+  // mismatching number of parameters.
+  int arity = expr->arguments()->length();
+  if (function->scope()->arguments() != NULL ||
+      arity != target->shared()->formal_parameter_count()) {
+    return false;
+  }
+
+  // All statements in the body must be inlineable.
+  for (int i = 0, count = function->body()->length(); i < count; ++i) {
+    if (!function->body()->at(i)->IsInlineable()) return false;
+  }
+
+  // Generate the deoptimization data for the unoptimized version of
+  // the target function if we don't already have it.
+  if (!shared->has_deoptimization_support()) {
+    // Note that we compile here using the same AST that we will use for
+    // generating the optimized inline code.
+    inner_info.EnableDeoptimizationSupport();
+    if (!FullCodeGenerator::MakeCode(&inner_info)) return false;
+    shared->EnableDeoptimizationSupport(*inner_info.code());
+    Compiler::RecordFunctionCompilation(
+        Logger::FUNCTION_TAG,
+        Handle<String>(shared->DebugName()),
+        shared->start_position(),
+        &inner_info);
+  }
+
+  // Save the pending call context and type feedback oracle. Set up new ones
+  // for the inlined function.
+  ASSERT(shared->has_deoptimization_support());
+  AstContext* saved_call_context = call_context();
+  HBasicBlock* saved_function_return = function_return();
+  TypeFeedbackOracle* saved_oracle = oracle();
+  // On-stack replacement cannot target inlined functions.  Since we don't
+  // use a separate CompilationInfo structure for the inlined function, we
+  // save and restore the AST ID in the original compilation info.
+  int saved_osr_ast_id = graph()->info()->osr_ast_id();
+
+  TestContext* test_context = NULL;
+  if (ast_context()->IsTest()) {
+    // Inlined body is treated as if it occurs in an 'inlined' call context
+    // with true and false blocks that will forward to the real ones.
+    HBasicBlock* if_true = graph()->CreateBasicBlock();
+    HBasicBlock* if_false = graph()->CreateBasicBlock();
+    if_true->MarkAsInlineReturnTarget();
+    if_false->MarkAsInlineReturnTarget();
+    // AstContext constructor pushes on the context stack.
+    test_context = new TestContext(this, if_true, if_false);
+    function_return_ = NULL;
+  } else {
+    // Inlined body is treated as if it occurs in the original call context.
+    function_return_ = graph()->CreateBasicBlock();
+    function_return_->MarkAsInlineReturnTarget();
+  }
+  call_context_ = ast_context();
+  TypeFeedbackOracle new_oracle(Handle<Code>(shared->code()));
+  oracle_ = &new_oracle;
+  graph()->info()->SetOsrAstId(AstNode::kNoNumber);
+
+  HSubgraph* body = CreateInlinedSubgraph(env, target, function);
+  body->exit_block()->AddInstruction(new HEnterInlined(target, function));
+  AddToSubgraph(body, function->body());
+  if (HasStackOverflow()) {
+    // Bail out if the inlined function did, as we cannot residualize a
+    // call instead.
+    delete test_context;
+    call_context_ = saved_call_context;
+    function_return_ = saved_function_return;
+    oracle_ = saved_oracle;
+    graph()->info()->SetOsrAstId(saved_osr_ast_id);
+    return false;
+  }
+
+  // Update inlined nodes count.
+  inlined_count_ += nodes_added;
+
+  if (FLAG_trace_inlining) TraceInline(target, true);
+
+  if (body->HasExit()) {
+    // Add a return of undefined if control can fall off the body.  In a
+    // test context, undefined is false.
+    HValue* return_value = graph()->GetConstantUndefined();
+    if (test_context == NULL) {
+      ASSERT(function_return_ != NULL);
+      body->exit_block()->AddLeaveInlined(return_value, function_return_);
+    } else {
+      // The graph builder assumes control can reach both branches of a
+      // test, so we materialize the undefined value and test it rather than
+      // simply jumping to the false target.
+      //
+      // TODO(3168478): refactor to avoid this.
+      HBasicBlock* empty_true = graph()->CreateBasicBlock();
+      HBasicBlock* empty_false = graph()->CreateBasicBlock();
+      HBranch* branch =
+          new HBranch(empty_true, empty_false, return_value);
+      body->exit_block()->Finish(branch);
+
+      HValue* const no_return_value = NULL;
+      empty_true->AddLeaveInlined(no_return_value, test_context->if_true());
+      empty_false->AddLeaveInlined(no_return_value, test_context->if_false());
+    }
+    body->set_exit_block(NULL);
+  }
+
+  // Record the environment at the inlined function call.
+  AddSimulate(expr->ReturnId());
+
+  // Jump to the function entry (without re-recording the environment).
+  subgraph()->exit_block()->Finish(new HGoto(body->entry_block()));
+
+  // Fix up the function exits.
+  if (test_context != NULL) {
+    HBasicBlock* if_true = test_context->if_true();
+    HBasicBlock* if_false = test_context->if_false();
+    if_true->SetJoinId(expr->id());
+    if_false->SetJoinId(expr->id());
+    ASSERT(ast_context() == test_context);
+    delete test_context;  // Destructor pops from expression context stack.
+
+    // Forward to the real test context.
+    HValue* const no_return_value = NULL;
+    HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
+    if (true_target->IsInlineReturnTarget()) {
+      if_true->AddLeaveInlined(no_return_value, true_target);
+    } else {
+      if_true->Goto(true_target);
+    }
+
+    HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
+    if (false_target->IsInlineReturnTarget()) {
+      if_false->AddLeaveInlined(no_return_value, false_target);
+    } else {
+      if_false->Goto(false_target);
+    }
+
+    // TODO(kmillikin): Come up with a better way to handle this. It is too
+    // subtle. NULL here indicates that the enclosing context has no control
+    // flow to handle.
+    subgraph()->set_exit_block(NULL);
+
+  } else {
+    function_return_->SetJoinId(expr->id());
+    subgraph()->set_exit_block(function_return_);
+  }
+
+  call_context_ = saved_call_context;
+  function_return_ = saved_function_return;
+  oracle_ = saved_oracle;
+  graph()->info()->SetOsrAstId(saved_osr_ast_id);
+
+  return true;
+}
+
+
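+// Leave an inlined function: mark the exit, restore the caller's
+// environment, push |return_value| on it if there is one, and jump to
+// |target|, which must be an inline return target block.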
+void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) {
+  ASSERT(target->IsInlineReturnTarget());
+  AddInstruction(new HLeaveInlined);
+  HEnvironment* outer = last_environment()->outer();
+  if (return_value != NULL) outer->Push(return_value);
+  UpdateEnvironment(outer);
+  Goto(target);
+}
+
+
+bool HGraphBuilder::TryMathFunctionInline(Call* expr) {
+  // Try to inline calls like Math.* as operations in the calling function.
+  if (!expr->target()->shared()->IsBuiltinMathFunction()) return false;
+  BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
+  int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
+  switch (id) {
+    case kMathRound:
+    case kMathFloor:
+    case kMathAbs:
+    case kMathSqrt:
+    case kMathLog:
+    case kMathSin:
+    case kMathCos:
+      if (argument_count == 2) {
+        HValue* argument = Pop();
+        Drop(1);  // Receiver.
+        HUnaryMathOperation* op = new HUnaryMathOperation(argument, id);
+        op->set_position(expr->position());
+        ast_context()->ReturnInstruction(op, expr->id());
+        return true;
+      }
+      break;
+    case kMathPow:
+      if (argument_count == 3) {
+        HValue* right = Pop();
+        HValue* left = Pop();
+        Pop();  // Pop receiver.
+        HInstruction* result = NULL;
+        // Use sqrt() if exponent is 0.5 or -0.5.
+        if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
+          double exponent = HConstant::cast(right)->DoubleValue();
+          if (exponent == 0.5) {
+            result = new HUnaryMathOperation(left, kMathPowHalf);
+            ast_context()->ReturnInstruction(result, expr->id());
+            return true;
+          } else if (exponent == -0.5) {
+            HConstant* double_one =
+                new HConstant(Handle<Object>(Smi::FromInt(1)),
+                              Representation::Double());
+            AddInstruction(double_one);
+            HUnaryMathOperation* square_root =
+                new HUnaryMathOperation(left, kMathPowHalf);
+            AddInstruction(square_root);
+            // MathPowHalf doesn't have side effects so there's no need for
+            // an environment simulation here.
+            ASSERT(!square_root->HasSideEffects());
+            result = new HDiv(double_one, square_root);
+            ast_context()->ReturnInstruction(result, expr->id());
+            return true;
+          } else if (exponent == 2.0) {
+            result = new HMul(left, left);
+            ast_context()->ReturnInstruction(result, expr->id());
+            return true;
+          }
+        } else if (right->IsConstant() &&
+            HConstant::cast(right)->HasInteger32Value() &&
+            HConstant::cast(right)->Integer32Value() == 2) {
+          result = new HMul(left, left);
+          ast_context()->ReturnInstruction(result, expr->id());
+          return true;
+        }
+
+        result = new HPower(left, right);
+        ast_context()->ReturnInstruction(result, expr->id());
+        return true;
+      }
+      break;
+    default:
+      // Not yet supported for inlining.
+      break;
+  }
+  return false;
+}
+
+
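+// Recognize the pattern f.apply(receiver, arguments), where "arguments"
+// is the current function's arguments object, and compile it to a direct
+// HApplyArguments instruction instead of a generic call.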
+bool HGraphBuilder::TryCallApply(Call* expr) {
+  Expression* callee = expr->expression();
+  Property* prop = callee->AsProperty();
+  ASSERT(prop != NULL);
+
+  if (graph()->info()->scope()->arguments() == NULL) return false;
+
+  Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+  if (!name->IsEqualTo(CStrVector("apply"))) return false;
+
+  ZoneList<Expression*>* args = expr->arguments();
+  if (args->length() != 2) return false;
+
+  VariableProxy* arg_two = args->at(1)->AsVariableProxy();
+  if (arg_two == NULL || !arg_two->var()->IsStackAllocated()) return false;
+  HValue* arg_two_value = environment()->Lookup(arg_two->var());
+  if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
+
+  if (!expr->IsMonomorphic()) return false;
+
+  // Found pattern f.apply(receiver, arguments).
+  VisitForValue(prop->obj());
+  if (HasStackOverflow()) return false;
+  HValue* function = Pop();
+  VisitForValue(args->at(0));
+  if (HasStackOverflow()) return false;
+  HValue* receiver = Pop();
+  HInstruction* elements = AddInstruction(new HArgumentsElements);
+  HInstruction* length = AddInstruction(new HArgumentsLength(elements));
+  AddCheckConstantFunction(expr,
+                           function,
+                           expr->GetReceiverTypes()->first(),
+                           true);
+  HInstruction* result =
+      new HApplyArguments(function, receiver, length, elements);
+  result->set_position(expr->position());
+  ast_context()->ReturnInstruction(result, expr->id());
+  return true;
+}
+
+
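+// Translate a call expression. Monomorphic named calls are candidates
+// for inlining or direct calls; polymorphic named calls branch on the
+// receiver map; all remaining cases use the generic call instructions.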
+void HGraphBuilder::VisitCall(Call* expr) {
+  Expression* callee = expr->expression();
+  int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
+  HCall* call = NULL;
+
+  Property* prop = callee->AsProperty();
+  if (prop != NULL) {
+    if (!prop->key()->IsPropertyName()) {
+      // Keyed function call.
+      VisitArgument(prop->obj());
+      CHECK_BAILOUT;
+
+      VISIT_FOR_VALUE(prop->key());
+      // Push receiver and key as the non-optimized code generator
+      // expects them.
+      HValue* key = Pop();
+      HValue* receiver = Pop();
+      Push(key);
+      Push(receiver);
+
+      VisitArgumentList(expr->arguments());
+      CHECK_BAILOUT;
+
+      call = new HCallKeyed(key, argument_count);
+      call->set_position(expr->position());
+      ProcessCall(call);
+      Drop(1);  // Key.
+      ast_context()->ReturnInstruction(call, expr->id());
+      return;
+    }
+
+    // Named function call.
+    expr->RecordTypeFeedback(oracle());
+
+    if (TryCallApply(expr)) return;
+    CHECK_BAILOUT;
+
+    HValue* receiver = VisitArgument(prop->obj());
+    CHECK_BAILOUT;
+    VisitArgumentList(expr->arguments());
+    CHECK_BAILOUT;
+
+    Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+
+    expr->RecordTypeFeedback(oracle());
+    ZoneMapList* types = expr->GetReceiverTypes();
+
+    if (expr->IsMonomorphic()) {
+      AddCheckConstantFunction(expr, receiver, types->first(), true);
+
+      if (TryMathFunctionInline(expr)) {
+        return;
+      } else if (TryInline(expr)) {
+        if (subgraph()->HasExit()) {
+          HValue* return_value = Pop();
+          // If we inlined a function in a test context then we need to emit
+          // a simulate here to shadow the ones at the end of the
+          // predecessor blocks.  Those environments contain the return
+          // value on top and do not correspond to any actual state of the
+          // unoptimized code.
+          if (ast_context()->IsEffect()) AddSimulate(expr->id());
+          ast_context()->ReturnValue(return_value);
+        }
+        return;
+      } else {
+        // Check for bailout, as the TryInline call in the if condition above
+        // might return false due to bailout during hydrogen processing.
+        CHECK_BAILOUT;
+        call = new HCallConstantFunction(expr->target(), argument_count);
+      }
+
+    } else if (types != NULL && types->length() > 1) {
+      HandlePolymorphicCallNamed(expr, receiver, types, name);
+      return;
+
+    } else {
+      call = new HCallNamed(name, argument_count);
+    }
+
+  } else {
+    Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+    bool global_call = (var != NULL) && var->is_global() && !var->is_this();
+
+    if (!global_call) {
+      ++argument_count;
+      VisitArgument(expr->expression());
+      CHECK_BAILOUT;
+    }
+
+    if (global_call) {
+      // If there is a global property cell for the name at compile time
+      // and access check is not enabled, we assume that the function will
+      // not change and generate optimized code for calling the function.
+      CompilationInfo* info = graph()->info();
+      bool known_global_function = info->has_global_object() &&
+          !info->global_object()->IsAccessCheckNeeded() &&
+          expr->ComputeGlobalTarget(Handle<GlobalObject>(info->global_object()),
+                                    var->name());
+      if (known_global_function) {
+        // Push the global object instead of the global receiver because
+        // code generated by the full code generator expects it.
+        PushAndAdd(new HGlobalObject);
+        VisitArgumentList(expr->arguments());
+        CHECK_BAILOUT;
+
+        VISIT_FOR_VALUE(expr->expression());
+        HValue* function = Pop();
+        AddInstruction(new HCheckFunction(function, expr->target()));
+
+        // Replace the global object with the global receiver.
+        HGlobalReceiver* global_receiver = new HGlobalReceiver;
+        // Index of the receiver from the top of the expression stack.
+        const int receiver_index = argument_count - 1;
+        AddInstruction(global_receiver);
+        ASSERT(environment()->ExpressionStackAt(receiver_index)->
+               IsGlobalObject());
+        environment()->SetExpressionStackAt(receiver_index, global_receiver);
+
+        if (TryInline(expr)) {
+          if (subgraph()->HasExit()) {
+            HValue* return_value = Pop();
+            // If we inlined a function in a test context then we need to
+            // emit a simulate here to shadow the ones at the end of the
+            // predecessor blocks.  Those environments contain the return
+            // value on top and do not correspond to any actual state of the
+            // unoptimized code.
+            if (ast_context()->IsEffect()) AddSimulate(expr->id());
+            ast_context()->ReturnValue(return_value);
+          }
+          return;
+        }
+        // Check for bailout, as trying to inline might fail due to bailout
+        // during hydrogen processing.
+        CHECK_BAILOUT;
+
+        call = new HCallKnownGlobal(expr->target(), argument_count);
+      } else {
+        PushAndAdd(new HGlobalObject);
+        VisitArgumentList(expr->arguments());
+        CHECK_BAILOUT;
+
+        call = new HCallGlobal(var->name(), argument_count);
+      }
+
+    } else {
+      PushAndAdd(new HGlobalReceiver);
+      VisitArgumentList(expr->arguments());
+      CHECK_BAILOUT;
+
+      call = new HCallFunction(argument_count);
+    }
+  }
+
+  call->set_position(expr->position());
+  ProcessCall(call);
+  ast_context()->ReturnInstruction(call, expr->id());
+}
+
+
+void HGraphBuilder::VisitCallNew(CallNew* expr) {
+  // The constructor function is also used as the receiver argument to the
+  // JS construct call builtin.
+  VisitArgument(expr->expression());
+  CHECK_BAILOUT;
+  VisitArgumentList(expr->arguments());
+  CHECK_BAILOUT;
+
+  int argument_count = expr->arguments()->length() + 1;  // Plus constructor.
+  HCall* call = new HCallNew(argument_count);
+  call->set_position(expr->position());
+  ProcessCall(call);
+  ast_context()->ReturnInstruction(call, expr->id());
+}
+
+
+// Support for generating inlined runtime functions.
+
+// Lookup table for generators for runtime calls that are generated inline.
+// Elements of the table are member pointers to functions of HGraphBuilder.
+#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize)  \
+    &HGraphBuilder::Generate##Name,
+
+const HGraphBuilder::InlineFunctionGenerator
+    HGraphBuilder::kInlineFunctionGenerators[] = {
+        INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+        INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+};
+#undef INLINE_FUNCTION_GENERATOR_ADDRESS
+
+
+void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+  if (name->IsEqualTo(CStrVector("_Log"))) {
+    ast_context()->ReturnValue(graph()->GetConstantUndefined());
+    return;
+  }
+
+  Runtime::Function* function = expr->function();
+  if (expr->is_jsruntime()) {
+    BAILOUT("call to a JavaScript runtime function");
+  }
+  ASSERT(function != NULL);
+
+  VisitArgumentList(expr->arguments());
+  CHECK_BAILOUT;
+
+  int argument_count = expr->arguments()->length();
+  if (function->intrinsic_type == Runtime::INLINE) {
+    ASSERT(name->length() > 0);
+    ASSERT(name->Get(0) == '_');
+    // Call to an inline function.
+    int lookup_index = static_cast<int>(function->function_id) -
+        static_cast<int>(Runtime::kFirstInlineFunction);
+    ASSERT(lookup_index >= 0);
+    ASSERT(static_cast<size_t>(lookup_index) <
+           ARRAY_SIZE(kInlineFunctionGenerators));
+    InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
+
+    // Call the inline code generator using the pointer-to-member.
+    (this->*generator)(argument_count, expr->id());
+  } else {
+    ASSERT(function->intrinsic_type == Runtime::RUNTIME);
+    HCall* call = new HCallRuntime(name, expr->function(), argument_count);
+    call->set_position(RelocInfo::kNoPosition);
+    ProcessCall(call);
+    ast_context()->ReturnInstruction(call, expr->id());
+  }
+}
+
+
+void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+  Token::Value op = expr->op();
+  if (op == Token::VOID) {
+    VISIT_FOR_EFFECT(expr->expression());
+    ast_context()->ReturnValue(graph()->GetConstantUndefined());
+  } else if (op == Token::DELETE) {
+    Property* prop = expr->expression()->AsProperty();
+    Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+    if (prop == NULL && var == NULL) {
+      // Result of deleting non-property, non-variable reference is true.
+      // Evaluate the subexpression for side effects.
+      VISIT_FOR_EFFECT(expr->expression());
+      ast_context()->ReturnValue(graph()->GetConstantTrue());
+    } else if (var != NULL &&
+               !var->is_global() &&
+               var->AsSlot() != NULL &&
+               var->AsSlot()->type() != Slot::LOOKUP) {
+      // Result of deleting non-global, non-dynamic variables is false.
+      // The subexpression does not have side effects.
+      ast_context()->ReturnValue(graph()->GetConstantFalse());
+    } else if (prop != NULL) {
+      VISIT_FOR_VALUE(prop->obj());
+      VISIT_FOR_VALUE(prop->key());
+      HValue* key = Pop();
+      HValue* obj = Pop();
+      ast_context()->ReturnInstruction(new HDeleteProperty(obj, key),
+                                       expr->id());
+    } else if (var->is_global()) {
+      BAILOUT("delete with global variable");
+    } else {
+      BAILOUT("delete with non-global variable");
+    }
+  } else if (op == Token::NOT) {
+    if (ast_context()->IsTest()) {
+      TestContext* context = TestContext::cast(ast_context());
+      VisitForControl(expr->expression(),
+                      context->if_false(),
+                      context->if_true());
+    } else {
+      HSubgraph* true_graph = CreateEmptySubgraph();
+      HSubgraph* false_graph = CreateEmptySubgraph();
+      VISIT_FOR_CONTROL(expr->expression(),
+                        false_graph->entry_block(),
+                        true_graph->entry_block());
+      true_graph->entry_block()->SetJoinId(expr->expression()->id());
+      true_graph->environment()->Push(graph_->GetConstantTrue());
+
+      false_graph->entry_block()->SetJoinId(expr->expression()->id());
+      false_graph->environment()->Push(graph_->GetConstantFalse());
+
+      current_subgraph_->AppendJoin(true_graph, false_graph, expr);
+      ast_context()->ReturnValue(Pop());
+    }
+  } else if (op == Token::BIT_NOT || op == Token::SUB) {
+    VISIT_FOR_VALUE(expr->expression());
+    HValue* value = Pop();
+    HInstruction* instr = NULL;
+    switch (op) {
+      case Token::BIT_NOT:
+        instr = new HBitNot(value);
+        break;
+      case Token::SUB:
+        instr = new HMul(graph_->GetConstantMinus1(), value);
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+    ast_context()->ReturnInstruction(instr, expr->id());
+  } else if (op == Token::TYPEOF) {
+    VISIT_FOR_VALUE(expr->expression());
+    HValue* value = Pop();
+    ast_context()->ReturnInstruction(new HTypeof(value), expr->id());
+  } else {
+    BAILOUT("Value: unsupported unary operation");
+  }
+}
+
+
+void HGraphBuilder::VisitIncrementOperation(IncrementOperation* expr) {
+  // IncrementOperation is never visited by the visitor. It only
+  // occurs as a subexpression of CountOperation.
+  UNREACHABLE();
+}
+
+
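+// Build the add of +1 or -1 used by count operations; the result is
+// assumed to have Integer32 representation.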
+HInstruction* HGraphBuilder::BuildIncrement(HValue* value, bool increment) {
+  HConstant* delta = increment
+      ? graph_->GetConstant1()
+      : graph_->GetConstantMinus1();
+  HInstruction* instr = new HAdd(value, delta);
+  AssumeRepresentation(instr, Representation::Integer32());
+  return instr;
+}
+
+
+void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
+  IncrementOperation* increment = expr->increment();
+  Expression* target = increment->expression();
+  VariableProxy* proxy = target->AsVariableProxy();
+  Variable* var = proxy->AsVariable();
+  Property* prop = target->AsProperty();
+  ASSERT(var == NULL || prop == NULL);
+  bool inc = expr->op() == Token::INC;
+
+  if (var != NULL) {
+    if (!var->is_global() && !var->IsStackAllocated()) {
+      BAILOUT("non-stack/non-global variable in count operation");
+    }
+
+    VISIT_FOR_VALUE(target);
+
+    // Match the full code generator stack by simulating an extra stack
+    // element for postfix operations in a non-effect context.
+    bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
+    HValue* before = has_extra ? Top() : Pop();
+    HInstruction* after = BuildIncrement(before, inc);
+    AddInstruction(after);
+    Push(after);
+
+    if (var->is_global()) {
+      HandleGlobalVariableAssignment(var,
+                                     after,
+                                     expr->position(),
+                                     expr->AssignmentId());
+    } else {
+      ASSERT(var->IsStackAllocated());
+      Bind(var, after);
+    }
+    Drop(has_extra ? 2 : 1);
+    ast_context()->ReturnValue(expr->is_postfix() ? before : after);
+
+  } else if (prop != NULL) {
+    prop->RecordTypeFeedback(oracle());
+
+    if (prop->key()->IsPropertyName()) {
+      // Named property.
+
+      // Match the full code generator stack by simulating an extra stack
+      // element for postfix operations in a non-effect context.
+      bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
+      if (has_extra) Push(graph_->GetConstantUndefined());
+
+      VISIT_FOR_VALUE(prop->obj());
+      HValue* obj = Top();
+
+      HInstruction* load = NULL;
+      if (prop->IsMonomorphic()) {
+        Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+        Handle<Map> map = prop->GetReceiverTypes()->first();
+        load = BuildLoadNamed(obj, prop, map, name);
+      } else {
+        load = BuildLoadNamedGeneric(obj, prop);
+      }
+      PushAndAdd(load);
+      if (load->HasSideEffects()) AddSimulate(increment->id());
+
+      HValue* before = Pop();
+      // There is no deoptimization target after the increment, so we
+      // don't need to simulate the expression stack after this instruction.
+      HInstruction* after = BuildIncrement(before, inc);
+      AddInstruction(after);
+
+      HInstruction* store = BuildStoreNamed(obj, after, prop);
+      AddInstruction(store);
+
+      // Overwrite the receiver in the bailout environment with the result
+      // of the operation, and the placeholder with the original value if
+      // necessary.
+      environment()->SetExpressionStackAt(0, after);
+      if (has_extra) environment()->SetExpressionStackAt(1, before);
+      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+      Drop(has_extra ? 2 : 1);
+
+      ast_context()->ReturnValue(expr->is_postfix() ? before : after);
+
+    } else {
+      // Keyed property.
+
+      // Match the full code generator stack by simulating an extra stack
+      // element for postfix operations in a non-effect context.
+      bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
+      if (has_extra) Push(graph_->GetConstantUndefined());
+
+      VISIT_FOR_VALUE(prop->obj());
+      VISIT_FOR_VALUE(prop->key());
+      HValue* obj = environment()->ExpressionStackAt(1);
+      HValue* key = environment()->ExpressionStackAt(0);
+
+      bool is_fast_elements = prop->IsMonomorphic() &&
+          prop->GetMonomorphicReceiverType()->has_fast_elements();
+
+      HInstruction* load = is_fast_elements
+          ? BuildLoadKeyedFastElement(obj, key, prop)
+          : BuildLoadKeyedGeneric(obj, key);
+      PushAndAdd(load);
+      if (load->HasSideEffects()) AddSimulate(increment->id());
+
+      HValue* before = Pop();
+      // There is no deoptimization target after the increment, so we
+      // don't need to simulate the expression stack after this instruction.
+      HInstruction* after = BuildIncrement(before, inc);
+      AddInstruction(after);
+
+      HInstruction* store = is_fast_elements
+          ? BuildStoreKeyedFastElement(obj, key, after, prop)
+          : new HStoreKeyedGeneric(obj, key, after);
+      AddInstruction(store);
+
+      // Drop the key from the bailout environment.  Overwrite the receiver
+      // with the result of the operation, and the placeholder with the
+      // original value if necessary.
+      Drop(1);
+      environment()->SetExpressionStackAt(0, after);
+      if (has_extra) environment()->SetExpressionStackAt(1, before);
+      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
+      Drop(has_extra ? 2 : 1);
+
+      ast_context()->ReturnValue(expr->is_postfix() ? before : after);
+    }
+
+  } else {
+    BAILOUT("invalid lhs in count operation");
+  }
+}
+
+
+HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
+                                                  HValue* left,
+                                                  HValue* right) {
+  HInstruction* instr = NULL;
+  switch (expr->op()) {
+    case Token::ADD:
+      instr = new HAdd(left, right);
+      break;
+    case Token::SUB:
+      instr = new HSub(left, right);
+      break;
+    case Token::MUL:
+      instr = new HMul(left, right);
+      break;
+    case Token::MOD:
+      instr = new HMod(left, right);
+      break;
+    case Token::DIV:
+      instr = new HDiv(left, right);
+      break;
+    case Token::BIT_XOR:
+      instr = new HBitXor(left, right);
+      break;
+    case Token::BIT_AND:
+      instr = new HBitAnd(left, right);
+      break;
+    case Token::BIT_OR:
+      instr = new HBitOr(left, right);
+      break;
+    case Token::SAR:
+      instr = new HSar(left, right);
+      break;
+    case Token::SHR:
+      instr = new HShr(left, right);
+      break;
+    case Token::SHL:
+      instr = new HShl(left, right);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  TypeInfo info = oracle()->BinaryType(expr, TypeFeedbackOracle::RESULT);
+  // If we hit an uninitialized binary op stub, we will get type info
+  // for a smi operation. If one of the operands is a constant string,
+  // do not generate code assuming it is a smi operation.
+  if (info.IsSmi() &&
+      ((left->IsConstant() && HConstant::cast(left)->HasStringValue()) ||
+       (right->IsConstant() && HConstant::cast(right)->HasStringValue()))) {
+    return instr;
+  }
+  if (FLAG_trace_representation) {
+    PrintF("Info: %s/%s\n", info.ToString(), ToRepresentation(info).Mnemonic());
+  }
+  AssumeRepresentation(instr, ToRepresentation(info));
+  return instr;
+}
+
+
+// Check for the form (%_ClassOf(foo) === 'BarClass').
+static bool IsClassOfTest(CompareOperation* expr) {
+  if (expr->op() != Token::EQ_STRICT) return false;
+  CallRuntime* call = expr->left()->AsCallRuntime();
+  if (call == NULL) return false;
+  Literal* literal = expr->right()->AsLiteral();
+  if (literal == NULL) return false;
+  if (!literal->handle()->IsString()) return false;
+  if (!call->name()->IsEqualTo(CStrVector("_ClassOf"))) return false;
+  ASSERT(call->arguments()->length() == 1);
+  return true;
+}
+
+
+void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+  if (expr->op() == Token::COMMA) {
+    VISIT_FOR_EFFECT(expr->left());
+    // Visit the right subexpression in the same AST context as the entire
+    // expression.
+    Visit(expr->right());
+
+  } else if (expr->op() == Token::AND || expr->op() == Token::OR) {
+    bool is_logical_and = (expr->op() == Token::AND);
+    if (ast_context()->IsTest()) {
+      TestContext* context = TestContext::cast(ast_context());
+      // Translate left subexpression.
+      HBasicBlock* eval_right = graph()->CreateBasicBlock();
+      if (is_logical_and) {
+        VISIT_FOR_CONTROL(expr->left(), eval_right, context->if_false());
+      } else {
+        VISIT_FOR_CONTROL(expr->left(), context->if_true(), eval_right);
+      }
+      eval_right->SetJoinId(expr->RightId());
+
+      // Translate right subexpression by visiting it in the same AST
+      // context as the entire expression.
+      subgraph()->set_exit_block(eval_right);
+      Visit(expr->right());
+
+    } else {
+      VISIT_FOR_VALUE(expr->left());
+      ASSERT(current_subgraph_->HasExit());
+
+      HValue* left = Top();
+      HEnvironment* environment_copy = environment()->Copy();
+      environment_copy->Pop();
+      HSubgraph* right_subgraph = CreateBranchSubgraph(environment_copy);
+      ADD_TO_SUBGRAPH(right_subgraph, expr->right());
+      current_subgraph_->AppendOptional(right_subgraph, is_logical_and, left);
+      current_subgraph_->exit_block()->SetJoinId(expr->id());
+      ast_context()->ReturnValue(Pop());
+    }
+
+  } else {
+    VISIT_FOR_VALUE(expr->left());
+    VISIT_FOR_VALUE(expr->right());
+
+    HValue* right = Pop();
+    HValue* left = Pop();
+    HInstruction* instr = BuildBinaryOperation(expr, left, right);
+    instr->set_position(expr->position());
+    ast_context()->ReturnInstruction(instr, expr->id());
+  }
+}
+
+
+void HGraphBuilder::AssumeRepresentation(HValue* value, Representation r) {
+  if (value->CheckFlag(HValue::kFlexibleRepresentation)) {
+    if (FLAG_trace_representation) {
+      PrintF("Assume representation for %s to be %s (%d)\n",
+             value->Mnemonic(),
+             r.Mnemonic(),
+             graph_->GetMaximumValueID());
+    }
+    value->ChangeRepresentation(r);
+    // The representation of the value is dictated by type feedback.
+    value->ClearFlag(HValue::kFlexibleRepresentation);
+  } else if (FLAG_trace_representation) {
+    PrintF("No representation assumed\n");
+  }
+}
+
+
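+// Map recorded type feedback to the most specific representation that
+// still covers all the values seen so far.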
+Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
+  if (info.IsSmi()) return Representation::Integer32();
+  if (info.IsInteger32()) return Representation::Integer32();
+  if (info.IsDouble()) return Representation::Double();
+  if (info.IsNumber()) return Representation::Double();
+  return Representation::Tagged();
+}
+
+
+void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+  if (IsClassOfTest(expr)) {
+    CallRuntime* call = expr->left()->AsCallRuntime();
+    VISIT_FOR_VALUE(call->arguments()->at(0));
+    HValue* value = Pop();
+    Literal* literal = expr->right()->AsLiteral();
+    Handle<String> rhs = Handle<String>::cast(literal->handle());
+    HInstruction* instr = new HClassOfTest(value, rhs);
+    instr->set_position(expr->position());
+    ast_context()->ReturnInstruction(instr, expr->id());
+    return;
+  }
+
+  // Check for the pattern: typeof <expression> == <string literal>.
+  UnaryOperation* left_unary = expr->left()->AsUnaryOperation();
+  Literal* right_literal = expr->right()->AsLiteral();
+  if ((expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT) &&
+      left_unary != NULL && left_unary->op() == Token::TYPEOF &&
+      right_literal != NULL && right_literal->handle()->IsString()) {
+    VISIT_FOR_VALUE(left_unary->expression());
+    HValue* left = Pop();
+    HInstruction* instr = new HTypeofIs(left,
+        Handle<String>::cast(right_literal->handle()));
+    instr->set_position(expr->position());
+    ast_context()->ReturnInstruction(instr, expr->id());
+    return;
+  }
+
+  VISIT_FOR_VALUE(expr->left());
+  VISIT_FOR_VALUE(expr->right());
+
+  HValue* right = Pop();
+  HValue* left = Pop();
+  Token::Value op = expr->op();
+
+  TypeInfo info = oracle()->CompareType(expr, TypeFeedbackOracle::RESULT);
+  HInstruction* instr = NULL;
+  if (op == Token::INSTANCEOF) {
+    instr = new HInstanceOf(left, right);
+  } else if (op == Token::IN) {
+    BAILOUT("Unsupported comparison: in");
+  } else if (info.IsNonPrimitive()) {
+    switch (op) {
+      case Token::EQ:
+      case Token::EQ_STRICT: {
+        AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(left));
+        AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(right));
+        instr = new HCompareJSObjectEq(left, right);
+        break;
+      }
+      default:
+        BAILOUT("Unsupported non-primitive compare");
+        break;
+    }
+  } else {
+    HCompare* compare = new HCompare(left, right, op);
+    Representation r = ToRepresentation(info);
+    compare->SetInputRepresentation(r);
+    instr = compare;
+  }
+  instr->set_position(expr->position());
+  ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
+  VISIT_FOR_VALUE(expr->expression());
+
+  HValue* value = Pop();
+  HIsNull* compare = new HIsNull(value, expr->is_strict());
+  ast_context()->ReturnInstruction(compare, expr->id());
+}
+
+
+void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+  BAILOUT("ThisFunction");
+}
+
+
+void HGraphBuilder::VisitDeclaration(Declaration* decl) {
+  // We allow only declarations that do not require code generation.
+  // The following all require code generation: global variables and
+  // functions, variables with slot type LOOKUP, declarations with
+  // mode CONST, and function declarations.
+  Variable* var = decl->proxy()->var();
+  Slot* slot = var->AsSlot();
+  if (var->is_global() ||
+      (slot != NULL && slot->type() == Slot::LOOKUP) ||
+      decl->mode() == Variable::CONST ||
+      decl->fun() != NULL) {
+    BAILOUT("unsupported declaration");
+  }
+}
+
+
+// Generators for inline runtime functions.
+// Support for types.
+void HGraphBuilder::GenerateIsSmi(int argument_count, int ast_id) {
+  ASSERT(argument_count == 1);
+  HValue* value = Pop();
+  HIsSmi* result = new HIsSmi(value);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateIsSpecObject(int argument_count, int ast_id) {
+  ASSERT(argument_count == 1);
+  HValue* value = Pop();
+  HHasInstanceType* result =
+      new HHasInstanceType(value, FIRST_JS_OBJECT_TYPE, LAST_TYPE);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateIsFunction(int argument_count, int ast_id) {
+  ASSERT(argument_count == 1);
+  HValue* value = Pop();
+  HHasInstanceType* result = new HHasInstanceType(value, JS_FUNCTION_TYPE);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateHasCachedArrayIndex(int argument_count,
+                                                int ast_id) {
+  ASSERT(argument_count == 1);
+  HValue* value = Pop();
+  HHasCachedArrayIndex* result = new HHasCachedArrayIndex(value);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateIsArray(int argument_count, int ast_id) {
+  ASSERT(argument_count == 1);
+  HValue* value = Pop();
+  HHasInstanceType* result = new HHasInstanceType(value, JS_ARRAY_TYPE);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateIsRegExp(int argument_count, int ast_id) {
+  ASSERT(argument_count == 1);
+  HValue* value = Pop();
+  HHasInstanceType* result = new HHasInstanceType(value, JS_REGEXP_TYPE);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateIsObject(int argument_count, int ast_id) {
+  ASSERT(argument_count == 1);
+
+  HValue* value = Pop();
+  HIsObject* test = new HIsObject(value);
+  ast_context()->ReturnInstruction(test, ast_id);
+}
+
+
+void HGraphBuilder::GenerateIsNonNegativeSmi(int argument_count,
+                                             int ast_id) {
+  BAILOUT("inlined runtime function: IsNonNegativeSmi");
+}
+
+
+void HGraphBuilder::GenerateIsUndetectableObject(int argument_count,
+                                                 int ast_id) {
+  BAILOUT("inlined runtime function: IsUndetectableObject");
+}
+
+
+void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
+    int argument_count,
+    int ast_id) {
+  BAILOUT("inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
+}
+
+
+// Support for construct call checks.
+void HGraphBuilder::GenerateIsConstructCall(int argument_count, int ast_id) {
+  BAILOUT("inlined runtime function: IsConstructCall");
+}
+
+
+// Support for arguments.length and arguments[?].
+void HGraphBuilder::GenerateArgumentsLength(int argument_count, int ast_id) {
+  ASSERT(argument_count == 0);
+  HInstruction* elements = AddInstruction(new HArgumentsElements);
+  HArgumentsLength* result = new HArgumentsLength(elements);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateArguments(int argument_count, int ast_id) {
+  ASSERT(argument_count == 1);
+  HValue* index = Pop();
+  HInstruction* elements = AddInstruction(new HArgumentsElements);
+  HInstruction* length = AddInstruction(new HArgumentsLength(elements));
+  HAccessArgumentsAt* result = new HAccessArgumentsAt(elements, length, index);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+// Support for accessing the class and value fields of an object.
+void HGraphBuilder::GenerateClassOf(int argument_count, int ast_id) {
+  // The special form detected by IsClassOfTest is handled before we get
+  // here and does not cause a bailout.
+  BAILOUT("inlined runtime function: ClassOf");
+}
+
+
+void HGraphBuilder::GenerateValueOf(int argument_count, int ast_id) {
+  ASSERT(argument_count == 1);
+  HValue* value = Pop();
+  HValueOf* result = new HValueOf(value);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateSetValueOf(int argument_count, int ast_id) {
+  BAILOUT("inlined runtime function: SetValueOf");
+}
+
+
+// Fast support for charCodeAt(n).
+void HGraphBuilder::GenerateStringCharCodeAt(int argument_count, int ast_id) {
+  BAILOUT("inlined runtime function: StringCharCodeAt");
+}
+
+
+// Fast support for String.fromCharCode(n).
+void HGraphBuilder::GenerateStringCharFromCode(int argument_count,
+                                               int ast_id) {
+  BAILOUT("inlined runtime function: StringCharFromCode");
+}
+
+
+// Fast support for string.charAt(n) and string[n].
+void HGraphBuilder::GenerateStringCharAt(int argument_count, int ast_id) {
+  ASSERT_EQ(2, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  HCallStub* result = new HCallStub(CodeStub::StringCharAt, argument_count);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+// Fast support for object equality testing.
+void HGraphBuilder::GenerateObjectEquals(int argument_count, int ast_id) {
+  ASSERT(argument_count == 2);
+  HValue* right = Pop();
+  HValue* left = Pop();
+  HCompareJSObjectEq* result = new HCompareJSObjectEq(left, right);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateLog(int argument_count, int ast_id) {
+  UNREACHABLE();  // We caught this in VisitCallRuntime.
+}
+
+
+// Fast support for Math.random().
+void HGraphBuilder::GenerateRandomHeapNumber(int argument_count, int ast_id) {
+  BAILOUT("inlined runtime function: RandomHeapNumber");
+}
+
+
+// Fast support for StringAdd.
+void HGraphBuilder::GenerateStringAdd(int argument_count, int ast_id) {
+  ASSERT_EQ(2, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  HCallStub* result = new HCallStub(CodeStub::StringAdd, argument_count);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+// Fast support for SubString.
+void HGraphBuilder::GenerateSubString(int argument_count, int ast_id) {
+  ASSERT_EQ(3, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  HCallStub* result = new HCallStub(CodeStub::SubString, argument_count);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+// Fast support for StringCompare.
+void HGraphBuilder::GenerateStringCompare(int argument_count, int ast_id) {
+  ASSERT_EQ(2, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  HCallStub* result = new HCallStub(CodeStub::StringCompare, argument_count);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+// Support for direct calls from JavaScript to native RegExp code.
+void HGraphBuilder::GenerateRegExpExec(int argument_count, int ast_id) {
+  ASSERT_EQ(4, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  HCallStub* result = new HCallStub(CodeStub::RegExpExec, argument_count);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+// Construct a RegExp exec result with two in-object properties.
+void HGraphBuilder::GenerateRegExpConstructResult(int argument_count,
+                                                  int ast_id) {
+  ASSERT_EQ(3, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  HCallStub* result =
+      new HCallStub(CodeStub::RegExpConstructResult, argument_count);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+// Support for fast native caches.
+void HGraphBuilder::GenerateGetFromCache(int argument_count, int ast_id) {
+  BAILOUT("inlined runtime function: GetFromCache");
+}
+
+
+// Fast support for number to string.
+void HGraphBuilder::GenerateNumberToString(int argument_count, int ast_id) {
+  ASSERT_EQ(1, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  HCallStub* result = new HCallStub(CodeStub::NumberToString, argument_count);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+// Fast swapping of elements. Takes three expressions, the object and two
+// indices. This should only be used if the indices are known to be
+// non-negative and within bounds of the elements array at the call site.
+void HGraphBuilder::GenerateSwapElements(int argument_count, int ast_id) {
+  BAILOUT("inlined runtime function: SwapElements");
+}
+
+
+// Fast call for custom callbacks.
+void HGraphBuilder::GenerateCallFunction(int argument_count, int ast_id) {
+  BAILOUT("inlined runtime function: CallFunction");
+}
+
+
+// Fast call to math functions.
+void HGraphBuilder::GenerateMathPow(int argument_count, int ast_id) {
+  ASSERT_EQ(2, argument_count);
+  HValue* right = Pop();
+  HValue* left = Pop();
+  HPower* result = new HPower(left, right);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateMathSin(int argument_count, int ast_id) {
+  ASSERT_EQ(1, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  HCallStub* result =
+      new HCallStub(CodeStub::TranscendentalCache, argument_count);
+  result->set_transcendental_type(TranscendentalCache::SIN);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateMathCos(int argument_count, int ast_id) {
+  ASSERT_EQ(1, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  HCallStub* result =
+      new HCallStub(CodeStub::TranscendentalCache, argument_count);
+  result->set_transcendental_type(TranscendentalCache::COS);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateMathLog(int argument_count, int ast_id) {
+  ASSERT_EQ(1, argument_count);
+  PushArgumentsForStubCall(argument_count);
+  HCallStub* result =
+      new HCallStub(CodeStub::TranscendentalCache, argument_count);
+  result->set_transcendental_type(TranscendentalCache::LOG);
+  ast_context()->ReturnInstruction(result, ast_id);
+}
+
+
+void HGraphBuilder::GenerateMathSqrt(int argument_count, int ast_id) {
+  BAILOUT("inlined runtime function: MathSqrt");
+}
+
+
+// Check whether two RegExps are equivalent.
+void HGraphBuilder::GenerateIsRegExpEquivalent(int argument_count,
+                                               int ast_id) {
+  BAILOUT("inlined runtime function: IsRegExpEquivalent");
+}
+
+
+void HGraphBuilder::GenerateGetCachedArrayIndex(int argument_count,
+                                                int ast_id) {
+  BAILOUT("inlined runtime function: GetCachedArrayIndex");
+}
+
+
+void HGraphBuilder::GenerateFastAsciiArrayJoin(int argument_count,
+                                               int ast_id) {
+  BAILOUT("inlined runtime function: FastAsciiArrayJoin");
+}
+
+
+#undef BAILOUT
+#undef CHECK_BAILOUT
+#undef VISIT_FOR_EFFECT
+#undef VISIT_FOR_VALUE
+#undef ADD_TO_SUBGRAPH
+
+
+HEnvironment::HEnvironment(HEnvironment* outer,
+                           Scope* scope,
+                           Handle<JSFunction> closure)
+    : closure_(closure),
+      values_(0),
+      assigned_variables_(4),
+      parameter_count_(0),
+      local_count_(0),
+      outer_(outer),
+      pop_count_(0),
+      push_count_(0),
+      ast_id_(AstNode::kNoNumber) {
+  Initialize(scope->num_parameters() + 1, scope->num_stack_slots(), 0);
+}
+
+
+HEnvironment::HEnvironment(const HEnvironment* other)
+    : values_(0),
+      assigned_variables_(0),
+      parameter_count_(0),
+      local_count_(0),
+      outer_(NULL),
+      pop_count_(0),
+      push_count_(0),
+      ast_id_(other->ast_id()) {
+  Initialize(other);
+}
+
+
+void HEnvironment::Initialize(int parameter_count,
+                              int local_count,
+                              int stack_height) {
+  parameter_count_ = parameter_count;
+  local_count_ = local_count;
+
+  // Avoid reallocating the temporaries' backing store on the first Push.
+  int total = parameter_count + local_count + stack_height;
+  values_.Initialize(total + 4);
+  for (int i = 0; i < total; ++i) values_.Add(NULL);
+}
+
+
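+// Merge |other|, the environment arriving along a new incoming edge, into
+// this environment at |block|: extend existing phis with the incoming
+// value and introduce new phis where the two environments disagree.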
+void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
+  ASSERT(!block->IsLoopHeader());
+  ASSERT(values_.length() == other->values_.length());
+
+  int length = values_.length();
+  for (int i = 0; i < length; ++i) {
+    HValue* value = values_[i];
+    if (value != NULL && value->IsPhi() && value->block() == block) {
+      // There is already a phi for the i'th value.
+      HPhi* phi = HPhi::cast(value);
+      // Assert index is correct and that we haven't missed an incoming edge.
+      ASSERT(phi->merged_index() == i);
+      ASSERT(phi->OperandCount() == block->predecessors()->length());
+      phi->AddInput(other->values_[i]);
+    } else if (values_[i] != other->values_[i]) {
+      // There is a fresh value on the incoming edge, a phi is needed.
+      ASSERT(values_[i] != NULL && other->values_[i] != NULL);
+      HPhi* phi = new HPhi(i);
+      HValue* old_value = values_[i];
+      for (int j = 0; j < block->predecessors()->length(); j++) {
+        phi->AddInput(old_value);
+      }
+      phi->AddInput(other->values_[i]);
+      this->values_[i] = phi;
+      block->AddPhi(phi);
+    }
+  }
+}
+
+
+void HEnvironment::Initialize(const HEnvironment* other) {
+  closure_ = other->closure();
+  values_.AddAll(other->values_);
+  assigned_variables_.AddAll(other->assigned_variables_);
+  parameter_count_ = other->parameter_count_;
+  local_count_ = other->local_count_;
+  if (other->outer_ != NULL) outer_ = other->outer_->Copy();  // Deep copy.
+  pop_count_ = other->pop_count_;
+  push_count_ = other->push_count_;
+  ast_id_ = other->ast_id_;
+}
+
+
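+// Map a stack-allocated variable to its index in the environment's flat
+// list of values: the receiver is at index 0, so parameter slots are
+// biased by 1, and locals follow the parameters.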
+int HEnvironment::IndexFor(Variable* variable) const {
+  Slot* slot = variable->AsSlot();
+  ASSERT(slot != NULL && slot->IsStackAllocated());
+  if (slot->type() == Slot::PARAMETER) {
+    return slot->index() + 1;
+  } else {
+    return parameter_count_ + slot->index();
+  }
+}
+
+
+HEnvironment* HEnvironment::Copy() const {
+  return new HEnvironment(this);
+}
+
+
+HEnvironment* HEnvironment::CopyWithoutHistory() const {
+  HEnvironment* result = Copy();
+  result->ClearHistory();
+  return result;
+}
+
+
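+// Copy this environment for use at a loop header: every value is wrapped
+// in a phi on the header block, with the current value as the phi's
+// first input.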
+HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
+  HEnvironment* new_env = Copy();
+  for (int i = 0; i < values_.length(); ++i) {
+    HPhi* phi = new HPhi(i);
+    phi->AddInput(values_[i]);
+    new_env->values_[i] = phi;
+    loop_header->AddPhi(phi);
+  }
+  new_env->ClearHistory();
+  return new_env;
+}
+
+
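+// Build the environment for entering an inlined function: the outer
+// environment is a copy of the caller's without the call's arguments, and
+// the inner environment takes its receiver and parameters from the
+// caller's expression stack and starts its locals as undefined.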
+HEnvironment* HEnvironment::CopyForInlining(Handle<JSFunction> target,
+                                            FunctionLiteral* function,
+                                            bool is_speculative,
+                                            HConstant* undefined) const {
+  // Outer environment is a copy of this one without the arguments.
+  int arity = function->scope()->num_parameters();
+  HEnvironment* outer = Copy();
+  outer->Drop(arity + 1);  // Including receiver.
+  outer->ClearHistory();
+  HEnvironment* inner = new HEnvironment(outer, function->scope(), target);
+  // Get the argument values from the original environment.
+  if (is_speculative) {
+    for (int i = 0; i <= arity; ++i) {  // Include receiver.
+      HValue* push = ExpressionStackAt(arity - i);
+      inner->SetValueAt(i, push);
+    }
+  } else {
+    for (int i = 0; i <= arity; ++i) {  // Include receiver.
+      inner->SetValueAt(i, ExpressionStackAt(arity - i));
+    }
+  }
+
+  // Initialize the stack-allocated locals to undefined.
+  int local_base = arity + 1;
+  int local_count = function->scope()->num_stack_slots();
+  for (int i = 0; i < local_count; ++i) {
+    inner->SetValueAt(local_base + i, undefined);
+  }
+
+  inner->set_ast_id(function->id());
+  return inner;
+}
+
+
+void HEnvironment::PrintTo(StringStream* stream) {
+  for (int i = 0; i < total_count(); i++) {
+    if (i == 0) stream->Add("parameters\n");
+    if (i == parameter_count()) stream->Add("locals\n");
+    if (i == parameter_count() + local_count()) stream->Add("expressions");
+    HValue* val = values_.at(i);
+    stream->Add("%d: ", i);
+    if (val != NULL) {
+      val->PrintNameTo(stream);
+    } else {
+      stream->Add("NULL");
+    }
+    stream->Add("\n");
+  }
+}
+
+
+void HEnvironment::PrintToStd() {
+  HeapStringAllocator string_allocator;
+  StringStream trace(&string_allocator);
+  PrintTo(&trace);
+  PrintF("%s", *trace.ToCString());
+}
+
+
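+// The tracer emits compilation artifacts in a format readable by tools
+// such as the C1Visualizer.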
+void HTracer::TraceCompilation(FunctionLiteral* function) {
+  Tag tag(this, "compilation");
+  Handle<String> name = function->debug_name();
+  PrintStringProperty("name", *name->ToCString());
+  PrintStringProperty("method", *name->ToCString());
+  PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
+}
+
+
+void HTracer::TraceLithium(const char* name, LChunk* chunk) {
+  Trace(name, chunk->graph(), chunk);
+}
+
+
+void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
+  Trace(name, graph, NULL);
+}
+
+
+void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
+  Tag tag(this, "cfg");
+  PrintStringProperty("name", name);
+  const ZoneList<HBasicBlock*>* blocks = graph->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* current = blocks->at(i);
+    Tag block_tag(this, "block");
+    PrintBlockProperty("name", current->block_id());
+    PrintIntProperty("from_bci", -1);
+    PrintIntProperty("to_bci", -1);
+
+    if (!current->predecessors()->is_empty()) {
+      PrintIndent();
+      trace_.Add("predecessors");
+      for (int j = 0; j < current->predecessors()->length(); ++j) {
+        trace_.Add(" \"B%d\"", current->predecessors()->at(j)->block_id());
+      }
+      trace_.Add("\n");
+    } else {
+      PrintEmptyProperty("predecessors");
+    }
+
+    if (current->end() == NULL || current->end()->FirstSuccessor() == NULL) {
+      PrintEmptyProperty("successors");
+    } else if (current->end()->SecondSuccessor() == NULL) {
+      PrintBlockProperty("successors",
+                         current->end()->FirstSuccessor()->block_id());
+    } else {
+      PrintBlockProperty("successors",
+                         current->end()->FirstSuccessor()->block_id(),
+                         current->end()->SecondSuccessor()->block_id());
+    }
+
+    PrintEmptyProperty("xhandlers");
+    PrintEmptyProperty("flags");
+
+    if (current->dominator() != NULL) {
+      PrintBlockProperty("dominator", current->dominator()->block_id());
+    }
+
+    if (chunk != NULL) {
+      int first_index = current->first_instruction_index();
+      int last_index = current->last_instruction_index();
+      PrintIntProperty(
+          "first_lir_id",
+          LifetimePosition::FromInstructionIndex(first_index).Value());
+      PrintIntProperty(
+          "last_lir_id",
+          LifetimePosition::FromInstructionIndex(last_index).Value());
+    }
+
+    {
+      Tag states_tag(this, "states");
+      Tag locals_tag(this, "locals");
+      int total = current->phis()->length();
+      trace_.Add("size %d\n", total);
+      trace_.Add("method \"None\"");
+      for (int j = 0; j < total; ++j) {
+        HPhi* phi = current->phis()->at(j);
+        trace_.Add("%d ", phi->merged_index());
+        phi->PrintNameTo(&trace_);
+        trace_.Add(" ");
+        phi->PrintTo(&trace_);
+        trace_.Add("\n");
+      }
+    }
+
+    {
+      Tag HIR_tag(this, "HIR");
+      HInstruction* instruction = current->first();
+      while (instruction != NULL) {
+        int bci = 0;
+        int uses = instruction->uses()->length();
+        trace_.Add("%d %d ", bci, uses);
+        instruction->PrintNameTo(&trace_);
+        trace_.Add(" ");
+        instruction->PrintTo(&trace_);
+        trace_.Add(" <|@\n");
+        instruction = instruction->next();
+      }
+    }
+
+    if (chunk != NULL) {
+      Tag LIR_tag(this, "LIR");
+      int first_index = current->first_instruction_index();
+      int last_index = current->last_instruction_index();
+      if (first_index != -1 && last_index != -1) {
+        const ZoneList<LInstruction*>* instructions = chunk->instructions();
+        for (int i = first_index; i <= last_index; ++i) {
+          LInstruction* linstr = instructions->at(i);
+          if (linstr != NULL) {
+            trace_.Add("%d ",
+                       LifetimePosition::FromInstructionIndex(i).Value());
+            linstr->PrintTo(&trace_);
+            trace_.Add(" <|@\n");
+          }
+        }
+      }
+    }
+  }
+}
+
+
+void HTracer::TraceLiveRanges(const char* name, LAllocator* allocator) {
+  Tag tag(this, "intervals");
+  PrintStringProperty("name", name);
+
+  const ZoneList<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
+  for (int i = 0; i < fixed_d->length(); ++i) {
+    TraceLiveRange(fixed_d->at(i), "fixed");
+  }
+
+  const ZoneList<LiveRange*>* fixed = allocator->fixed_live_ranges();
+  for (int i = 0; i < fixed->length(); ++i) {
+    TraceLiveRange(fixed->at(i), "fixed");
+  }
+
+  const ZoneList<LiveRange*>* live_ranges = allocator->live_ranges();
+  for (int i = 0; i < live_ranges->length(); ++i) {
+    TraceLiveRange(live_ranges->at(i), "object");
+  }
+}
+
+
+void HTracer::TraceLiveRange(LiveRange* range, const char* type) {
+  if (range != NULL && !range->IsEmpty()) {
+    trace_.Add("%d %s", range->id(), type);
+    if (range->HasRegisterAssigned()) {
+      LOperand* op = range->CreateAssignedOperand();
+      int assigned_reg = op->index();
+      if (op->IsDoubleRegister()) {
+        trace_.Add(" \"%s\"",
+                   DoubleRegister::AllocationIndexToString(assigned_reg));
+      } else {
+        ASSERT(op->IsRegister());
+        trace_.Add(" \"%s\"", Register::AllocationIndexToString(assigned_reg));
+      }
+    } else if (range->IsSpilled()) {
+      LOperand* op = range->TopLevel()->GetSpillOperand();
+      if (op->IsDoubleStackSlot()) {
+        trace_.Add(" \"double_stack:%d\"", op->index());
+      } else {
+        ASSERT(op->IsStackSlot());
+        trace_.Add(" \"stack:%d\"", op->index());
+      }
+    }
+    int parent_index = -1;
+    if (range->IsChild()) {
+      parent_index = range->parent()->id();
+    } else {
+      parent_index = range->id();
+    }
+    LOperand* op = range->FirstHint();
+    int hint_index = -1;
+    if (op != NULL && op->IsUnallocated()) hint_index = op->VirtualRegister();
+    trace_.Add(" %d %d", parent_index, hint_index);
+    UseInterval* cur_interval = range->first_interval();
+    while (cur_interval != NULL) {
+      trace_.Add(" [%d, %d[",
+                 cur_interval->start().Value(),
+                 cur_interval->end().Value());
+      cur_interval = cur_interval->next();
+    }
+
+    UsePosition* current_pos = range->first_pos();
+    while (current_pos != NULL) {
+      if (current_pos->RegisterIsBeneficial()) {
+        trace_.Add(" %d M", current_pos->pos().Value());
+      }
+      current_pos = current_pos->next();
+    }
+
+    trace_.Add(" \"\"\n");
+  }
+}
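+// For reference (derived from the code above), an emitted line has the
+// shape: id, type, location, parent id, hint id, [start, end[ intervals,
+// beneficial use positions, and an empty-string terminator, e.g.
+//
+//   14 object "eax" 14 -1 [26, 42[ 28 M ""
+//   15 object "stack:3" 15 -1 [44, 60[ ""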
+
+
+void HTracer::FlushToFile() {
+  AppendChars(filename_, *trace_.ToCString(), trace_.length(), false);
+  trace_.Reset();
+}
+
+
+void HStatistics::Print() {
+  PrintF("Timing results:\n");
+  int64_t sum = 0;
+  for (int i = 0; i < timing_.length(); ++i) {
+    sum += timing_[i];
+  }
+
+  for (int i = 0; i < names_.length(); ++i) {
+    PrintF("%30s", names_[i]);
+    double ms = static_cast<double>(timing_[i]) / 1000;
+    double percent = static_cast<double>(timing_[i]) * 100 / sum;
+    PrintF(" - %0.3f ms / %0.3f %% \n", ms, percent);
+  }
+  PrintF("%30s - %0.3f ms \n", "Sum", static_cast<double>(sum) / 1000);
+  PrintF("---------------------------------------------------------------\n");
+  PrintF("%30s - %0.3f ms (%0.1f times slower than full code gen)\n",
+         "Total",
+         static_cast<double>(total_) / 1000,
+         static_cast<double>(total_) / full_code_gen_);
+}
+
+
+void HStatistics::SaveTiming(const char* name, int64_t ticks) {
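+  // Note: phase names are compared by pointer identity here; kFullCodeGen
+  // and kTotal are the unique static strings defined below, so == suffices.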
+  if (name == HPhase::kFullCodeGen) {
+    full_code_gen_ += ticks;
+  } else if (name == HPhase::kTotal) {
+    total_ += ticks;
+  } else {
+    for (int i = 0; i < names_.length(); ++i) {
+      if (names_[i] == name) {
+        timing_[i] += ticks;
+        return;
+      }
+    }
+    names_.Add(name);
+    timing_.Add(ticks);
+  }
+}
+
+
+const char* const HPhase::kFullCodeGen = "Full code generator";
+const char* const HPhase::kTotal = "Total";
+
+
+void HPhase::Begin(const char* name,
+                   HGraph* graph,
+                   LChunk* chunk,
+                   LAllocator* allocator) {
+  name_ = name;
+  graph_ = graph;
+  chunk_ = chunk;
+  allocator_ = allocator;
+  if (allocator != NULL && chunk_ == NULL) {
+    chunk_ = allocator->chunk();
+  }
+  if (FLAG_time_hydrogen) start_ = OS::Ticks();
+}
+
+
+void HPhase::End() const {
+  if (FLAG_time_hydrogen) {
+    int64_t end = OS::Ticks();
+    HStatistics::Instance()->SaveTiming(name_, end - start_);
+  }
+
+  if (FLAG_trace_hydrogen) {
+    if (graph_ != NULL) HTracer::Instance()->TraceHydrogen(name_, graph_);
+    if (chunk_ != NULL) HTracer::Instance()->TraceLithium(name_, chunk_);
+    if (allocator_ != NULL) {
+      HTracer::Instance()->TraceLiveRanges(name_, allocator_);
+    }
+  }
+
+#ifdef DEBUG
+  if (graph_ != NULL) graph_->Verify();
+  if (chunk_ != NULL) chunk_->Verify();
+  if (allocator_ != NULL) allocator_->Verify();
+#endif
+}
+
+} }  // namespace v8::internal
diff --git a/src/hydrogen.h b/src/hydrogen.h
new file mode 100644
index 0000000..ebabf3d
--- /dev/null
+++ b/src/hydrogen.h
@@ -0,0 +1,1070 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HYDROGEN_H_
+#define V8_HYDROGEN_H_
+
+#include "v8.h"
+
+#include "ast.h"
+#include "compiler.h"
+#include "data-flow.h"
+#include "hydrogen-instructions.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class HEnvironment;
+class HGraph;
+class HLoopInformation;
+class HTracer;
+class LAllocator;
+class LChunk;
+class LiveRange;
+
+
+class HBasicBlock: public ZoneObject {
+ public:
+  explicit HBasicBlock(HGraph* graph);
+  virtual ~HBasicBlock() { }
+
+  // Simple accessors.
+  int block_id() const { return block_id_; }
+  void set_block_id(int id) { block_id_ = id; }
+  HGraph* graph() const { return graph_; }
+  const ZoneList<HPhi*>* phis() const { return &phis_; }
+  HInstruction* first() const { return first_; }
+  HInstruction* GetLastInstruction();
+  HControlInstruction* end() const { return end_; }
+  HLoopInformation* loop_information() const { return loop_information_; }
+  const ZoneList<HBasicBlock*>* predecessors() const { return &predecessors_; }
+  bool HasPredecessor() const { return predecessors_.length() > 0; }
+  const ZoneList<HBasicBlock*>* dominated_blocks() const {
+    return &dominated_blocks_;
+  }
+  const ZoneList<int>* deleted_phis() const {
+    return &deleted_phis_;
+  }
+  void RecordDeletedPhi(int merge_index) {
+    deleted_phis_.Add(merge_index);
+  }
+  HBasicBlock* dominator() const { return dominator_; }
+  HEnvironment* last_environment() const { return last_environment_; }
+  int argument_count() const { return argument_count_; }
+  void set_argument_count(int count) { argument_count_ = count; }
+  int first_instruction_index() const { return first_instruction_index_; }
+  void set_first_instruction_index(int index) {
+    first_instruction_index_ = index;
+  }
+  int last_instruction_index() const { return last_instruction_index_; }
+  void set_last_instruction_index(int index) {
+    last_instruction_index_ = index;
+  }
+
+  void AttachLoopInformation();
+  void DetachLoopInformation();
+  bool IsLoopHeader() const { return loop_information() != NULL; }
+  bool IsStartBlock() const { return block_id() == 0; }
+  void PostProcessLoopHeader(IterationStatement* stmt);
+
+  bool IsFinished() const { return end_ != NULL; }
+  void AddPhi(HPhi* phi);
+  void RemovePhi(HPhi* phi);
+  void AddInstruction(HInstruction* instr);
+  bool Dominates(HBasicBlock* other) const;
+
+  void SetInitialEnvironment(HEnvironment* env);
+  void ClearEnvironment() { last_environment_ = NULL; }
+  bool HasEnvironment() const { return last_environment_ != NULL; }
+  void UpdateEnvironment(HEnvironment* env) { last_environment_ = env; }
+  HBasicBlock* parent_loop_header() const {
+    if (!HasParentLoopHeader()) return NULL;
+    return parent_loop_header_.get();
+  }
+
+  void set_parent_loop_header(HBasicBlock* block) {
+    parent_loop_header_.set(block);
+  }
+
+  bool HasParentLoopHeader() const { return parent_loop_header_.is_set(); }
+
+  void SetJoinId(int id);
+
+  void Finish(HControlInstruction* last);
+  void Goto(HBasicBlock* block, bool include_stack_check = false);
+
+  int PredecessorIndexOf(HBasicBlock* predecessor) const;
+  void AddSimulate(int id) { AddInstruction(CreateSimulate(id)); }
+  void AssignCommonDominator(HBasicBlock* other);
+
+  // Add the inlined function exit sequence: an HLeaveInlined instruction
+  // plus an update of the bailout environment.
+  void AddLeaveInlined(HValue* return_value, HBasicBlock* target);
+
+  // If a target block is tagged as an inline function return, all
+  // predecessors should contain the inlined exit sequence:
+  //
+  // LeaveInlined
+  // Simulate (caller's environment)
+  // Goto (target block)
+  bool IsInlineReturnTarget() const { return is_inline_return_target_; }
+  void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
+
+  Handle<Object> cond() { return cond_; }
+  void set_cond(Handle<Object> value) { cond_ = value; }
+
+#ifdef DEBUG
+  void Verify();
+#endif
+
+ private:
+  void RegisterPredecessor(HBasicBlock* pred);
+  void AddDominatedBlock(HBasicBlock* block);
+
+  HSimulate* CreateSimulate(int id);
+
+  int block_id_;
+  HGraph* graph_;
+  ZoneList<HPhi*> phis_;
+  HInstruction* first_;
+  HInstruction* last_;  // Last non-control instruction of the block.
+  HControlInstruction* end_;
+  HLoopInformation* loop_information_;
+  ZoneList<HBasicBlock*> predecessors_;
+  HBasicBlock* dominator_;
+  ZoneList<HBasicBlock*> dominated_blocks_;
+  HEnvironment* last_environment_;
+  // Outgoing parameter count at block exit, set during lithium translation.
+  int argument_count_;
+  // Instruction indices into the lithium code stream.
+  int first_instruction_index_;
+  int last_instruction_index_;
+  ZoneList<int> deleted_phis_;
+  SetOncePointer<HBasicBlock> parent_loop_header_;
+  bool is_inline_return_target_;
+  Handle<Object> cond_;
+};
+
+
+class HLoopInformation: public ZoneObject {
+ public:
+  explicit HLoopInformation(HBasicBlock* loop_header)
+      : back_edges_(4), loop_header_(loop_header), blocks_(8) {
+    blocks_.Add(loop_header);
+  }
+  virtual ~HLoopInformation() {}
+
+  const ZoneList<HBasicBlock*>* back_edges() const { return &back_edges_; }
+  const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
+  HBasicBlock* loop_header() const { return loop_header_; }
+  HBasicBlock* GetLastBackEdge() const;
+  void RegisterBackEdge(HBasicBlock* block);
+
+ private:
+  void AddBlock(HBasicBlock* block);
+
+  ZoneList<HBasicBlock*> back_edges_;
+  HBasicBlock* loop_header_;
+  ZoneList<HBasicBlock*> blocks_;
+};
+
+
+class HSubgraph: public ZoneObject {
+ public:
+  explicit HSubgraph(HGraph* graph)
+      : graph_(graph),
+        entry_block_(NULL),
+        exit_block_(NULL),
+        break_continue_info_(4) {
+  }
+
+  HGraph* graph() const { return graph_; }
+  HEnvironment* environment() const {
+    ASSERT(HasExit());
+    return exit_block_->last_environment();
+  }
+
+  bool HasExit() const { return exit_block_ != NULL; }
+
+  void PreProcessOsrEntry(IterationStatement* statement);
+
+  void AppendOptional(HSubgraph* graph,
+                      bool on_true_branch,
+                      HValue* boolean_value);
+  void AppendJoin(HSubgraph* then_graph, HSubgraph* else_graph, AstNode* node);
+  void AppendWhile(HSubgraph* condition,
+                   HSubgraph* body,
+                   IterationStatement* statement,
+                   HSubgraph* continue_subgraph,
+                   HSubgraph* exit);
+  void AppendDoWhile(HSubgraph* body,
+                     IterationStatement* statement,
+                     HSubgraph* go_back,
+                     HSubgraph* exit);
+  void AppendEndless(HSubgraph* body, IterationStatement* statement);
+  void Append(HSubgraph* next, BreakableStatement* statement);
+  void ResolveContinue(IterationStatement* statement);
+  HBasicBlock* BundleBreak(BreakableStatement* statement);
+  HBasicBlock* BundleContinue(IterationStatement* statement);
+  HBasicBlock* BundleBreakContinue(BreakableStatement* statement,
+                                   bool is_continue,
+                                   int join_id);
+  HBasicBlock* JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id);
+
+  void FinishExit(HControlInstruction* instruction);
+  void FinishBreakContinue(BreakableStatement* target, bool is_continue);
+  void Initialize(HBasicBlock* block) {
+    ASSERT(entry_block_ == NULL);
+    entry_block_ = block;
+    exit_block_ = block;
+  }
+  HBasicBlock* entry_block() const { return entry_block_; }
+  HBasicBlock* exit_block() const { return exit_block_; }
+  void set_exit_block(HBasicBlock* block) {
+    exit_block_ = block;
+  }
+
+  void ConnectExitTo(HBasicBlock* other, bool include_stack_check = false) {
+    if (HasExit()) {
+      exit_block()->Goto(other, include_stack_check);
+    }
+  }
+
+  void AddBreakContinueInfo(HSubgraph* other) {
+    break_continue_info_.AddAll(other->break_continue_info_);
+  }
+
+ protected:
+  class BreakContinueInfo: public ZoneObject {
+   public:
+    BreakContinueInfo(BreakableStatement* target, HBasicBlock* block,
+                      bool is_continue)
+      : target_(target), block_(block), continue_(is_continue) {}
+    BreakableStatement* target() const { return target_; }
+    HBasicBlock* block() const { return block_; }
+    bool is_continue() const { return continue_; }
+    bool IsResolved() const { return block_ == NULL; }
+    void Resolve() { block_ = NULL; }
+
+   private:
+    BreakableStatement* target_;
+    HBasicBlock* block_;
+    bool continue_;
+  };
+
+  const ZoneList<BreakContinueInfo*>* break_continue_info() const {
+    return &break_continue_info_;
+  }
+
+  HGraph* graph_;  // The graph this is a subgraph of.
+  HBasicBlock* entry_block_;
+  HBasicBlock* exit_block_;
+
+ private:
+  ZoneList<BreakContinueInfo*> break_continue_info_;
+};
+
+
+class HGraph: public HSubgraph {
+ public:
+  explicit HGraph(CompilationInfo* info);
+
+  CompilationInfo* info() const { return info_; }
+  const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
+  const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
+  Handle<String> debug_name() const { return info_->function()->debug_name(); }
+  HEnvironment* start_environment() const { return start_environment_; }
+
+  void InitializeInferredTypes();
+  void InsertTypeConversions();
+  void InsertRepresentationChanges();
+  bool ProcessArgumentsObject();
+  void EliminateRedundantPhis();
+  void Canonicalize();
+  void OrderBlocks();
+  void AssignDominators();
+
+  // Returns false if there are phi-uses of the arguments-object
+  // which are not supported by the optimizing compiler.
+  bool CollectPhis();
+
+  Handle<Code> Compile();
+
+  void set_undefined_constant(HConstant* constant) {
+    undefined_constant_.set(constant);
+  }
+  HConstant* GetConstantUndefined() const { return undefined_constant_.get(); }
+  HConstant* GetConstant1();
+  HConstant* GetConstantMinus1();
+  HConstant* GetConstantTrue();
+  HConstant* GetConstantFalse();
+
+  HBasicBlock* CreateBasicBlock();
+  HArgumentsObject* GetArgumentsObject() const {
+    return arguments_object_.get();
+  }
+  bool HasArgumentsObject() const { return arguments_object_.is_set(); }
+
+  void SetArgumentsObject(HArgumentsObject* object) {
+    arguments_object_.set(object);
+  }
+
+  // True iff we are compiling for OSR and the statement is the entry.
+  bool HasOsrEntryAt(IterationStatement* statement);
+
+  int GetMaximumValueID() const { return values_.length(); }
+  int GetNextBlockID() { return next_block_id_++; }
+  int GetNextValueID(HValue* value) {
+    values_.Add(value);
+    return values_.length() - 1;
+  }
+  HValue* LookupValue(int id) const {
+    if (id >= 0 && id < values_.length()) return values_[id];
+    return NULL;
+  }
+
+#ifdef DEBUG
+  void Verify() const;
+#endif
+
+ private:
+  void Postorder(HBasicBlock* block,
+                 BitVector* visited,
+                 ZoneList<HBasicBlock*>* order,
+                 HBasicBlock* loop_header);
+  void PostorderLoopBlocks(HLoopInformation* loop,
+                           BitVector* visited,
+                           ZoneList<HBasicBlock*>* order,
+                           HBasicBlock* loop_header);
+  HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
+                         Object* value);
+
+  void InsertTypeConversions(HInstruction* instr);
+  void PropagateMinusZeroChecks(HValue* value, BitVector* visited);
+  void InsertRepresentationChangeForUse(HValue* value,
+                                        HValue* use,
+                                        Representation to,
+                                        bool truncating);
+  void InsertRepresentationChanges(HValue* current);
+  void InferTypes(ZoneList<HValue*>* worklist);
+  void InitializeInferredTypes(int from_inclusive, int to_inclusive);
+  void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
+
+  int next_block_id_;
+  CompilationInfo* info_;
+  HEnvironment* start_environment_;
+  ZoneList<HBasicBlock*> blocks_;
+  ZoneList<HValue*> values_;
+  ZoneList<HPhi*>* phi_list_;
+  SetOncePointer<HConstant> undefined_constant_;
+  SetOncePointer<HConstant> constant_1_;
+  SetOncePointer<HConstant> constant_minus1_;
+  SetOncePointer<HConstant> constant_true_;
+  SetOncePointer<HConstant> constant_false_;
+  SetOncePointer<HArgumentsObject> arguments_object_;
+
+  friend class HSubgraph;
+
+  DISALLOW_COPY_AND_ASSIGN(HGraph);
+};
+
+
+class HEnvironment: public ZoneObject {
+ public:
+  HEnvironment(HEnvironment* outer,
+               Scope* scope,
+               Handle<JSFunction> closure);
+
+  void Bind(Variable* variable, HValue* value) {
+    Bind(IndexFor(variable), value);
+
+    if (FLAG_trace_environment) {
+      PrintF("Slot index=%d name=%s\n",
+             variable->AsSlot()->index(),
+             *variable->name()->ToCString());
+    }
+  }
+
+  void Bind(int index, HValue* value) {
+    ASSERT(value != NULL);
+    if (!assigned_variables_.Contains(index)) {
+      assigned_variables_.Add(index);
+    }
+    values_[index] = value;
+  }
+
+  HValue* Lookup(Variable* variable) const {
+    return Lookup(IndexFor(variable));
+  }
+  HValue* Lookup(int index) const {
+    HValue* result = values_[index];
+    ASSERT(result != NULL);
+    return result;
+  }
+
+  void Push(HValue* value) {
+    ASSERT(value != NULL);
+    ++push_count_;
+    values_.Add(value);
+  }
+
+  HValue* Top() const { return ExpressionStackAt(0); }
+
+  HValue* ExpressionStackAt(int index_from_top) const {
+    int index = values_.length() - index_from_top - 1;
+    ASSERT(IsExpressionStackIndex(index));
+    return values_[index];
+  }
+
+  void SetExpressionStackAt(int index_from_top, HValue* value) {
+    int index = values_.length() - index_from_top - 1;
+    ASSERT(IsExpressionStackIndex(index));
+    values_[index] = value;
+  }
+
+  HValue* Pop() {
+    ASSERT(!IsExpressionStackEmpty());
+    if (push_count_ > 0) {
+      --push_count_;
+      ASSERT(push_count_ >= 0);
+    } else {
+      ++pop_count_;
+    }
+    return values_.RemoveLast();
+  }
+
+  void Drop(int count) {
+    for (int i = 0; i < count; ++i) {
+      Pop();
+    }
+  }
+
+  Handle<JSFunction> closure() const { return closure_; }
+
+  // ID of the original AST node to identify deoptimization points.
+  int ast_id() const { return ast_id_; }
+  void set_ast_id(int id) { ast_id_ = id; }
+
+  const ZoneList<HValue*>* values() const { return &values_; }
+  const ZoneList<int>* assigned_variables() const {
+    return &assigned_variables_;
+  }
+  int parameter_count() const { return parameter_count_; }
+  int local_count() const { return local_count_; }
+  int push_count() const { return push_count_; }
+  int pop_count() const { return pop_count_; }
+  int total_count() const { return values_.length(); }
+  HEnvironment* outer() const { return outer_; }
+  HEnvironment* Copy() const;
+  HEnvironment* CopyWithoutHistory() const;
+  HEnvironment* CopyAsLoopHeader(HBasicBlock* block) const;
+
+  // Create an "inlined version" of this environment, where the original
+  // environment is the outer environment but the top expression stack
+  // elements are moved to an inner environment as parameters. If
+  // is_speculative, the argument values are expected to be PushArgument
+  // instructions, otherwise they are the actual values.
+  HEnvironment* CopyForInlining(Handle<JSFunction> target,
+                                FunctionLiteral* function,
+                                bool is_speculative,
+                                HConstant* undefined) const;
+
+  void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
+  void ClearHistory() {
+    pop_count_ = 0;
+    push_count_ = 0;
+    assigned_variables_.Clear();
+  }
+  void SetValueAt(int index, HValue* value) {
+    ASSERT(index < total_count());
+    values_[index] = value;
+  }
+
+  void PrintTo(StringStream* stream);
+  void PrintToStd();
+
+ private:
+  explicit HEnvironment(const HEnvironment* other);
+
+  bool IsExpressionStackIndex(int index) const {
+    return index >= parameter_count_ + local_count_;
+  }
+  bool IsExpressionStackEmpty() const {
+    int length = values_.length();
+    int first_expression = parameter_count() + local_count();
+    ASSERT(length >= first_expression);
+    return length == first_expression;
+  }
+  void Initialize(int parameter_count, int local_count, int stack_height);
+  void Initialize(const HEnvironment* other);
+  int VariableToIndex(Variable* var);
+  int IndexFor(Variable* variable) const;
+
+  Handle<JSFunction> closure_;
+  // Value array [parameters] [locals] [temporaries].
+  ZoneList<HValue*> values_;
+  ZoneList<int> assigned_variables_;
+  int parameter_count_;
+  int local_count_;
+  HEnvironment* outer_;
+  int pop_count_;
+  int push_count_;
+  int ast_id_;
+};
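+// A worked example of the history bookkeeping (illustrative only):
+// starting right after ClearHistory() (push_count_ == pop_count_ == 0),
+//
+//   env->Push(a);  // push_count_ == 1
+//   env->Pop();    // undoes the push: push_count_ == 0
+//   env->Pop();    // pops below the snapshot: pop_count_ == 1
+//
+// so push_count_ and pop_count_ describe the net stack change since the
+// last ClearHistory() -- exactly the "history" CopyWithoutHistory() drops.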
+
+
+class HGraphBuilder;
+
+class AstContext {
+ public:
+  bool IsEffect() const { return kind_ == Expression::kEffect; }
+  bool IsValue() const { return kind_ == Expression::kValue; }
+  bool IsTest() const { return kind_ == Expression::kTest; }
+
+  // 'Fill' this context with a hydrogen value.  The value is assumed to
+  // have already been inserted in the instruction stream (or not to need
+  // insertion at all, as for HPhi).  Call this function in tail position
+  // in the Visit functions for expressions.
+  virtual void ReturnValue(HValue* value) = 0;
+
+  // Add a hydrogen instruction to the instruction stream (recording an
+  // environment simulation if necessary) and then fill this context with
+  // the instruction as value.
+  virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
+
+ protected:
+  AstContext(HGraphBuilder* owner, Expression::Context kind);
+  virtual ~AstContext();
+
+  HGraphBuilder* owner() const { return owner_; }
+
+  // We want to be able to assert, in a context-specific way, that the stack
+  // height makes sense when the context is filled.
+#ifdef DEBUG
+  int original_count_;
+#endif
+
+ private:
+  HGraphBuilder* owner_;
+  Expression::Context kind_;
+  AstContext* outer_;
+};
+
+
+class EffectContext: public AstContext {
+ public:
+  explicit EffectContext(HGraphBuilder* owner)
+      : AstContext(owner, Expression::kEffect) {
+  }
+  virtual ~EffectContext();
+
+  virtual void ReturnValue(HValue* value);
+  virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+};
+
+
+class ValueContext: public AstContext {
+ public:
+  explicit ValueContext(HGraphBuilder* owner)
+      : AstContext(owner, Expression::kValue) {
+  }
+  virtual ~ValueContext();
+
+  virtual void ReturnValue(HValue* value);
+  virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+};
+
+
+class TestContext: public AstContext {
+ public:
+  TestContext(HGraphBuilder* owner,
+              HBasicBlock* if_true,
+              HBasicBlock* if_false)
+      : AstContext(owner, Expression::kTest),
+        if_true_(if_true),
+        if_false_(if_false) {
+  }
+
+  virtual void ReturnValue(HValue* value);
+  virtual void ReturnInstruction(HInstruction* instr, int ast_id);
+
+  static TestContext* cast(AstContext* context) {
+    ASSERT(context->IsTest());
+    return reinterpret_cast<TestContext*>(context);
+  }
+
+  HBasicBlock* if_true() const { return if_true_; }
+  HBasicBlock* if_false() const { return if_false_; }
+
+ private:
+  // Build the shared core part of the translation unpacking a value into
+  // control flow.
+  void BuildBranch(HValue* value);
+
+  HBasicBlock* if_true_;
+  HBasicBlock* if_false_;
+};
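+// A usage sketch (assumed from the interfaces above): a visitor
+// materializes a context on the C++ stack before visiting a
+// subexpression; the AstContext constructor and destructor push and pop
+// the builder's context stack, e.g.
+//
+//   { ValueContext for_value(builder);
+//     builder->Visit(expr);  // the Visit method calls ReturnValue(...)
+//   }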
+
+
+class HGraphBuilder: public AstVisitor {
+ public:
+  explicit HGraphBuilder(TypeFeedbackOracle* oracle)
+      : oracle_(oracle),
+        graph_(NULL),
+        current_subgraph_(NULL),
+        peeled_statement_(NULL),
+        ast_context_(NULL),
+        call_context_(NULL),
+        function_return_(NULL),
+        inlined_count_(0) { }
+
+  HGraph* CreateGraph(CompilationInfo* info);
+
+  // Simple accessors.
+  HGraph* graph() const { return graph_; }
+  HSubgraph* subgraph() const { return current_subgraph_; }
+
+  HEnvironment* environment() const { return subgraph()->environment(); }
+  HBasicBlock* CurrentBlock() const { return subgraph()->exit_block(); }
+
+  // Adding instructions.
+  HInstruction* AddInstruction(HInstruction* instr);
+  void AddSimulate(int id);
+
+  // Bailout environment manipulation.
+  void Push(HValue* value) { environment()->Push(value); }
+  HValue* Pop() { return environment()->Pop(); }
+
+ private:
+  // Type of a member function that generates inline code for a native function.
+  typedef void (HGraphBuilder::*InlineFunctionGenerator)(int argument_count,
+                                                         int ast_id);
+
+  // Forward declarations for inner scope classes.
+  class SubgraphScope;
+
+  static const InlineFunctionGenerator kInlineFunctionGenerators[];
+
+  static const int kMaxCallPolymorphism = 4;
+  static const int kMaxLoadPolymorphism = 4;
+  static const int kMaxStorePolymorphism = 4;
+
+  static const int kMaxInlinedNodes = 196;
+  static const int kMaxInlinedSize = 196;
+  static const int kMaxSourceSize = 600;
+
+  // Simple accessors.
+  TypeFeedbackOracle* oracle() const { return oracle_; }
+  AstContext* ast_context() const { return ast_context_; }
+  void set_ast_context(AstContext* context) { ast_context_ = context; }
+  AstContext* call_context() const { return call_context_; }
+  HBasicBlock* function_return() const { return function_return_; }
+
+  // Generators for inline runtime functions.
+#define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize)      \
+  void Generate##Name(int argument_count, int ast_id);
+
+  INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
+  INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
+#undef INLINE_FUNCTION_GENERATOR_DECLARATION
+
+  void Bailout(const char* reason);
+
+  void AppendPeeledWhile(IterationStatement* stmt,
+                         HSubgraph* cond_graph,
+                         HSubgraph* body_graph,
+                         HSubgraph* exit_graph);
+
+  void AddToSubgraph(HSubgraph* graph, ZoneList<Statement*>* stmts);
+  void AddToSubgraph(HSubgraph* graph, Statement* stmt);
+  void AddToSubgraph(HSubgraph* graph, Expression* expr);
+
+  HValue* Top() const { return environment()->Top(); }
+  void Drop(int n) { environment()->Drop(n); }
+  void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
+
+  void VisitForValue(Expression* expr);
+  void VisitForEffect(Expression* expr);
+  void VisitForControl(Expression* expr,
+                       HBasicBlock* true_block,
+                       HBasicBlock* false_block);
+
+  // Visit an argument and wrap it in a PushArgument instruction.
+  HValue* VisitArgument(Expression* expr);
+  void VisitArgumentList(ZoneList<Expression*>* arguments);
+
+  void AddPhi(HPhi* phi);
+
+  void PushAndAdd(HInstruction* instr);
+
+  void PushArgumentsForStubCall(int argument_count);
+
+  // Remove the arguments from the bailout environment and emit instructions
+  // to push them as outgoing parameters.
+  void ProcessCall(HCall* call);
+
+  void AssumeRepresentation(HValue* value, Representation r);
+  static Representation ToRepresentation(TypeInfo info);
+
+  void SetupScope(Scope* scope);
+  virtual void VisitStatements(ZoneList<Statement*>* statements);
+
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  bool ShouldPeel(HSubgraph* cond, HSubgraph* body);
+
+  HBasicBlock* CreateBasicBlock(HEnvironment* env);
+  HSubgraph* CreateEmptySubgraph();
+  HSubgraph* CreateGotoSubgraph(HEnvironment* env);
+  HSubgraph* CreateBranchSubgraph(HEnvironment* env);
+  HSubgraph* CreateLoopHeaderSubgraph(HEnvironment* env);
+  HSubgraph* CreateInlinedSubgraph(HEnvironment* outer,
+                                   Handle<JSFunction> target,
+                                   FunctionLiteral* function);
+
+  // Helpers for flow graph construction.
+  void LookupGlobalPropertyCell(Variable* var,
+                                LookupResult* lookup,
+                                bool is_store);
+
+  bool TryArgumentsAccess(Property* expr);
+  bool TryCallApply(Call* expr);
+  bool TryInline(Call* expr);
+  bool TryMathFunctionInline(Call* expr);
+  void TraceInline(Handle<JSFunction> target, bool result);
+
+  void HandleGlobalVariableAssignment(Variable* var,
+                                      HValue* value,
+                                      int position,
+                                      int ast_id);
+
+  void HandlePropertyAssignment(Assignment* expr);
+  void HandleCompoundAssignment(Assignment* expr);
+  void HandlePolymorphicLoadNamedField(Property* expr,
+                                       HValue* object,
+                                       ZoneMapList* types,
+                                       Handle<String> name);
+  void HandlePolymorphicStoreNamedField(Assignment* expr,
+                                        HValue* object,
+                                        HValue* value,
+                                        ZoneMapList* types,
+                                        Handle<String> name);
+  void HandlePolymorphicCallNamed(Call* expr,
+                                  HValue* receiver,
+                                  ZoneMapList* types,
+                                  Handle<String> name);
+
+  HInstruction* BuildBinaryOperation(BinaryOperation* expr,
+                                     HValue* left,
+                                     HValue* right);
+  HInstruction* BuildIncrement(HValue* value, bool increment);
+  HLoadNamedField* BuildLoadNamedField(HValue* object,
+                                       Property* expr,
+                                       Handle<Map> type,
+                                       LookupResult* result,
+                                       bool smi_and_map_check);
+  HInstruction* BuildLoadNamedGeneric(HValue* object, Property* expr);
+  HInstruction* BuildLoadKeyedFastElement(HValue* object,
+                                          HValue* key,
+                                          Property* expr);
+  HInstruction* BuildLoadKeyedGeneric(HValue* object,
+                                      HValue* key);
+
+  HInstruction* BuildLoadNamed(HValue* object,
+                               Property* prop,
+                               Handle<Map> map,
+                               Handle<String> name);
+  HInstruction* BuildStoreNamed(HValue* object,
+                                HValue* value,
+                                Expression* expr);
+  HInstruction* BuildStoreNamedField(HValue* object,
+                                     Handle<String> name,
+                                     HValue* value,
+                                     Handle<Map> type,
+                                     LookupResult* lookup,
+                                     bool smi_and_map_check);
+  HInstruction* BuildStoreNamedGeneric(HValue* object,
+                                       Handle<String> name,
+                                       HValue* value);
+  HInstruction* BuildStoreKeyedGeneric(HValue* object,
+                                       HValue* key,
+                                       HValue* value);
+
+  HInstruction* BuildStoreKeyedFastElement(HValue* object,
+                                           HValue* key,
+                                           HValue* val,
+                                           Expression* expr);
+
+  HCompare* BuildSwitchCompare(HSubgraph* subgraph,
+                               HValue* switch_value,
+                               CaseClause* clause);
+
+  void AddCheckConstantFunction(Call* expr,
+                                HValue* receiver,
+                                Handle<Map> receiver_map,
+                                bool smi_and_map_check);
+
+  HBasicBlock* BuildTypeSwitch(ZoneMapList* maps,
+                               ZoneList<HSubgraph*>* subgraphs,
+                               HValue* receiver,
+                               int join_id);
+
+  TypeFeedbackOracle* oracle_;
+  HGraph* graph_;
+  HSubgraph* current_subgraph_;
+  IterationStatement* peeled_statement_;
+  // Expression context of the currently visited subexpression. NULL when
+  // visiting statements.
+  AstContext* ast_context_;
+
+  // During function inlining, expression context of the call being
+  // inlined. NULL when not inlining.
+  AstContext* call_context_;
+
+  // When inlining a call in an effect or value context, the return
+  // block. NULL otherwise. When inlining a call in a test context, there
+  // is a pair of target blocks in the call context.
+  HBasicBlock* function_return_;
+
+  int inlined_count_;
+
+  friend class AstContext;  // Pushes and pops the AST context stack.
+
+  DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
+};
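+// A rough sketch of how the compilation pipeline is expected to drive the
+// builder (names of locals are illustrative):
+//
+//   HGraphBuilder builder(&oracle);
+//   HGraph* graph = builder.CreateGraph(info);
+//   if (graph != NULL) {
+//     Handle<Code> optimized = graph->Compile();
+//   }
+//
+// If graph construction calls Bailout(), CreateGraph is expected to yield
+// NULL and the caller falls back to the full (non-optimizing) codegen.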
+
+
+class HValueMap: public ZoneObject {
+ public:
+  HValueMap()
+      : array_size_(0),
+        lists_size_(0),
+        count_(0),
+        present_flags_(0),
+        array_(NULL),
+        lists_(NULL),
+        free_list_head_(kNil) {
+    ResizeLists(kInitialSize);
+    Resize(kInitialSize);
+  }
+
+  void Kill(int flags);
+
+  void Add(HValue* value) {
+    present_flags_ |= value->flags();
+    Insert(value);
+  }
+
+  HValue* Lookup(HValue* value) const;
+  HValueMap* Copy() const { return new HValueMap(this); }
+
+ private:
+  // A linked list of HValue* values.  Stored in arrays.
+  struct HValueMapListElement {
+    HValue* value;
+    int next;  // Index in the array of the next list element.
+  };
+  static const int kNil = -1;  // The end of a linked list
+
+  // Must be a power of 2.
+  static const int kInitialSize = 16;
+
+  explicit HValueMap(const HValueMap* other);
+
+  void Resize(int new_size);
+  void ResizeLists(int new_size);
+  void Insert(HValue* value);
+  uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
+
+  int array_size_;
+  int lists_size_;
+  int count_;  // The number of values stored in the HValueMap.
+  int present_flags_;  // All flags that are in any value in the HValueMap.
+  HValueMapListElement* array_;  // Primary store - contains the first value
+  // with a given hash.  Colliding elements are stored in linked lists.
+  HValueMapListElement* lists_;  // The linked lists containing hash collisions.
+  int free_list_head_;  // Unused elements in lists_ are on the free list.
+};
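+// A minimal usage sketch for value numbering (the replacement helper is
+// hypothetical, shown only to illustrate the Lookup/Add protocol):
+//
+//   HValue* other = map->Lookup(instr);
+//   if (other != NULL) {
+//     instr->ReplaceAndDelete(other);  // reuse the equivalent value
+//   } else {
+//     map->Add(instr);                 // remember it for later lookups
+//   }
+//
+// Kill(flags) is used to drop entries whose flags overlap the side
+// effects of an intervening instruction.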
+
+
+class HStatistics: public Malloced {
+ public:
+  void Print();
+  void SaveTiming(const char* name, int64_t ticks);
+  static HStatistics* Instance() {
+    static SetOncePointer<HStatistics> instance;
+    if (!instance.is_set()) {
+      instance.set(new HStatistics());
+    }
+    return instance.get();
+  }
+
+ private:
+  HStatistics() : timing_(5), names_(5), total_(0), full_code_gen_(0) { }
+
+  List<int64_t> timing_;
+  List<const char*> names_;
+  int64_t total_;
+  int64_t full_code_gen_;
+};
+
+
+class HPhase BASE_EMBEDDED {
+ public:
+  static const char* const kFullCodeGen;
+  static const char* const kTotal;
+
+  explicit HPhase(const char* name) { Begin(name, NULL, NULL, NULL); }
+  HPhase(const char* name, HGraph* graph) {
+    Begin(name, graph, NULL, NULL);
+  }
+  HPhase(const char* name, LChunk* chunk) {
+    Begin(name, NULL, chunk, NULL);
+  }
+  HPhase(const char* name, LAllocator* allocator) {
+    Begin(name, NULL, NULL, allocator);
+  }
+
+  ~HPhase() {
+    End();
+  }
+
+ private:
+  void Begin(const char* name,
+             HGraph* graph,
+             LChunk* chunk,
+             LAllocator* allocator);
+  void End() const;
+
+  int64_t start_;
+  const char* name_;
+  HGraph* graph_;
+  LChunk* chunk_;
+  LAllocator* allocator_;
+};
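+// A usage sketch (the phase name is illustrative): scoping an HPhase
+// around a pass makes timing, tracing and debug verification happen
+// automatically in the destructor:
+//
+//   { HPhase phase("Canonicalize", graph);
+//     graph->Canonicalize();
+//   }  // ~HPhase -> End(): records timing, traces, verifies in DEBUG.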
+
+
+class HTracer: public Malloced {
+ public:
+  void TraceCompilation(FunctionLiteral* function);
+  void TraceHydrogen(const char* name, HGraph* graph);
+  void TraceLithium(const char* name, LChunk* chunk);
+  void TraceLiveRanges(const char* name, LAllocator* allocator);
+
+  static HTracer* Instance() {
+    static SetOncePointer<HTracer> instance;
+    if (!instance.is_set()) {
+      instance.set(new HTracer("hydrogen.cfg"));
+    }
+    return instance.get();
+  }
+
+ private:
+  class Tag BASE_EMBEDDED {
+   public:
+    Tag(HTracer* tracer, const char* name) {
+      name_ = name;
+      tracer_ = tracer;
+      tracer->PrintIndent();
+      tracer->trace_.Add("begin_%s\n", name);
+      tracer->indent_++;
+    }
+
+    ~Tag() {
+      tracer_->indent_--;
+      tracer_->PrintIndent();
+      tracer_->trace_.Add("end_%s\n", name_);
+      ASSERT(tracer_->indent_ >= 0);
+      tracer_->FlushToFile();
+    }
+
+   private:
+    HTracer* tracer_;
+    const char* name_;
+  };
+
+  explicit HTracer(const char* filename)
+      : filename_(filename), trace_(&string_allocator_), indent_(0) {
+    WriteChars(filename, "", 0, false);
+  }
+
+  void TraceLiveRange(LiveRange* range, const char* type);
+  void Trace(const char* name, HGraph* graph, LChunk* chunk);
+  void FlushToFile();
+
+  void PrintEmptyProperty(const char* name) {
+    PrintIndent();
+    trace_.Add("%s\n", name);
+  }
+
+  void PrintStringProperty(const char* name, const char* value) {
+    PrintIndent();
+    trace_.Add("%s \"%s\"\n", name, value);
+  }
+
+  void PrintLongProperty(const char* name, int64_t value) {
+    PrintIndent();
+    trace_.Add("%s %d000\n", name, static_cast<int>(value / 1000));
+  }
+
+  void PrintBlockProperty(const char* name, int block_id) {
+    PrintIndent();
+    trace_.Add("%s \"B%d\"\n", name, block_id);
+  }
+
+  void PrintBlockProperty(const char* name, int block_id1, int block_id2) {
+    PrintIndent();
+    trace_.Add("%s \"B%d\" \"B%d\"\n", name, block_id1, block_id2);
+  }
+
+  void PrintIntProperty(const char* name, int value) {
+    PrintIndent();
+    trace_.Add("%s %d\n", name, value);
+  }
+
+  void PrintIndent() {
+    for (int i = 0; i < indent_; i++) {
+      trace_.Add("  ");
+    }
+  }
+
+  const char* filename_;
+  HeapStringAllocator string_allocator_;
+  StringStream trace_;
+  int indent_;
+};
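+// The begin_%s/end_%s pairs above produce the CFG file format understood
+// by the HotSpot c1visualizer tool; a block section emitted by Trace()
+// looks like:
+//
+//   begin_block
+//     name "B0"
+//     from_bci -1
+//     to_bci -1
+//     predecessors
+//     successors "B1"
+//     ...
+//   end_block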
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_HYDROGEN_H_
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index ecbdfdc..54cfb5c 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -120,6 +120,30 @@
 }
 
 
+Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  return Handle<JSGlobalPropertyCell>(
+      reinterpret_cast<JSGlobalPropertyCell**>(address));
+}
+
+
+JSGlobalPropertyCell* RelocInfo::target_cell() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  Object* object = HeapObject::FromAddress(
+      address - JSGlobalPropertyCell::kValueOffset);
+  return reinterpret_cast<JSGlobalPropertyCell*>(object);
+}
+
+
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+  Memory::Address_at(pc_) = address;
+}
+
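+// Note on the encoding above: the word at pc_ holds the raw address of
+// the cell's value slot (cell start + kValueOffset), not a tagged cell
+// pointer; target_cell() therefore subtracts kValueOffset and re-tags
+// the result via HeapObject::FromAddress().
+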
+
 Address RelocInfo::call_address() {
   ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -167,6 +191,8 @@
     visitor->VisitPointer(target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+    visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     visitor->VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -190,6 +216,8 @@
     StaticVisitor::VisitPointer(target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(this);
+  } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+    StaticVisitor::VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -246,6 +274,12 @@
 }
 
 
+Immediate::Immediate(Address addr) {
+  x_ = reinterpret_cast<int32_t>(addr);
+  rmode_ = RelocInfo::NONE;
+}
+
+
 void Assembler::emit(uint32_t x) {
   *reinterpret_cast<uint32_t*>(pc_) = x;
   pc_ += sizeof(uint32_t);
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 125f503..c173a3d 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -32,7 +32,7 @@
 
 // The original source code covered by the above license above has been modified
 // significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -56,10 +56,10 @@
 
 // The Probe method needs executable memory, so it uses Heap::CreateCode.
 // Allocation failure is silent and leads to safe default.
-void CpuFeatures::Probe() {
+void CpuFeatures::Probe(bool portable) {
   ASSERT(Heap::HasBeenSetup());
   ASSERT(supported_ == 0);
-  if (Serializer::enabled()) {
+  if (portable && Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
@@ -137,7 +137,7 @@
   found_by_runtime_probing_ = supported_;
   uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
   supported_ |= os_guarantees;
-  found_by_runtime_probing_ &= ~os_guarantees;
+  found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
 }
 
 
@@ -435,6 +435,13 @@
 }
 
 
+void Assembler::push_imm32(int32_t imm32) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x68);
+  emit(imm32);
+}
+
+
 void Assembler::push(Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1542,7 +1549,9 @@
   L->bind_to(pc_offset());
 }
 
+
 void Assembler::call(Label* L) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   if (L->is_bound()) {
@@ -1561,6 +1570,7 @@
 
 
 void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   ASSERT(!RelocInfo::IsCodeTarget(rmode));
@@ -1570,6 +1580,7 @@
 
 
 void Assembler::call(const Operand& adr) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xFF);
@@ -1772,6 +1783,14 @@
 }
 
 
+void Assembler::fldln2() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xED);
+}
+
+
 void Assembler::fld_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1902,6 +1921,14 @@
 }
 
 
+void Assembler::fyl2x() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xF1);
+}
+
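+// Note: fldln2 and fyl2x together yield a natural logarithm on the x87
+// stack.  fyl2x computes ST(1) * log2(ST(0)) and pops, so a sketch of
+// ln(x) is (operand name illustrative):
+//
+//   __ fldln2();       // push ln(2)
+//   __ fld_d(x_slot);  // push x
+//   __ fyl2x();        // ST(0) = ln(2) * log2(x) = ln(x)
+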
+
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2382,6 +2409,7 @@
   emit_sse_operand(dst, src);
 }
 
+
 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2404,6 +2432,28 @@
 }
 
 
+void Assembler::movd(const Operand& dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x7E);
+  emit_sse_operand(src, dst);
+}
+
+
+void Assembler::pand(XMMRegister dst, XMMRegister src) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0xDB);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2427,7 +2477,7 @@
 }
 
 
-void Assembler::psllq(XMMRegister reg, int8_t imm8) {
+void Assembler::psllq(XMMRegister reg, int8_t shift) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2435,7 +2485,32 @@
   EMIT(0x0F);
   EMIT(0x73);
   emit_sse_operand(esi, reg);  // esi == 6
-  EMIT(imm8);
+  EMIT(shift);
+}
+
+
+void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x70);
+  emit_sse_operand(dst, src);
+  EMIT(shuffle);
+}
+
+
+void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x3A);
+  EMIT(0x16);
+  emit_sse_operand(src, dst);
+  EMIT(offset);
 }
 
 
@@ -2475,7 +2550,7 @@
 
 
 void Assembler::RecordComment(const char* msg) {
-  if (FLAG_debug_code) {
+  if (FLAG_code_comments) {
     EnsureSpace ensure_space(this);
     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
   }
@@ -2607,9 +2682,15 @@
 }
 
 
-void Assembler::dd(uint32_t data, RelocInfo::Mode reloc_info) {
+void Assembler::db(uint8_t data) {
   EnsureSpace ensure_space(this);
-  emit(data, reloc_info);
+  EMIT(data);
+}
+
+
+void Assembler::dd(uint32_t data) {
+  EnsureSpace ensure_space(this);
+  emit(data);
 }
 
 
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 79637a1..11acb56 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 // A light-weight IA32 Assembler.
 
@@ -64,7 +64,36 @@
 // and best performance in optimized code.
 //
 struct Register {
-  bool is_valid() const { return 0 <= code_ && code_ < 8; }
+  static const int kNumAllocatableRegisters = 5;
+  static const int kNumRegisters = 8;
+
+  static int ToAllocationIndex(Register reg) {
+    ASSERT(reg.code() < 4 || reg.code() == 7);
+    return (reg.code() == 7) ? 4 : reg.code();
+  }
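+  // i.e. eax/ecx/edx/ebx keep their codes 0-3 as allocation indices and
+  // edi (code 7) becomes index 4; esi, esp and ebp are never allocated.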
+
+  static Register FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    return (index == 4) ? from_code(7) : from_code(index);
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "eax",
+      "ecx",
+      "edx",
+      "ebx",
+      "edi"
+    };
+    return names[index];
+  }
+
+  static Register from_code(int code) {
+    Register r = { code };
+    return r;
+  }
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
   // eax, ebx, ecx and edx are byte registers, the rest are not.
   bool is_byte_register() const { return code_ <= 3; }
@@ -93,7 +122,40 @@
 
 
 struct XMMRegister {
-  bool is_valid() const { return 0 <= code_ && code_ < 8; }
+  static const int kNumAllocatableRegisters = 7;
+  static const int kNumRegisters = 8;
+
+  static int ToAllocationIndex(XMMRegister reg) {
+    ASSERT(reg.code() != 0);
+    return reg.code() - 1;
+  }
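+  // xmm0 is excluded from allocation (leaving it free as a scratch
+  // register), so xmm1..xmm7 map to allocation indices 0..6.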
+
+  static XMMRegister FromAllocationIndex(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    return from_code(index + 1);
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "xmm1",
+      "xmm2",
+      "xmm3",
+      "xmm4",
+      "xmm5",
+      "xmm6",
+      "xmm7"
+    };
+    return names[index];
+  }
+
+  static XMMRegister from_code(int code) {
+    XMMRegister r = { code };
+    return r;
+  }
+
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+  bool is(XMMRegister reg) const { return code_ == reg.code_; }
   int code() const {
     ASSERT(is_valid());
     return code_;
@@ -102,6 +164,7 @@
   int code_;
 };
 
+
 const XMMRegister xmm0 = { 0 };
 const XMMRegister xmm1 = { 1 };
 const XMMRegister xmm2 = { 2 };
@@ -111,6 +174,17 @@
 const XMMRegister xmm6 = { 6 };
 const XMMRegister xmm7 = { 7 };
 
+
+typedef XMMRegister DoubleRegister;
+
+
+// Index of register used in pusha/popa.
+// Order of pushed registers: EAX, ECX, EDX, EBX, ESP, EBP, ESI, and EDI
+inline int EspIndexForPushAll(Register reg) {
+  return Register::kNumRegisters - 1 - reg.code();
+}
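+// For example, pushad pushes EAX first (deepest), so
+// EspIndexForPushAll(eax) == 7 while EspIndexForPushAll(edi) == 0, the
+// slot closest to esp.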
+
+
 enum Condition {
   // any value < 0 is considered no_condition
   no_condition  = -1,
@@ -202,6 +276,7 @@
   inline explicit Immediate(const ExternalReference& ext);
   inline explicit Immediate(Handle<Object> handle);
   inline explicit Immediate(Smi* value);
+  inline explicit Immediate(Address addr);
 
   static Immediate CodeRelativeOffset(Label* label) {
     return Immediate(label);
@@ -281,6 +356,11 @@
                    RelocInfo::EXTERNAL_REFERENCE);
   }
 
+  static Operand Cell(Handle<JSGlobalPropertyCell> cell) {
+    return Operand(reinterpret_cast<int32_t>(cell.location()),
+                   RelocInfo::GLOBAL_PROPERTY_CELL);
+  }
+
   // Returns true if this Operand is a wrapper for the specified register.
   bool is_reg(Register reg) const;
 
@@ -369,9 +449,12 @@
 //   }
 class CpuFeatures : public AllStatic {
  public:
-  // Detect features of the target CPU. Set safe defaults if the serializer
-  // is enabled (snapshots must be portable).
-  static void Probe();
+  // Detect features of the target CPU. If the portable flag is set,
+  // the method sets safe defaults if the serializer is enabled
+  // (snapshots must be portable).
+  static void Probe(bool portable);
+  static void Clear() { supported_ = 0; }
+
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
     if (f == SSE2 && !FLAG_enable_sse2) return false;
@@ -484,6 +567,20 @@
   // The debug break slot must be able to contain a call instruction.
   static const int kDebugBreakSlotLength = kCallInstructionLength;
 
+  // One byte opcode for test eax,0xXXXXXXXX.
+  static const byte kTestEaxByte = 0xA9;
+  // One byte opcode for test al, 0xXX.
+  static const byte kTestAlByte = 0xA8;
+  // One byte opcode for nop.
+  static const byte kNopByte = 0x90;
+
+  // One byte opcode for a short unconditional jump.
+  static const byte kJmpShortOpcode = 0xEB;
+  // One byte prefix for a short conditional jump.
+  static const byte kJccShortPrefix = 0x70;
+  static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
+  static const byte kJcShortOpcode = kJccShortPrefix | carry;
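+  // With the ia32 condition codes (carry == below == 2, not_carry ==
+  // above_equal == 3) these work out to 0x72 (jc rel8) and 0x73
+  // (jnc rel8), the standard short Jcc encodings.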
+
   // ---------------------------------------------------------------------------
   // Code generation
   //
@@ -519,6 +616,7 @@
   void popfd();
 
   void push(const Immediate& x);
+  void push_imm32(int32_t imm32);
   void push(Register src);
   void push(const Operand& src);
 
@@ -720,6 +818,7 @@
   void fld1();
   void fldz();
   void fldpi();
+  void fldln2();
 
   void fld_s(const Operand& adr);
   void fld_d(const Operand& adr);
@@ -744,6 +843,7 @@
   void fchs();
   void fcos();
   void fsin();
+  void fyl2x();
 
   void fadd(int i);
   void fsub(int i);
@@ -814,12 +914,16 @@
   void movdbl(const Operand& dst, XMMRegister src);
 
   void movd(XMMRegister dst, const Operand& src);
+  void movd(const Operand& src, XMMRegister dst);
   void movsd(XMMRegister dst, XMMRegister src);
 
+  void pand(XMMRegister dst, XMMRegister src);
   void pxor(XMMRegister dst, XMMRegister src);
   void ptest(XMMRegister dst, XMMRegister src);
 
-  void psllq(XMMRegister reg, int8_t imm8);
+  void psllq(XMMRegister reg, int8_t shift);
+  void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
+  void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
 
   // Parallel XMM operations.
   void movntdqa(XMMRegister src, const Operand& dst);
@@ -843,12 +947,13 @@
   void RecordDebugBreakSlot();
 
   // Record a comment relocation entry that can be used by a disassembler.
-  // Use --debug_code to enable.
+  // Use --code-comments to enable.
   void RecordComment(const char* msg);
 
-  // Writes a single word of data in the code stream.
-  // Used for inline tables, e.g., jump-tables.
-  void dd(uint32_t data, RelocInfo::Mode reloc_info);
+  // Writes a single byte or word of data in the code stream.  Used for
+  // inline tables, e.g., jump-tables.
+  void db(uint8_t data);
+  void dd(uint32_t data);
 
   int pc_offset() const { return pc_ - buffer_; }
 
@@ -876,8 +981,8 @@
   void emit_sse_operand(XMMRegister dst, XMMRegister src);
   void emit_sse_operand(Register dst, XMMRegister src);
 
- private:
   byte* addr_at(int pos)  { return buffer_ + pos; }
+ private:
   byte byte_at(int pos)  { return buffer_[pos]; }
   void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos)  {
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 0ad3e6d..918f346 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,8 +29,9 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 
-#include "code-stubs.h"
 #include "codegen-inl.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -480,6 +481,85 @@
 }
 
 
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push a copy of the function onto the stack.
+  __ push(edi);
+
+  __ push(edi);  // Function is also the parameter to the runtime call.
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
+
+  // Restore function and tear down temporary frame.
+  __ pop(edi);
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ lea(ecx, FieldOperand(eax, Code::kHeaderSize));
+  __ jmp(Operand(ecx));
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Pass the deoptimization type to the runtime system.
+  __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
+  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
+
+  // Get the full codegen state from the stack and untag it.
+  __ mov(ecx, Operand(esp, 1 * kPointerSize));
+  __ SmiUntag(ecx);
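+
+  // The deoptimizer leaves the state smi (and, for TOS_REG, the value that
+  // belongs in eax) just above the return address, so each case below pops
+  // the matching number of stack slots.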
+
+  // Switch on the state.
+  NearLabel not_no_registers, not_tos_eax;
+  __ cmp(ecx, FullCodeGenerator::NO_REGISTERS);
+  __ j(not_equal, &not_no_registers);
+  __ ret(1 * kPointerSize);  // Remove state.
+
+  __ bind(&not_no_registers);
+  __ mov(eax, Operand(esp, 2 * kPointerSize));
+  __ cmp(ecx, FullCodeGenerator::TOS_REG);
+  __ j(not_equal, &not_tos_eax);
+  __ ret(2 * kPointerSize);  // Remove state, eax.
+
+  __ bind(&not_tos_eax);
+  __ Abort("no cases left");
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
+  // TODO(kasperl): Do we need to save/restore the XMM registers too?
+
+  // For now, we are relying on the fact that Runtime::NotifyOSR
+  // doesn't do any garbage collection which allows us to save/restore
+  // the registers without worrying about which of them contain
+  // pointers. This seems a bit fragile.
+  __ pushad();
+  __ EnterInternalFrame();
+  __ CallRuntime(Runtime::kNotifyOSR, 0);
+  __ LeaveInternalFrame();
+  __ popad();
+  __ ret(0);
+}
+
+
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
   { Label done;
@@ -1418,6 +1498,76 @@
 }
 
 
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  // We shouldn't be performing on-stack replacement in the first
+  // place if the CPU features we need for the optimized Crankshaft
+  // code aren't supported.
+  CpuFeatures::Probe(false);
+  if (!CpuFeatures::IsSupported(SSE2)) {
+    __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
+    return;
+  }
+
+  // Get the loop depth of the stack guard check. This is recorded in
+  // a test(eax, depth) instruction right after the call.
+  Label stack_check;
+  __ mov(ebx, Operand(esp, 0));  // return address
+  if (FLAG_debug_code) {
+    __ cmpb(Operand(ebx, 0), Assembler::kTestAlByte);
+    __ Assert(equal, "test eax instruction not found after loop stack check");
+  }
+  __ movzx_b(ebx, Operand(ebx, 1));  // depth
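+  // "test al, imm8" encodes as the opcode byte 0xA8 followed by the 8-bit
+  // immediate, so the byte at [return address + 1] is the loop depth that
+  // the unoptimized code recorded.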
+
+  // Get the loop nesting level at which we allow OSR from the
+  // unoptimized code and check if we want to do OSR yet. If not we
+  // should perform a stack guard check so we can get interrupts while
+  // waiting for on-stack replacement.
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
+  __ cmpb(ebx, FieldOperand(ecx, Code::kAllowOSRAtLoopNestingLevelOffset));
+  __ j(greater, &stack_check);
+
+  // Pass the function to optimize as the argument to the on-stack
+  // replacement runtime function.
+  __ EnterInternalFrame();
+  __ push(eax);
+  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  __ LeaveInternalFrame();
+
+  // If the result was -1 it means that we couldn't optimize the
+  // function. Just return and continue in the unoptimized version.
+  NearLabel skip;
+  __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
+  __ j(not_equal, &skip);
+  __ ret(0);
+
+  // If we decide not to perform on-stack replacement we perform a
+  // stack guard check to enable interrupts.
+  __ bind(&stack_check);
+  NearLabel ok;
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit();
+  __ cmp(esp, Operand::StaticVariable(stack_limit));
+  __ j(above_equal, &ok, taken);
+  StackCheckStub stub;
+  __ TailCallStub(&stub);
+  __ Abort("Unreachable code: returned from tail call.");
+  __ bind(&ok);
+  __ ret(0);
+
+  __ bind(&skip);
+  // Untag the AST id and push it on the stack.
+  __ SmiUntag(eax);
+  __ push(eax);
+
+  // Generate the code for doing the frame-to-frame translation using
+  // the deoptimizer infrastructure.
+  Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
+  generator.Generate();
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 5975ad2..a371c96 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -64,6 +64,8 @@
   __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
   __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
   __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
+  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
+         Immediate(Factory::undefined_value()));
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
@@ -446,6 +448,11 @@
                                  Label* non_float,
                                  Register scratch);
 
+  // Checks that the two floating point numbers on top of the FPU stack
+  // have int32 values.
+  static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
+                                         Label* non_int32);
+
   // Takes the operands in edx and eax and loads them as integers in eax
   // and ecx.
   static void LoadAsIntegers(MacroAssembler* masm,
@@ -460,8 +467,16 @@
                                      bool use_sse3,
                                      Label* operand_conversion_failure);
 
-  // Test if operands are smis or heap numbers and load them
-  // into xmm0 and xmm1 if they are. Operands are in edx and eax.
+  // Must only be called after LoadUnknownsAsIntegers.  Assumes that the
+  // operands are pushed on the stack, and that their conversions to int32
+  // are in eax and ecx.  Checks that the original numbers were in the int32
+  // range.
+  static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
+                                           bool use_sse3,
+                                           Label* not_int32);
+
+  // Assumes that operands are smis or heap numbers and loads them
+  // into xmm0 and xmm1. Operands are in edx and eax.
   // Leaves operands unchanged.
   static void LoadSSE2Operands(MacroAssembler* masm);
 
@@ -474,6 +489,12 @@
   // Similar to LoadSSE2Operands but assumes that both operands are smis.
   // Expects operands in edx, eax.
   static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
+
+  // Checks that the two floating point numbers loaded into xmm0 and xmm1
+  // have int32 values.
+  static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
+                                        Label* non_int32,
+                                        Register scratch);
 };
 
 
@@ -709,22 +730,27 @@
     case Token::SHL: {
       Comment perform_float(masm, "-- Perform float operation on smis");
       __ bind(&use_fp_on_smis);
-      // Result we want is in left == edx, so we can put the allocated heap
-      // number in eax.
-      __ AllocateHeapNumber(eax, ecx, ebx, slow);
-      // Store the result in the HeapNumber and return.
-      if (CpuFeatures::IsSupported(SSE2)) {
-        CpuFeatures::Scope use_sse2(SSE2);
-        __ cvtsi2sd(xmm0, Operand(left));
-        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
+        // The result we want is in left == edx, so we can put the allocated
+        // heap number in eax.
+        __ AllocateHeapNumber(eax, ecx, ebx, slow);
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(left));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          // It's OK to overwrite the right argument on the stack because we
+          // are about to return.
+          __ mov(Operand(esp, 1 * kPointerSize), left);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        GenerateReturn(masm);
       } else {
-        // It's OK to overwrite the right argument on the stack because we
-        // are about to return.
-        __ mov(Operand(esp, 1 * kPointerSize), left);
-        __ fild_s(Operand(esp, 1 * kPointerSize));
-        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
+        __ jmp(slow);
       }
-      GenerateReturn(masm);
       break;
     }
 
@@ -757,31 +783,36 @@
         default: UNREACHABLE();
           break;
       }
-      __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
-      if (CpuFeatures::IsSupported(SSE2)) {
-        CpuFeatures::Scope use_sse2(SSE2);
-        FloatingPointHelper::LoadSSE2Smis(masm, ebx);
-        switch (op_) {
-          case Token::ADD: __ addsd(xmm0, xmm1); break;
-          case Token::SUB: __ subsd(xmm0, xmm1); break;
-          case Token::MUL: __ mulsd(xmm0, xmm1); break;
-          case Token::DIV: __ divsd(xmm0, xmm1); break;
-          default: UNREACHABLE();
+      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
+        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+          switch (op_) {
+            case Token::ADD: __ addsd(xmm0, xmm1); break;
+            case Token::SUB: __ subsd(xmm0, xmm1); break;
+            case Token::MUL: __ mulsd(xmm0, xmm1); break;
+            case Token::DIV: __ divsd(xmm0, xmm1); break;
+            default: UNREACHABLE();
+          }
+          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+        } else {  // SSE2 not available, use FPU.
+          FloatingPointHelper::LoadFloatSmis(masm, ebx);
+          switch (op_) {
+            case Token::ADD: __ faddp(1); break;
+            case Token::SUB: __ fsubp(1); break;
+            case Token::MUL: __ fmulp(1); break;
+            case Token::DIV: __ fdivp(1); break;
+            default: UNREACHABLE();
+          }
+          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
         }
-        __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
-      } else {  // SSE2 not available, use FPU.
-        FloatingPointHelper::LoadFloatSmis(masm, ebx);
-        switch (op_) {
-          case Token::ADD: __ faddp(1); break;
-          case Token::SUB: __ fsubp(1); break;
-          case Token::MUL: __ fmulp(1); break;
-          case Token::DIV: __ fdivp(1); break;
-          default: UNREACHABLE();
-        }
-        __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+        __ mov(eax, ecx);
+        GenerateReturn(masm);
+      } else {
+        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
+        __ jmp(slow);
       }
-      __ mov(eax, ecx);
-      GenerateReturn(masm);
       break;
     }
 
@@ -821,6 +852,13 @@
 
   __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
 
+  if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) {
+    Label slow;
+    if (ShouldGenerateSmiCode()) GenerateSmiCode(masm, &slow);
+    __ bind(&slow);
+    GenerateTypeTransition(masm);
+  }
+
   // Generate fast case smi code if requested. This flag is set when the fast
   // case smi code is not generated by the caller. Generating it here will speed
   // up common operations.
@@ -1215,42 +1253,1284 @@
 }
 
 
+Handle<Code> GetTypeRecordingBinaryOpStub(int key,
+    TRBinaryOpIC::TypeInfo type_info,
+    TRBinaryOpIC::TypeInfo result_type_info) {
+  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+  return stub.GetCode();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ pop(ecx);  // Save return address.
+  __ push(edx);
+  __ push(eax);
+  // Left and right arguments are now on top.
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ push(Immediate(Smi::FromInt(MinorKey())));
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ push(Immediate(Smi::FromInt(operands_type_)));
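+  // Together with the two operands pushed above, these are the five
+  // arguments the IC patch routine below is invoked with.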
+
+  __ push(ecx);  // Push return address.
+
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+      5,
+      1);
+}
+
+
+// Prepare for a type transition runtime call when the args are already on
+// the stack, under the return address.
+void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+    MacroAssembler* masm) {
+  __ pop(ecx);  // Save return address.
+  // Left and right arguments are already on top of the stack.
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ push(Immediate(Smi::FromInt(MinorKey())));
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ push(Immediate(Smi::FromInt(operands_type_)));
+
+  __ push(ecx);  // Push return address.
+
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
+      5,
+      1);
+}
+
+
+void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operands_type_) {
+    case TRBinaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case TRBinaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case TRBinaryOpIC::INT32:
+      GenerateInt32Stub(masm);
+      break;
+    case TRBinaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case TRBinaryOpIC::STRING:
+      GenerateStringStub(masm);
+      break;
+    case TRBinaryOpIC::GENERIC:
+      GenerateGeneric(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+const char* TypeRecordingBinaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+    default: overwrite_name = "UnknownOverwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "TypeRecordingBinaryOpStub_%s_%s_%s",
+               op_name,
+               overwrite_name,
+               TRBinaryOpIC::GetName(operands_type_));
+  return name_;
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+    Label* slow,
+    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
+  // dividend in eax and edx free for the division.  Use eax, ebx for those.
+  Comment load_comment(masm, "-- Load arguments");
+  Register left = edx;
+  Register right = eax;
+  if (op_ == Token::DIV || op_ == Token::MOD) {
+    left = eax;
+    right = ebx;
+    __ mov(ebx, eax);
+    __ mov(eax, edx);
+  }
+
+  // 2. Prepare the smi check of both operands by oring them together.
+  Comment smi_check_comment(masm, "-- Smi check arguments");
+  Label not_smis;
+  Register combined = ecx;
+  ASSERT(!left.is(combined) && !right.is(combined));
+  switch (op_) {
+    case Token::BIT_OR:
+      // Perform the operation into eax and smi check the result.  Preserve
+      // eax in case the result is not a smi.
+      ASSERT(!left.is(ecx) && !right.is(ecx));
+      __ mov(ecx, right);
+      __ or_(right, Operand(left));  // Bitwise or is commutative.
+      combined = right;
+      break;
+
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+      __ mov(combined, right);
+      __ or_(combined, Operand(left));
+      break;
+
+    case Token::SHL:
+    case Token::SAR:
+    case Token::SHR:
+      // Move the right operand into ecx for the shift operation, use eax
+      // for the smi check register.
+      ASSERT(!left.is(ecx) && !right.is(ecx));
+      __ mov(ecx, right);
+      __ or_(right, Operand(left));
+      combined = right;
+      break;
+
+    default:
+      break;
+  }
+
+  // 3. Perform the smi check of the operands.
+  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
+  __ test(combined, Immediate(kSmiTagMask));
+  __ j(not_zero, &not_smis, not_taken);
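+  // Oring the operands lets a single test cover both: e.g. the smis 3 and 4
+  // are tagged as 6 and 8, and (6 | 8) & kSmiTagMask == 0, while any heap
+  // object pointer contributes a set tag bit to the combined value.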
+
+  // 4. Operands are both smis, perform the operation leaving the result in
+  // eax and check the result if necessary.
+  Comment perform_smi(masm, "-- Perform smi operation");
+  Label use_fp_on_smis;
+  switch (op_) {
+    case Token::BIT_OR:
+      // Nothing to do.
+      break;
+
+    case Token::BIT_XOR:
+      ASSERT(right.is(eax));
+      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
+      break;
+
+    case Token::BIT_AND:
+      ASSERT(right.is(eax));
+      __ and_(right, Operand(left));  // Bitwise and is commutative.
+      break;
+
+    case Token::SHL:
+      // Remove tags from operands (but keep sign).
+      __ SmiUntag(left);
+      __ SmiUntag(ecx);
+      // Perform the operation.
+      __ shl_cl(left);
+      // Check that the *signed* result fits in a smi.
+      __ cmp(left, 0xc0000000);
+      __ j(sign, &use_fp_on_smis, not_taken);
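+      // The subtraction left - 0xc0000000 is negative exactly for values in
+      // [0x40000000, 0xc0000000), i.e. precisely the int32 values outside
+      // the signed 31-bit smi range, so one compare checks both bounds.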
+      // Tag the result and store it in register eax.
+      __ SmiTag(left);
+      __ mov(eax, left);
+      break;
+
+    case Token::SAR:
+      // Remove tags from operands (but keep sign).
+      __ SmiUntag(left);
+      __ SmiUntag(ecx);
+      // Perform the operation.
+      __ sar_cl(left);
+      // Tag the result and store it in register eax.
+      __ SmiTag(left);
+      __ mov(eax, left);
+      break;
+
+    case Token::SHR:
+      // Remove tags from operands (but keep sign).
+      __ SmiUntag(left);
+      __ SmiUntag(ecx);
+      // Perform the operation.
+      __ shr_cl(left);
+      // Check that the *unsigned* result fits in a smi.
+      // Neither of the two high-order bits can be set:
+      // - 0x80000000: high bit would be lost when smi tagging.
+      // - 0x40000000: this number would convert to negative when smi
+      //   tagging.
+      // These two cases can only happen with shifts by 0 or 1 when handed
+      // a valid smi.
+      __ test(left, Immediate(0xc0000000));
+      __ j(not_zero, slow, not_taken);
+      // Tag the result and store it in register eax.
+      __ SmiTag(left);
+      __ mov(eax, left);
+      break;
+
+    case Token::ADD:
+      ASSERT(right.is(eax));
+      __ add(right, Operand(left));  // Addition is commutative.
+      __ j(overflow, &use_fp_on_smis, not_taken);
+      break;
+
+    case Token::SUB:
+      __ sub(left, Operand(right));
+      __ j(overflow, &use_fp_on_smis, not_taken);
+      __ mov(eax, left);
+      break;
+
+    case Token::MUL:
+      // If the smi tag is 0 we can just leave the tag on one operand.
+      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
+      // We can't revert the multiplication if the result is not a smi
+      // so save the right operand.
+      __ mov(ebx, right);
+      // Remove tag from one of the operands (but keep sign).
+      __ SmiUntag(right);
+      // Do multiplication.
+      __ imul(right, Operand(left));  // Multiplication is commutative.
+      __ j(overflow, &use_fp_on_smis, not_taken);
+      // Check for negative zero result.  Use combined = left | right.
+      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
+      break;
+
+    case Token::DIV:
+      // We can't revert the division if the result is not a smi so
+      // save the left operand.
+      __ mov(edi, left);
+      // Check for 0 divisor.
+      __ test(right, Operand(right));
+      __ j(zero, &use_fp_on_smis, not_taken);
+      // Sign extend left into edx:eax.
+      ASSERT(left.is(eax));
+      __ cdq();
+      // Divide edx:eax by right.
+      __ idiv(right);
+      // Check for the corner case of dividing the most negative smi by
+      // -1. We cannot use the overflow flag, since it is not set by idiv
+      // instruction.
+      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+      __ cmp(eax, 0x40000000);
+      __ j(equal, &use_fp_on_smis);
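+      // Dividing the tagged operands (2a)/(2b) already yields the untagged
+      // quotient a/b, and the only quotient of two smis that falls outside
+      // the smi range is (-2^30)/(-1) == 2^30 == 0x40000000.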
+      // Check for negative zero result.  Use combined = left | right.
+      __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
+      // Check that the remainder is zero.
+      __ test(edx, Operand(edx));
+      __ j(not_zero, &use_fp_on_smis);
+      // Tag the result and store it in register eax.
+      __ SmiTag(eax);
+      break;
+
+    case Token::MOD:
+      // Check for 0 divisor.
+      __ test(right, Operand(right));
+      __ j(zero, &not_smis, not_taken);
+
+      // Sign extend left into edx:eax.
+      ASSERT(left.is(eax));
+      __ cdq();
+      // Divide edx:eax by right.
+      __ idiv(right);
+      // Check for negative zero result.  Use combined = left | right.
+      __ NegativeZeroTest(edx, combined, slow);
+      // Move remainder to register eax.
+      __ mov(eax, edx);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+
+  // 5. Emit return of result in eax.  Some operations have their arguments
+  // pushed on the stack and must drop them when returning.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+      __ ret(0);
+      break;
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR:
+      __ ret(2 * kPointerSize);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  // 6. For some operations emit inline code to perform floating point
+  // operations on known smis (e.g., if the result of the operation
+  // overflowed the smi range).
+  if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
+    __ bind(&use_fp_on_smis);
+    switch (op_) {
+      // Undo the effects of some operations, and some register moves.
+      case Token::SHL:
+        // The arguments are saved on the stack, and only used from there.
+        break;
+      case Token::ADD:
+        // Revert right = right + left.
+        __ sub(right, Operand(left));
+        break;
+      case Token::SUB:
+        // Revert left = left - right.
+        __ add(left, Operand(right));
+        break;
+      case Token::MUL:
+        // Right was clobbered but a copy is in ebx.
+        __ mov(right, ebx);
+        break;
+      case Token::DIV:
+        // Left was clobbered but a copy is in edi.  Right is in ebx for
+        // division.  They should be in eax, ebx for jump to not_smi.
+        __ mov(eax, edi);
+        break;
+      default:
+        // No other operators jump to use_fp_on_smis.
+        break;
+    }
+    __ jmp(&not_smis);
+  } else {
+    ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
+    switch (op_) {
+      case Token::SHL: {
+        Comment perform_float(masm, "-- Perform float operation on smis");
+        __ bind(&use_fp_on_smis);
+        // The result we want is in left == edx, so we can put the allocated
+        // heap number in eax.
+        __ AllocateHeapNumber(eax, ecx, ebx, slow);
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(left));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          // It's OK to overwrite the right argument on the stack because we
+          // are about to return.
+          __ mov(Operand(esp, 1 * kPointerSize), left);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        __ ret(2 * kPointerSize);
+        break;
+      }
+
+      case Token::ADD:
+      case Token::SUB:
+      case Token::MUL:
+      case Token::DIV: {
+        Comment perform_float(masm, "-- Perform float operation on smis");
+        __ bind(&use_fp_on_smis);
+        // Restore arguments to edx, eax.
+        switch (op_) {
+          case Token::ADD:
+            // Revert right = right + left.
+            __ sub(right, Operand(left));
+            break;
+          case Token::SUB:
+            // Revert left = left - right.
+            __ add(left, Operand(right));
+            break;
+          case Token::MUL:
+            // Right was clobbered but a copy is in ebx.
+            __ mov(right, ebx);
+            break;
+          case Token::DIV:
+            // Left was clobbered but a copy is in edi.  Right is in ebx for
+            // division.
+            __ mov(edx, edi);
+            __ mov(eax, right);
+            break;
+          default: UNREACHABLE();
+            break;
+        }
+        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+          switch (op_) {
+            case Token::ADD: __ addsd(xmm0, xmm1); break;
+            case Token::SUB: __ subsd(xmm0, xmm1); break;
+            case Token::MUL: __ mulsd(xmm0, xmm1); break;
+            case Token::DIV: __ divsd(xmm0, xmm1); break;
+            default: UNREACHABLE();
+          }
+          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+        } else {  // SSE2 not available, use FPU.
+          FloatingPointHelper::LoadFloatSmis(masm, ebx);
+          switch (op_) {
+            case Token::ADD: __ faddp(1); break;
+            case Token::SUB: __ fsubp(1); break;
+            case Token::MUL: __ fmulp(1); break;
+            case Token::DIV: __ fdivp(1); break;
+            default: UNREACHABLE();
+          }
+          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+        }
+        __ mov(eax, ecx);
+        __ ret(0);
+        break;
+      }
+
+      default:
+        break;
+    }
+  }
+
+  // 7. Non-smi operands, fall out to the non-smi code with the operands in
+  // edx and eax.
+  Comment done_comment(masm, "-- Enter non-smi code");
+  __ bind(&not_smis);
+  switch (op_) {
+    case Token::BIT_OR:
+    case Token::SHL:
+    case Token::SAR:
+    case Token::SHR:
+      // Right operand is saved in ecx and eax was destroyed by the smi
+      // check.
+      __ mov(eax, ecx);
+      break;
+
+    case Token::DIV:
+    case Token::MOD:
+      // Operands are in eax, ebx at this point.
+      __ mov(edx, eax);
+      __ mov(eax, ebx);
+      break;
+
+    default:
+      break;
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  Label call_runtime;
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+      break;
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR:
+      GenerateRegisterArgsPush(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
+      result_type_ == TRBinaryOpIC::SMI) {
+    GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
+  } else {
+    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+  }
+  __ bind(&call_runtime);
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+      GenerateTypeTransition(masm);
+      break;
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR:
+      GenerateTypeTransitionWithSavedArgs(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
+  ASSERT(op_ == Token::ADD);
+  // If one of the arguments is a string, call the string add stub.
+  // Otherwise, transition to the generic TRBinaryOpIC type.
+
+  // Registers containing left and right operands respectively.
+  Register left = edx;
+  Register right = eax;
+
+  // Test if left operand is a string.
+  NearLabel left_not_string;
+  __ test(left, Immediate(kSmiTagMask));
+  __ j(zero, &left_not_string);
+  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &left_not_string);
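+  // String instance types are the values below FIRST_NONSTRING_TYPE, so
+  // above_equal here means "not a string".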
+
+  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_left_stub);
+
+  // Left operand is not a string, test right.
+  __ bind(&left_not_string);
+  __ test(right, Immediate(kSmiTagMask));
+  __ j(zero, &call_runtime);
+  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &call_runtime);
+
+  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_right_stub);
+
+  // Neither argument is a string.
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::INT32);
+
+  // Floating point case.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      Label not_floats;
+      Label not_int32;
+      if (CpuFeatures::IsSupported(SSE2)) {
+        CpuFeatures::Scope use_sse2(SSE2);
+        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+        FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        // Check result type if it is currently Int32.
+        if (result_type_ <= TRBinaryOpIC::INT32) {
+          __ cvttsd2si(ecx, Operand(xmm0));
+          __ cvtsi2sd(xmm2, Operand(ecx));
+          __ ucomisd(xmm0, xmm2);
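+          // The double round-trips through int32 unchanged only if it is an
+          // exact int32; ucomisd leaves the zero flag clear when the values
+          // differ and sets carry for NaN, so the two jumps below reject
+          // everything else.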
+          __ j(not_zero, &not_int32);
+          __ j(carry, &not_int32);
+        }
+        GenerateHeapResultAllocation(masm, &call_runtime);
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        __ ret(0);
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
+        FloatingPointHelper::LoadFloatOperands(
+            masm,
+            ecx,
+            FloatingPointHelper::ARGS_IN_REGISTERS);
+        FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        Label after_alloc_failure;
+        GenerateHeapResultAllocation(masm, &after_alloc_failure);
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(0);
+        __ bind(&after_alloc_failure);
+        __ ffree();
+        __ jmp(&call_runtime);
+      }
+
+      __ bind(&not_floats);
+      __ bind(&not_int32);
+      GenerateTypeTransition(masm);
+      break;
+    }
+
+    case Token::MOD: {
+      // For MOD we go directly to runtime in the non-smi case.
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      GenerateRegisterArgsPush(masm);
+      Label not_floats;
+      Label not_int32;
+      Label non_smi_result;
+      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
+                                                  use_sse3_,
+                                                  &not_floats);
+      FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
+                                                        &not_int32);
+      switch (op_) {
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::SAR: __ sar_cl(eax); break;
+        case Token::SHL: __ shl_cl(eax); break;
+        case Token::SHR: __ shr_cl(eax); break;
+        default: UNREACHABLE();
+      }
+      if (op_ == Token::SHR) {
+        // Check if result is non-negative and fits in a smi.
+        __ test(eax, Immediate(0xc0000000));
+        __ j(not_zero, &call_runtime);
+      } else {
+        // Check if result fits in a smi.
+        __ cmp(eax, 0xc0000000);
+        __ j(negative, &non_smi_result);
+      }
+      // Tag smi result and return.
+      __ SmiTag(eax);
+      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
+
+      // All ops except SHR return a signed int32 that we load in
+      // a HeapNumber.
+      if (op_ != Token::SHR) {
+        __ bind(&non_smi_result);
+        // Allocate a heap number if needed.
+        __ mov(ebx, Operand(eax));  // ebx: result
+        NearLabel skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+          case OVERWRITE_RIGHT:
+            // If the operand was an object, we skip the
+            // allocation of a heap number.
+            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                1 * kPointerSize : 2 * kPointerSize));
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          __ mov(Operand(esp, 1 * kPointerSize), ebx);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
+      }
+
+      __ bind(&not_floats);
+      __ bind(&not_int32);
+      GenerateTypeTransitionWithSavedArgs(masm);
+      break;
+    }
+    default: UNREACHABLE(); break;
+  }
+
+  // If an allocation fails, or SHR or MOD hits a hard case,
+  // use the runtime system to get the correct result.
+  __ bind(&call_runtime);
+
+  switch (op_) {
+    case Token::ADD:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    case Token::SUB:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER ||
+         operands_type_ == TRBinaryOpIC::INT32);
+
+  // Floating point case.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      Label not_floats;
+      if (CpuFeatures::IsSupported(SSE2)) {
+        CpuFeatures::Scope use_sse2(SSE2);
+        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        GenerateHeapResultAllocation(masm, &call_runtime);
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        __ ret(0);
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
+        FloatingPointHelper::LoadFloatOperands(
+            masm,
+            ecx,
+            FloatingPointHelper::ARGS_IN_REGISTERS);
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        Label after_alloc_failure;
+        GenerateHeapResultAllocation(masm, &after_alloc_failure);
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(0);
+        __ bind(&after_alloc_failure);
+        __ ffree();
+        __ jmp(&call_runtime);
+      }
+
+      __ bind(&not_floats);
+      GenerateTypeTransition(masm);
+      break;
+    }
+
+    case Token::MOD: {
+      // For MOD we go directly to runtime in the non-smi case.
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      GenerateRegisterArgsPush(masm);
+      Label not_floats;
+      Label non_smi_result;
+      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
+                                                  use_sse3_,
+                                                  &not_floats);
+      switch (op_) {
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::SAR: __ sar_cl(eax); break;
+        case Token::SHL: __ shl_cl(eax); break;
+        case Token::SHR: __ shr_cl(eax); break;
+        default: UNREACHABLE();
+      }
+      if (op_ == Token::SHR) {
+        // Check if result is non-negative and fits in a smi.
+        __ test(eax, Immediate(0xc0000000));
+        __ j(not_zero, &call_runtime);
+      } else {
+        // Check if result fits in a smi.
+        __ cmp(eax, 0xc0000000);
+        __ j(negative, &non_smi_result);
+      }
+      // Tag smi result and return.
+      __ SmiTag(eax);
+      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
+
+      // All ops except SHR return a signed int32 that we load in
+      // a HeapNumber.
+      if (op_ != Token::SHR) {
+        __ bind(&non_smi_result);
+        // Allocate a heap number if needed.
+        __ mov(ebx, Operand(eax));  // ebx: result
+        NearLabel skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+          case OVERWRITE_RIGHT:
+            // If the operand was an object, we skip the
+            // allocation of a heap number.
+            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                1 * kPointerSize : 2 * kPointerSize));
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          __ mov(Operand(esp, 1 * kPointerSize), ebx);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
+      }
+
+      __ bind(&not_floats);
+      GenerateTypeTransitionWithSavedArgs(masm);
+      break;
+    }
+    default: UNREACHABLE(); break;
+  }
+
+  // If an allocation fails, or SHR or MOD hits a hard case,
+  // use the runtime system to get the correct result.
+  __ bind(&call_runtime);
+
+  switch (op_) {
+    case Token::ADD:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    case Token::SUB:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+  Label call_runtime;
+
+  __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+      break;
+    case Token::MOD:
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR:
+      GenerateRegisterArgsPush(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+
+  // Floating point case.
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      Label not_floats;
+      if (CpuFeatures::IsSupported(SSE2)) {
+        CpuFeatures::Scope use_sse2(SSE2);
+        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        GenerateHeapResultAllocation(masm, &call_runtime);
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        __ ret(0);
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
+        FloatingPointHelper::LoadFloatOperands(
+            masm,
+            ecx,
+            FloatingPointHelper::ARGS_IN_REGISTERS);
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        Label after_alloc_failure;
+        GenerateHeapResultAllocation(masm, &after_alloc_failure);
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        __ ret(0);
+        __ bind(&after_alloc_failure);
+        __ ffree();
+        __ jmp(&call_runtime);
+      }
+
+      __ bind(&not_floats);
+      break;
+    }
+    case Token::MOD: {
+      // For MOD we go directly to runtime in the non-smi case.
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND:
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHL:
+    case Token::SHR: {
+      Label non_smi_result;
+      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
+                                                  use_sse3_,
+                                                  &call_runtime);
+      switch (op_) {
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::SAR: __ sar_cl(eax); break;
+        case Token::SHL: __ shl_cl(eax); break;
+        case Token::SHR: __ shr_cl(eax); break;
+        default: UNREACHABLE();
+      }
+      if (op_ == Token::SHR) {
+        // Check if result is non-negative and fits in a smi.
+        __ test(eax, Immediate(0xc0000000));
+        __ j(not_zero, &call_runtime);
+      } else {
+        // Check if result fits in a smi.
+        __ cmp(eax, 0xc0000000);
+        __ j(negative, &non_smi_result);
+      }
+      // Tag smi result and return.
+      __ SmiTag(eax);
+      __ ret(2 * kPointerSize);  // Drop the arguments from the stack.
+
+      // All ops except SHR return a signed int32 that we load in
+      // a HeapNumber.
+      if (op_ != Token::SHR) {
+        __ bind(&non_smi_result);
+        // Allocate a heap number if needed.
+        __ mov(ebx, Operand(eax));  // ebx: result
+        NearLabel skip_allocation;
+        switch (mode_) {
+          case OVERWRITE_LEFT:
+          case OVERWRITE_RIGHT:
+            // If the operand was an object, we skip the
+            // allocation of a heap number.
+            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                1 * kPointerSize : 2 * kPointerSize));
+            __ test(eax, Immediate(kSmiTagMask));
+            __ j(not_zero, &skip_allocation, not_taken);
+            // Fall through!
+          case NO_OVERWRITE:
+            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+            __ bind(&skip_allocation);
+            break;
+          default: UNREACHABLE();
+        }
+        // Store the result in the HeapNumber and return.
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        } else {
+          __ mov(Operand(esp, 1 * kPointerSize), ebx);
+          __ fild_s(Operand(esp, 1 * kPointerSize));
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+        }
+        __ ret(2 * kPointerSize);
+      }
+      break;
+    }
+    default: UNREACHABLE(); break;
+  }
+
+  // If all else fails, use the runtime system to get the correct
+  // result.
+  __ bind(&call_runtime);
+  switch (op_) {
+    case Token::ADD: {
+      GenerateRegisterArgsPush(masm);
+      // Test for string arguments before calling runtime.
+      // Registers containing left and right operands respectively.
+      Register lhs, rhs;
+      lhs = edx;
+      rhs = eax;
+
+      // Test if left operand is a string.
+      NearLabel lhs_not_string;
+      __ test(lhs, Immediate(kSmiTagMask));
+      __ j(zero, &lhs_not_string);
+      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
+      __ j(above_equal, &lhs_not_string);
+
+      StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+      __ TailCallStub(&string_add_left_stub);
+
+      NearLabel call_add_runtime;
+      // Left operand is not a string, test right.
+      __ bind(&lhs_not_string);
+      __ test(rhs, Immediate(kSmiTagMask));
+      __ j(zero, &call_add_runtime);
+      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
+      __ j(above_equal, &call_add_runtime);
+
+      StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+      __ TailCallStub(&string_add_right_stub);
+
+      // Neither argument is a string.
+      __ bind(&call_add_runtime);
+      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+      break;
+    }
+    case Token::SUB:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+      break;
+    case Token::MUL:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+      break;
+    case Token::DIV:
+      GenerateRegisterArgsPush(masm);
+      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+      break;
+    case Token::MOD:
+      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+      break;
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+    MacroAssembler* masm,
+    Label* alloc_failure) {
+  Label skip_allocation;
+  OverwriteMode mode = mode_;
+  switch (mode) {
+    case OVERWRITE_LEFT: {
+      // If the argument in edx is already an object, we skip the
+      // allocation of a heap number.
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(not_zero, &skip_allocation, not_taken);
+      // Allocate a heap number for the result. Keep eax and edx intact
+      // for the possible runtime call.
+      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+      // Now edx can be overwritten losing one of the arguments as we are
+      // now done and will not need it any more.
+      __ mov(edx, Operand(ebx));
+      __ bind(&skip_allocation);
+      // Use object in edx as a result holder
+      __ mov(eax, Operand(edx));
+      break;
+    }
+    case OVERWRITE_RIGHT:
+      // If the argument in eax is already an object, we skip the
+      // allocation of a heap number.
+      __ test(eax, Immediate(kSmiTagMask));
+      __ j(not_zero, &skip_allocation, not_taken);
+      // Fall through!
+    case NO_OVERWRITE:
+      // Allocate a heap number for the result. Keep eax and edx intact
+      // for the possible runtime call.
+      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+      // Now eax can be overwritten losing one of the arguments as we are
+      // now done and will not need it any more.
+      __ mov(eax, ebx);
+      __ bind(&skip_allocation);
+      break;
+    default: UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+  __ pop(ecx);
+  __ push(edx);
+  __ push(eax);
+  __ push(ecx);
+}
+
+
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
-  // Input on stack:
-  // esp[4]: argument (should be number).
-  // esp[0]: return address.
-  // Test that eax is a number.
+  // TAGGED case:
+  //   Input:
+  //     esp[4]: tagged number input argument (should be number).
+  //     esp[0]: return address.
+  //   Output:
+  //     eax: tagged double result.
+  // UNTAGGED case:
+  //   Input:
+  //     esp[0]: return address.
+  //     xmm1: untagged double input argument.
+  //   Output:
+  //     xmm1: untagged double result.
+
   Label runtime_call;
   Label runtime_call_clear_stack;
-  NearLabel input_not_smi;
-  NearLabel loaded;
-  __ mov(eax, Operand(esp, kPointerSize));
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(not_zero, &input_not_smi);
-  // Input is a smi. Untag and load it onto the FPU stack.
-  // Then load the low and high words of the double into ebx, edx.
-  STATIC_ASSERT(kSmiTagSize == 1);
-  __ sar(eax, 1);
-  __ sub(Operand(esp), Immediate(2 * kPointerSize));
-  __ mov(Operand(esp, 0), eax);
-  __ fild_s(Operand(esp, 0));
-  __ fst_d(Operand(esp, 0));
-  __ pop(edx);
-  __ pop(ebx);
-  __ jmp(&loaded);
-  __ bind(&input_not_smi);
-  // Check if input is a HeapNumber.
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
-  __ j(not_equal, &runtime_call);
-  // Input is a HeapNumber. Push it on the FPU stack and load its
-  // low and high words into ebx, edx.
-  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-  __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
-  __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
+  Label skip_cache;
+  const bool tagged = (argument_type_ == TAGGED);
+  if (tagged) {
+    // Test that eax is a number.
+    NearLabel input_not_smi;
+    NearLabel loaded;
+    __ mov(eax, Operand(esp, kPointerSize));
+    __ test(eax, Immediate(kSmiTagMask));
+    __ j(not_zero, &input_not_smi);
+    // Input is a smi. Untag and load it onto the FPU stack.
+    // Then load the low and high words of the double into ebx, edx.
+    STATIC_ASSERT(kSmiTagSize == 1);
+    __ sar(eax, 1);
+    __ sub(Operand(esp), Immediate(2 * kPointerSize));
+    __ mov(Operand(esp, 0), eax);
+    __ fild_s(Operand(esp, 0));
+    __ fst_d(Operand(esp, 0));
+    __ pop(edx);
+    __ pop(ebx);
+    __ jmp(&loaded);
+    __ bind(&input_not_smi);
+    // Check if input is a HeapNumber.
+    __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+    __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
+    __ j(not_equal, &runtime_call);
+    // Input is a HeapNumber. Push it on the FPU stack and load its
+    // low and high words into ebx, edx.
+    __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+    __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+    __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
 
-  __ bind(&loaded);
-  // ST[0] == double value
+    __ bind(&loaded);
+  } else {  // UNTAGGED.
+    if (CpuFeatures::IsSupported(SSE4_1)) {
+      CpuFeatures::Scope sse4_scope(SSE4_1);
+      __ pextrd(Operand(edx), xmm1, 0x1);  // Copy xmm1[63..32] to edx.
+    } else {
+      __ pshufd(xmm0, xmm1, 0x1);
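+      // Shuffle code 0x1 puts xmm1[63..32] into xmm0[31..0] so that movd
+      // can extract the high word.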
+      __ movd(Operand(edx), xmm0);
+    }
+    __ movd(Operand(ebx), xmm1);
+  }
+
+  // ST[0] or xmm1 == double value
   // ebx = low 32 bits of double value
   // edx = high 32 bits of double value
   // Compute hash (the shifts are arithmetic):
@@ -1266,7 +2546,7 @@
   ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
   __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
 
-  // ST[0] == double value.
+  // ST[0] or xmm1 == double value.
   // ebx = low 32 bits of double value.
   // edx = high 32 bits of double value.
   // ecx = TranscendentalCache::hash(double value).
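+  // Each cache element occupies three words: the low and high words of
+  // the input double, followed by a pointer to the cached HeapNumber
+  // result (see the stores at offsets 0, kIntSize and 2 * kIntSize
+  // below).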
@@ -1303,33 +2583,83 @@
   __ j(not_equal, &cache_miss);
   // Cache hit!
   __ mov(eax, Operand(ecx, 2 * kIntSize));
-  __ fstp(0);
-  __ ret(kPointerSize);
+  if (tagged) {
+    __ fstp(0);
+    __ ret(kPointerSize);
+  } else {  // UNTAGGED.
+    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ Ret();
+  }
 
   __ bind(&cache_miss);
   // Update cache with new value.
   // We are short on registers, so use no_reg as scratch.
   // This gives slightly larger code.
-  __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
+  if (tagged) {
+    __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
+  } else {  // UNTAGGED.
+    __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
+    __ sub(Operand(esp), Immediate(kDoubleSize));
+    __ movdbl(Operand(esp, 0), xmm1);
+    __ fld_d(Operand(esp, 0));
+    __ add(Operand(esp), Immediate(kDoubleSize));
+  }
   GenerateOperation(masm);
   __ mov(Operand(ecx, 0), ebx);
   __ mov(Operand(ecx, kIntSize), edx);
   __ mov(Operand(ecx, 2 * kIntSize), eax);
   __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-  __ ret(kPointerSize);
+  if (tagged) {
+    __ ret(kPointerSize);
+  } else {  // UNTAGGED.
+    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ Ret();
 
-  __ bind(&runtime_call_clear_stack);
-  __ fstp(0);
-  __ bind(&runtime_call);
-  __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+    // Skip the cache and return the answer directly (untagged case only).
+    __ bind(&skip_cache);
+    __ sub(Operand(esp), Immediate(kDoubleSize));
+    __ movdbl(Operand(esp, 0), xmm1);
+    __ fld_d(Operand(esp, 0));
+    GenerateOperation(masm);
+    __ fstp_d(Operand(esp, 0));
+    __ movdbl(xmm1, Operand(esp, 0));
+    __ add(Operand(esp), Immediate(kDoubleSize));
+    // We return the value in xmm1 without adding it to the cache, but
+    // we cause a scavenging GC so that future allocations will succeed.
+    __ EnterInternalFrame();
+    // Allocate an unused object bigger than a HeapNumber.
+    __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    __ LeaveInternalFrame();
+    __ Ret();
+  }
+
+  // Call runtime, doing whatever allocation and cleanup is necessary.
+  if (tagged) {
+    __ bind(&runtime_call_clear_stack);
+    __ fstp(0);
+    __ bind(&runtime_call);
+    __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
+  } else {  // UNTAGGED.
+    __ bind(&runtime_call_clear_stack);
+    __ bind(&runtime_call);
+    __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
+    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
+    __ EnterInternalFrame();
+    __ push(eax);
+    __ CallRuntime(RuntimeFunction(), 1);
+    __ LeaveInternalFrame();
+    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ Ret();
+  }
 }
 
 
 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
   switch (type_) {
-    // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
       return Runtime::kAbort;
@@ -1339,85 +2669,90 @@
 
 void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
   // Only free register is edi.
-  NearLabel done;
-  ASSERT(type_ == TranscendentalCache::SIN ||
-         type_ == TranscendentalCache::COS);
-  // More transcendental types can be added later.
+  // Input value is on FP stack, and also in ebx/edx.
+  // Input value is possibly in xmm1.
+  // Address of result (a newly allocated HeapNumber) may be in eax.
+  if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
+    // Both fsin and fcos require arguments in the range +/-2^63 and
+    // return NaN for infinities and NaN. They can share all code except
+    // the actual fsin/fcos operation.
+    NearLabel in_range, done;
+    // If argument is outside the range -2^63..2^63, fsin/cos doesn't
+    // work. We must reduce it to the appropriate range.
+    __ mov(edi, edx);
+    __ and_(Operand(edi), Immediate(0x7ff00000));  // Exponent only.
+    int supported_exponent_limit =
+        (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
+    __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+    __ j(below, &in_range, taken);
+    // Check for infinity and NaN. Both return NaN for sin.
+    __ cmp(Operand(edi), Immediate(0x7ff00000));
+    NearLabel non_nan_result;
+    __ j(not_equal, &non_nan_result, taken);
+    // Input is +/-Infinity or NaN. Result is NaN.
+    __ fstp(0);
+    // NaN is represented by 0x7ff8000000000000.
+    __ push(Immediate(0x7ff80000));
+    __ push(Immediate(0));
+    __ fld_d(Operand(esp, 0));
+    __ add(Operand(esp), Immediate(2 * kPointerSize));
+    __ jmp(&done);
 
-  // Both fsin and fcos require arguments in the range +/-2^63 and
-  // return NaN for infinities and NaN. They can share all code except
-  // the actual fsin/fcos operation.
-  NearLabel in_range;
-  // If argument is outside the range -2^63..2^63, fsin/cos doesn't
-  // work. We must reduce it to the appropriate range.
-  __ mov(edi, edx);
-  __ and_(Operand(edi), Immediate(0x7ff00000));  // Exponent only.
-  int supported_exponent_limit =
-      (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
-  __ cmp(Operand(edi), Immediate(supported_exponent_limit));
-  __ j(below, &in_range, taken);
-  // Check for infinity and NaN. Both return NaN for sin.
-  __ cmp(Operand(edi), Immediate(0x7ff00000));
-  NearLabel non_nan_result;
-  __ j(not_equal, &non_nan_result, taken);
-  // Input is +/-Infinity or NaN. Result is NaN.
-  __ fstp(0);
-  // NaN is represented by 0x7ff8000000000000.
-  __ push(Immediate(0x7ff80000));
-  __ push(Immediate(0));
-  __ fld_d(Operand(esp, 0));
-  __ add(Operand(esp), Immediate(2 * kPointerSize));
-  __ jmp(&done);
+    __ bind(&non_nan_result);
 
-  __ bind(&non_nan_result);
+    // Use fpmod to restrict argument to the range +/-2*PI.
+    __ mov(edi, eax);  // Save eax before using fnstsw_ax.
+    __ fldpi();
+    __ fadd(0);
+    __ fld(1);
+    // FPU Stack: input, 2*pi, input.
+    {
+      NearLabel no_exceptions;
+      __ fwait();
+      __ fnstsw_ax();
+      // Clear the exception flags if the Invalid Operation or Zero
+      // Divide exceptions are set.
+      __ test(Operand(eax), Immediate(5));
+      __ j(zero, &no_exceptions);
+      __ fnclex();
+      __ bind(&no_exceptions);
+    }
 
-  // Use fpmod to restrict argument to the range +/-2*PI.
-  __ mov(edi, eax);  // Save eax before using fnstsw_ax.
-  __ fldpi();
-  __ fadd(0);
-  __ fld(1);
-  // FPU Stack: input, 2*pi, input.
-  {
-    NearLabel no_exceptions;
-    __ fwait();
-    __ fnstsw_ax();
-    // Clear if Illegal Operand or Zero Division exceptions are set.
-    __ test(Operand(eax), Immediate(5));
-    __ j(zero, &no_exceptions);
-    __ fnclex();
-    __ bind(&no_exceptions);
+    // Compute st(0) % st(1)
+    {
+      NearLabel partial_remainder_loop;
+      __ bind(&partial_remainder_loop);
+      __ fprem1();
+      __ fwait();
+      __ fnstsw_ax();
+      __ test(Operand(eax), Immediate(0x400 /* C2 */));
+      // If C2 is set, computation only has partial result. Loop to
+      // continue computation.
+      __ j(not_zero, &partial_remainder_loop);
+    }
+    // FPU Stack: input, 2*pi, input % 2*pi
+    __ fstp(2);
+    __ fstp(0);
+    __ mov(eax, edi);  // Restore eax (allocated HeapNumber pointer).
+
+    // FPU Stack: input % 2*pi
+    __ bind(&in_range);
+    switch (type_) {
+      case TranscendentalCache::SIN:
+        __ fsin();
+        break;
+      case TranscendentalCache::COS:
+        __ fcos();
+        break;
+      default:
+        UNREACHABLE();
+    }
+    __ bind(&done);
+  } else {
+    ASSERT(type_ == TranscendentalCache::LOG);
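+    // fyl2x computes ST(1) * log2(ST(0)) and pops the stack, so pushing
+    // ln(2) and exchanging the top two elements yields
+    // ln(2) * log2(x) = ln(x).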
+    __ fldln2();
+    __ fxch();
+    __ fyl2x();
   }
-
-  // Compute st(0) % st(1)
-  {
-    NearLabel partial_remainder_loop;
-    __ bind(&partial_remainder_loop);
-    __ fprem1();
-    __ fwait();
-    __ fnstsw_ax();
-    __ test(Operand(eax), Immediate(0x400 /* C2 */));
-    // If C2 is set, computation only has partial result. Loop to
-    // continue computation.
-    __ j(not_zero, &partial_remainder_loop);
-  }
-  // FPU Stack: input, 2*pi, input % 2*pi
-  __ fstp(2);
-  __ fstp(0);
-  __ mov(eax, edi);  // Restore eax (allocated HeapNumber pointer).
-
-  // FPU Stack: input % 2*pi
-  __ bind(&in_range);
-  switch (type_) {
-    case TranscendentalCache::SIN:
-      __ fsin();
-      break;
-    case TranscendentalCache::COS:
-      __ fcos();
-      break;
-    default:
-      UNREACHABLE();
-  }
-  __ bind(&done);
 }
 
 
@@ -1701,6 +3036,13 @@
 }
 
 
+void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
+                                                       bool use_sse3,
+                                                       Label* not_int32) {
+  return;
+}
+
+
 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
                                            Register number) {
   NearLabel load_smi, done;
@@ -1796,6 +3138,22 @@
 }
 
 
+void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
+                                                    Label* non_int32,
+                                                    Register scratch) {
+  __ cvttsd2si(scratch, Operand(xmm0));
+  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ ucomisd(xmm0, xmm2);
+  __ j(not_zero, non_int32);
+  __ j(carry, non_int32);
+  __ cvttsd2si(scratch, Operand(xmm1));
+  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ ucomisd(xmm1, xmm2);
+  __ j(not_zero, non_int32);
+  __ j(carry, non_int32);
+}
+
+
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                             Register scratch,
                                             ArgLocation arg_location) {
@@ -1879,6 +3237,12 @@
 }
 
 
+void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
+                                                     Label* non_int32) {
+  return;
+}
+
+
 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
   Label slow, done, undo;
 
@@ -2013,6 +3377,160 @@
 }
 
 
+void MathPowStub::Generate(MacroAssembler* masm) {
+  // Registers are used as follows:
+  // edx = base
+  // eax = exponent
+  // ecx = temporary, result
+
+  CpuFeatures::Scope use_sse2(SSE2);
+  Label allocate_return, call_runtime;
+
+  // Load input parameters.
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+  // Save 1 in xmm3 - we need this several times later on.
+  __ mov(ecx, Immediate(1));
+  __ cvtsi2sd(xmm3, Operand(ecx));
+
+  Label exponent_nonsmi;
+  Label base_nonsmi;
+  // If the exponent is a heap number, go to that specific case.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &exponent_nonsmi);
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(not_zero, &base_nonsmi);
+
+  // Optimized version when both the exponent and the base are smis.
+  Label powi;
+  __ SmiUntag(edx);
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ jmp(&powi);
+  // The exponent is a smi and the base is a heapnumber.
+  __ bind(&base_nonsmi);
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  __ j(not_equal, &call_runtime);
+
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+
+  // Optimized version of pow if exponent is a smi.
+  // xmm0 contains the base.
+  __ bind(&powi);
+  __ SmiUntag(eax);
+
+  // Save the exponent in edx (the base register), as we need to check
+  // later whether the exponent is negative. Base and exponent are known
+  // to be in different registers at this point.
+  __ mov(edx, eax);
+
+  // Get absolute value of exponent.
+  NearLabel no_neg;
+  __ cmp(eax, 0);
+  __ j(greater_equal, &no_neg);
+  __ neg(eax);
+  __ bind(&no_neg);
+
+  // Load xmm1 with 1.
+  __ movsd(xmm1, xmm3);
+  NearLabel while_true;
+  NearLabel no_multiply;
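+  // Square-and-multiply: scan the exponent bits from least to most
+  // significant, squaring the base at each step and multiplying it into
+  // the result whenever the current bit is set. For example, an exponent
+  // of 5 (binary 101) yields x * x^4.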
+
+  __ bind(&while_true);
+  __ shr(eax, 1);
+  __ j(not_carry, &no_multiply);
+  __ mulsd(xmm1, xmm0);
+  __ bind(&no_multiply);
+  __ test(eax, Operand(eax));
+  __ mulsd(xmm0, xmm0);
+  __ j(not_zero, &while_true);
+
+  // edx still holds the original value of the exponent - if the exponent
+  // is negative, return 1/result.
+  __ test(edx, Operand(edx));
+  __ j(positive, &allocate_return);
+  // Special case if xmm1 has reached infinity.
+  __ mov(ecx, Immediate(0x7FB00000));
+  __ movd(xmm0, Operand(ecx));
+  __ cvtss2sd(xmm0, xmm0);
+  __ ucomisd(xmm0, xmm1);
+  __ j(equal, &call_runtime);
+  __ divsd(xmm3, xmm1);
+  __ movsd(xmm1, xmm3);
+  __ jmp(&allocate_return);
+
+  // The exponent (or both operands) is a heapnumber - from here on we
+  // work on doubles.
+  __ bind(&exponent_nonsmi);
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  __ j(not_equal, &call_runtime);
+  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+  // Test if exponent is nan.
+  __ ucomisd(xmm1, xmm1);
+  __ j(parity_even, &call_runtime);
+
+  NearLabel base_not_smi;
+  NearLabel handle_special_cases;
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(not_zero, &base_not_smi);
+  __ SmiUntag(edx);
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ jmp(&handle_special_cases);
+
+  __ bind(&base_not_smi);
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  __ j(not_equal, &call_runtime);
+  __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+  __ and_(ecx, HeapNumber::kExponentMask);
+  __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
+  // base is NaN or +/-Infinity
+  __ j(greater_equal, &call_runtime);
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+
+  // base is in xmm0 and exponent is in xmm1.
+  __ bind(&handle_special_cases);
+  NearLabel not_minus_half;
+  // Test for -0.5.
+  // Load xmm2 with -0.5.
+  __ mov(ecx, Immediate(0xBF000000));
+  __ movd(xmm2, Operand(ecx));
+  __ cvtss2sd(xmm2, xmm2);
+  // xmm2 now has -0.5.
+  __ ucomisd(xmm2, xmm1);
+  __ j(not_equal, &not_minus_half);
+
+  // Calculate the reciprocal of the square root.
+  // Note that 1/sqrt(x) = sqrt(1/x).
+  __ divsd(xmm3, xmm0);
+  __ movsd(xmm1, xmm3);
+  __ sqrtsd(xmm1, xmm1);
+  __ jmp(&allocate_return);
+
+  // Test for 0.5.
+  __ bind(&not_minus_half);
+  // Load xmm2 with 0.5.
+  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
+  __ addsd(xmm2, xmm3);
+  // xmm2 now has 0.5.
+  __ ucomisd(xmm2, xmm1);
+  __ j(not_equal, &call_runtime);
+  // Calculates square root.
+  __ movsd(xmm1, xmm0);
+  __ sqrtsd(xmm1, xmm1);
+
+  __ bind(&allocate_return);
+  __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
+  __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
+  __ mov(eax, ecx);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&call_runtime);
+  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+}
+
+
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The key is in edx and the parameter count is in eax.
 
@@ -2507,6 +4025,87 @@
 }
 
 
+void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
+  const int kMaxInlineLength = 100;
+  Label slowcase;
+  NearLabel done;
+  __ mov(ebx, Operand(esp, kPointerSize * 3));
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(not_zero, &slowcase);
+  __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
+  __ j(above, &slowcase);
+  // Smi-tagging is equivalent to multiplying by 2.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  // Allocate RegExpResult followed by FixedArray with size in ebx.
+  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
+  // Elements:  [Map][Length][..elements..]
+  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
+                        times_half_pointer_size,
+                        ebx,  // In: Number of elements (times 2, being a smi)
+                        eax,  // Out: Start of allocation (tagged).
+                        ecx,  // Out: End of allocation.
+                        edx,  // Scratch register
+                        &slowcase,
+                        TAG_OBJECT);
+  // eax: Start of allocated area, object-tagged.
+
+  // Set JSArray map to global.regexp_result_map().
+  // Set empty properties FixedArray.
+  // Set elements to point to FixedArray allocated right after the JSArray.
+  // Interleave operations for better latency.
+  __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
+  __ mov(ecx, Immediate(Factory::empty_fixed_array()));
+  __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
+  __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
+  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
+  __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
+  __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
+
+  // Set input, index and length fields from arguments.
+  __ mov(ecx, Operand(esp, kPointerSize * 1));
+  __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
+  __ mov(ecx, Operand(esp, kPointerSize * 2));
+  __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
+  __ mov(ecx, Operand(esp, kPointerSize * 3));
+  __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
+
+  // Fill out the elements FixedArray.
+  // eax: JSArray.
+  // ebx: FixedArray.
+  // ecx: Number of elements in array, as smi.
+
+  // Set map.
+  __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  // Set length.
+  __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
+  // Fill contents of fixed-array with the-hole.
+  __ SmiUntag(ecx);
+  __ mov(edx, Immediate(Factory::the_hole_value()));
+  __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
+  // Fill fixed array elements with hole.
+  // eax: JSArray.
+  // ecx: Number of elements to fill.
+  // ebx: Start of elements in FixedArray.
+  // edx: the hole.
+  Label loop;
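+  // The initial test seeds the flags for the first less_equal check, so
+  // a zero-length array skips the loop entirely; the loop then fills the
+  // elements from the highest index down to index 0.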
+  __ test(ecx, Operand(ecx));
+  __ bind(&loop);
+  __ j(less_equal, &done);  // Jump if ecx is negative or zero.
+  __ sub(Operand(ecx), Immediate(1));
+  __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
+  __ jmp(&loop);
+
+  __ bind(&done);
+  __ ret(3 * kPointerSize);
+
+  __ bind(&slowcase);
+  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
+}
+
+
 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                          Register object,
                                                          Register result,
@@ -3125,7 +4724,7 @@
   __ j(zero, &failure_returned, not_taken);
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame();
+  __ LeaveExitFrame(save_doubles_);
   __ ret(0);
 
   // Handling of failure.
@@ -3225,7 +4824,7 @@
   // a garbage collection and retrying the builtin (twice).
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame();
+  __ EnterExitFrame(save_doubles_);
 
   // eax: result parameter for PerformGC, if any (setup below)
   // ebx: pointer to builtin function  (C callee-saved)
@@ -3375,76 +4974,125 @@
 
 
 void InstanceofStub::Generate(MacroAssembler* masm) {
-  // Get the object - go slow case if it's a smi.
-  Label slow;
-  __ mov(eax, Operand(esp, 2 * kPointerSize));  // 2 ~ return address, function
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &slow, not_taken);
+  // Fixed register usage throughout the stub.
+  Register object = eax;  // Object (lhs).
+  Register map = ebx;  // Map of the object.
+  Register function = edx;  // Function (rhs).
+  Register prototype = edi;  // Prototype of the function.
+  Register scratch = ecx;
+
+  // Get the object and function - they are always both needed.
+  Label slow, not_js_object;
+  if (!args_in_registers()) {
+    __ mov(object, Operand(esp, 2 * kPointerSize));
+    __ mov(function, Operand(esp, 1 * kPointerSize));
+  }
 
   // Check that the left hand is a JS object.
-  __ IsObjectJSObjectType(eax, eax, edx, &slow);
-
-  // Get the prototype of the function.
-  __ mov(edx, Operand(esp, 1 * kPointerSize));  // 1 ~ return address
-  // edx is function, eax is map.
+  __ test(object, Immediate(kSmiTagMask));
+  __ j(zero, &not_js_object, not_taken);
+  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
 
   // Look up the function and the map in the instanceof cache.
   NearLabel miss;
   ExternalReference roots_address = ExternalReference::roots_address();
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
-  __ cmp(edx, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+  __ cmp(function,
+         Operand::StaticArray(scratch, times_pointer_size, roots_address));
   __ j(not_equal, &miss);
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
-  __ cmp(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
+  __ cmp(map, Operand::StaticArray(scratch, times_pointer_size, roots_address));
   __ j(not_equal, &miss);
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
-  __ mov(eax, Operand::StaticArray(ecx, times_pointer_size, roots_address));
-  __ ret(2 * kPointerSize);
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+  __ mov(eax, Operand::StaticArray(scratch, times_pointer_size, roots_address));
+  __ IncrementCounter(&Counters::instance_of_cache, 1);
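+  // Pop the two stack arguments only if they were not passed in registers.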
+  __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
 
   __ bind(&miss);
-  __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
+  // Get the prototype of the function.
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
 
   // Check that the function prototype is a JS object.
-  __ test(ebx, Immediate(kSmiTagMask));
+  __ test(prototype, Immediate(kSmiTagMask));
   __ j(zero, &slow, not_taken);
-  __ IsObjectJSObjectType(ebx, ecx, ecx, &slow);
+  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
 
-  // Register mapping:
-  //   eax is object map.
-  //   edx is function.
-  //   ebx is function prototype.
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheMapRootIndex));
-  __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
-  __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), edx);
+  // Update the global instanceof cache with the current map and function. The
+  // cached answer will be set when it is known.
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
+         function);
 
-  __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
-
-  // Loop through the prototype chain looking for the function prototype.
+  // Loop through the prototype chain of the object looking for the function
+  // prototype.
+  __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
   NearLabel loop, is_instance, is_not_instance;
   __ bind(&loop);
-  __ cmp(ecx, Operand(ebx));
+  __ cmp(scratch, Operand(prototype));
   __ j(equal, &is_instance);
-  __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+  __ cmp(Operand(scratch), Immediate(Factory::null_value()));
   __ j(equal, &is_not_instance);
-  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+  __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
   __ jmp(&loop);
 
   __ bind(&is_instance);
+  __ IncrementCounter(&Counters::instance_of_stub_true, 1);
   __ Set(eax, Immediate(0));
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
-  __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
-  __ ret(2 * kPointerSize);
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
+  __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
 
   __ bind(&is_not_instance);
+  __ IncrementCounter(&Counters::instance_of_stub_false, 1);
   __ Set(eax, Immediate(Smi::FromInt(1)));
-  __ mov(ecx, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
-  __ mov(Operand::StaticArray(ecx, times_pointer_size, roots_address), eax);
-  __ ret(2 * kPointerSize);
+  __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
+  __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+
+  Label object_not_null, object_not_null_or_smi;
+  __ bind(&not_js_object);
+  // Before the null, smi and string value checks, check that the rhs is a
+  // function; for a non-function rhs an exception needs to be thrown.
+  __ test(function, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+  __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
+  __ j(not_equal, &slow, not_taken);
+
+  // Null is not an instance of anything.
+  __ cmp(object, Factory::null_value());
+  __ j(not_equal, &object_not_null);
+  __ IncrementCounter(&Counters::instance_of_stub_false_null, 1);
+  __ Set(eax, Immediate(Smi::FromInt(1)));
+  __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+
+  __ bind(&object_not_null);
+  // Smi values are not instances of anything.
+  __ test(object, Immediate(kSmiTagMask));
+  __ j(not_zero, &object_not_null_or_smi, not_taken);
+  __ Set(eax, Immediate(Smi::FromInt(1)));
+  __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
+
+  __ bind(&object_not_null_or_smi);
+  // String values are not instances of anything.
+  Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
+  __ j(NegateCondition(is_string), &slow);
+  __ IncrementCounter(&Counters::instance_of_stub_false_string, 1);
+  __ Set(eax, Immediate(Smi::FromInt(1)));
+  __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
 
   // Slow-case: Go through the JavaScript implementation.
   __ bind(&slow);
+  if (args_in_registers()) {
+    // Push arguments below return address.
+    __ pop(scratch);
+    __ push(object);
+    __ push(function);
+    __ push(scratch);
+  }
+  __ IncrementCounter(&Counters::instance_of_slow, 1);
   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
 }
 
@@ -4573,6 +6221,192 @@
   __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
+
+void StringCharAtStub::Generate(MacroAssembler* masm) {
+  // Expects two arguments (object, index) on the stack:
+
+  // Stack frame on entry.
+  //  esp[0]: return address
+  //  esp[4]: index
+  //  esp[8]: object
+
+  Register object = ebx;
+  Register index = eax;
+  Register scratch1 = ecx;
+  Register scratch2 = edx;
+  Register result = eax;
+
+  __ pop(scratch1);  // Return address.
+  __ pop(index);
+  __ pop(object);
+  __ push(scratch1);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharAtGenerator generator(object,
+                                  index,
+                                  scratch1,
+                                  scratch2,
+                                  result,
+                                  &need_conversion,
+                                  &need_conversion,
+                                  &index_out_of_range,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm);
+  __ jmp(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // the empty string.
+  __ Set(result, Immediate(Factory::empty_string()));
+  __ jmp(&done);
+
+  __ bind(&need_conversion);
+  // Move smi zero into the result register, which will trigger
+  // conversion.
+  __ Set(result, Immediate(Smi::FromInt(0)));
+  __ jmp(&done);
+
+  StubRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&done);
+  __ ret(0);
+}
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::SMIS);
+  NearLabel miss;
+  __ mov(ecx, Operand(edx));
+  __ or_(ecx, Operand(eax));
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(not_zero, &miss, not_taken);
+
+  if (GetCondition() == equal) {
+    // For equality we do not care about the sign of the result.
+    __ sub(eax, Operand(edx));
+  } else {
+    NearLabel done;
+    __ sub(edx, Operand(eax));
+    __ j(no_overflow, &done);
+    // Correct sign of result in case of overflow.
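+    // On overflow the true difference is outside the int32 range, so edx
+    // holds a nonzero value of the wrong sign; only the sign matters, and
+    // NOT (which computes -x - 1) flips edx to a value of the correct
+    // sign.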
+    __ not_(edx);
+    __ bind(&done);
+    __ mov(eax, edx);
+  }
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+  NearLabel generic_stub;
+  NearLabel unordered;
+  NearLabel miss;
+  __ mov(ecx, Operand(edx));
+  __ and_(ecx, Operand(eax));
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(zero, &generic_stub, not_taken);
+
+  __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
+  __ j(not_equal, &miss, not_taken);
+  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
+  __ j(not_equal, &miss, not_taken);
+
+  // Inline the double comparison and fall back to the general compare
+  // stub if NaN is involved or SSE2 or CMOV is unsupported.
+  if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
+    CpuFeatures::Scope scope1(SSE2);
+    CpuFeatures::Scope scope2(CMOV);
+
+    // Load left and right operand
+    __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+
+    // Compare operands
+    __ ucomisd(xmm0, xmm1);
+
+    // Don't base result on EFLAGS when a NaN is involved.
+    __ j(parity_even, &unordered, not_taken);
+
+    // Return a result of -1, 0, or 1, based on EFLAGS.
+    // Use mov, because xor would destroy the flag register.
+    __ mov(eax, 0);  // equal
+    __ mov(ecx, Immediate(Smi::FromInt(1)));
+    __ cmov(above, eax, Operand(ecx));
+    __ mov(ecx, Immediate(Smi::FromInt(-1)));
+    __ cmov(below, eax, Operand(ecx));
+    __ ret(0);
+
+    __ bind(&unordered);
+  }
+
+  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
+  __ bind(&generic_stub);
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+  ASSERT(state_ == CompareIC::OBJECTS);
+  NearLabel miss;
+  __ mov(ecx, Operand(edx));
+  __ and_(ecx, Operand(eax));
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
+  __ j(not_equal, &miss, not_taken);
+  __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
+  __ j(not_equal, &miss, not_taken);
+
+  ASSERT(GetCondition() == equal);
+  __ sub(eax, Operand(edx));
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+  // Save the registers.
+  __ pop(ecx);
+  __ push(edx);
+  __ push(eax);
+  __ push(ecx);
+
+  // Call the runtime system in a fresh internal frame.
+  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
+  __ EnterInternalFrame();
+  __ push(edx);
+  __ push(eax);
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ CallExternalReference(miss, 3);
+  __ LeaveInternalFrame();
+
+  // Compute the entry point of the rewritten stub.
+  __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
+
+  // Restore registers.
+  __ pop(ecx);
+  __ pop(eax);
+  __ pop(edx);
+  __ push(ecx);
+
+  // Do a tail call to the rewritten stub.
+  __ jmp(Operand(edi));
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 351636f..f66a8c7 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -40,13 +40,21 @@
 // TranscendentalCache runtime function.
 class TranscendentalCacheStub: public CodeStub {
  public:
-  explicit TranscendentalCacheStub(TranscendentalCache::Type type)
-      : type_(type) {}
+  enum ArgumentType {
+    TAGGED = 0,
+    UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
+  };
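+  // UNTAGGED occupies the bit just above the transcendental type bits,
+  // so MinorKey() can OR the two fields together without collisions.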
+
+  explicit TranscendentalCacheStub(TranscendentalCache::Type type,
+                                   ArgumentType argument_type)
+      : type_(type), argument_type_(argument_type) {}
   void Generate(MacroAssembler* masm);
  private:
   TranscendentalCache::Type type_;
+  ArgumentType argument_type_;
+
   Major MajorKey() { return TranscendentalCache; }
-  int MinorKey() { return type_; }
+  int MinorKey() { return type_ | argument_type_; }
   Runtime::FunctionId RuntimeFunction();
   void GenerateOperation(MacroAssembler* masm);
 };
@@ -83,7 +91,7 @@
         args_in_registers_(false),
         args_reversed_(false),
         static_operands_type_(operands_type),
-        runtime_operands_type_(BinaryOpIC::DEFAULT),
+        runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
         name_(NULL) {
     if (static_operands_type_.IsSmi()) {
       mode_ = NO_OVERWRITE;
@@ -117,6 +125,11 @@
         || op_ == Token::MUL || op_ == Token::DIV;
   }
 
+  void SetArgsInRegisters() {
+    ASSERT(ArgsInRegistersSupported());
+    args_in_registers_ = true;
+  }
+
  private:
   Token::Value op_;
   OverwriteMode mode_;
@@ -157,7 +170,7 @@
   class ArgsReversedBits: public BitField<bool, 11, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
   class StaticTypeInfoBits: public BitField<int, 13, 3> {};
-  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
+  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
@@ -185,7 +198,6 @@
     return (op_ == Token::ADD) || (op_ == Token::MUL);
   }
 
-  void SetArgsInRegisters() { args_in_registers_ = true; }
   void SetArgsReversed() { args_reversed_ = true; }
   bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
   bool HasArgsInRegisters() { return args_in_registers_; }
@@ -207,6 +219,123 @@
     return BinaryOpIC::ToState(runtime_operands_type_);
   }
 
+  virtual void FinishCode(Code* code) {
+    code->set_binary_op_type(runtime_operands_type_);
+  }
+
+  friend class CodeGenerator;
+};
+
+
+class TypeRecordingBinaryOpStub: public CodeStub {
+ public:
+  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+      : op_(op),
+        mode_(mode),
+        operands_type_(TRBinaryOpIC::UNINITIALIZED),
+        result_type_(TRBinaryOpIC::UNINITIALIZED),
+        name_(NULL) {
+    use_sse3_ = CpuFeatures::IsSupported(SSE3);
+    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+  }
+
+  TypeRecordingBinaryOpStub(
+      int key,
+      TRBinaryOpIC::TypeInfo operands_type,
+      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        use_sse3_(SSE3Bits::decode(key)),
+        operands_type_(operands_type),
+        result_type_(result_type),
+        name_(NULL) { }
+
+  // Generate code to call the stub with the supplied arguments. This will add
+  // code at the call site to prepare arguments either in registers or on the
+  // stack together with the actual call.
+  void GenerateCall(MacroAssembler* masm, Register left, Register right);
+  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
+
+ private:
+  enum SmiCodeGenerateHeapNumberResults {
+    ALLOW_HEAPNUMBER_RESULTS,
+    NO_HEAPNUMBER_RESULTS
+  };
+
+  Token::Value op_;
+  OverwriteMode mode_;
+  bool use_sse3_;
+
+  // Operand type information determined at runtime.
+  TRBinaryOpIC::TypeInfo operands_type_;
+  TRBinaryOpIC::TypeInfo result_type_;
+
+  char* name_;
+
+  const char* GetName();
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+           "(mode %d, runtime_type_info %s)\n",
+           MinorKey(),
+           Token::String(op_),
+           static_cast<int>(mode_),
+           TRBinaryOpIC::GetName(operands_type_));
+  }
+#endif
+
+  // Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 7> {};
+  class SSE3Bits: public BitField<bool, 9, 1> {};
+  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+
+  Major MajorKey() { return TypeRecordingBinaryOp; }
+  int MinorKey() {
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | SSE3Bits::encode(use_sse3_)
+           | OperandTypeInfoBits::encode(operands_type_)
+           | ResultTypeInfoBits::encode(result_type_);
+  }
+
+  void Generate(MacroAssembler* masm);
+  void GenerateGeneric(MacroAssembler* masm);
+  void GenerateSmiCode(MacroAssembler* masm,
+                       Label* slow,
+                       SmiCodeGenerateHeapNumberResults heapnumber_results);
+  void GenerateLoadArguments(MacroAssembler* masm);
+  void GenerateReturn(MacroAssembler* masm);
+  void GenerateUninitializedStub(MacroAssembler* masm);
+  void GenerateSmiStub(MacroAssembler* masm);
+  void GenerateInt32Stub(MacroAssembler* masm);
+  void GenerateHeapNumberStub(MacroAssembler* masm);
+  void GenerateStringStub(MacroAssembler* masm);
+  void GenerateGenericStub(MacroAssembler* masm);
+
+  void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
+  void GenerateRegisterArgsPush(MacroAssembler* masm);
+  void GenerateTypeTransition(MacroAssembler* masm);
+  void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+  bool IsOperationCommutative() {
+    return (op_ == Token::ADD) || (op_ == Token::MUL);
+  }
+
+  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return TRBinaryOpIC::ToState(operands_type_);
+  }
+
+  virtual void FinishCode(Code* code) {
+    code->set_type_recording_binary_op_type(operands_type_);
+    code->set_type_recording_binary_op_result_type(result_type_);
+  }
+
   friend class CodeGenerator;
 };
 
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 04ff200..2f14e82 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -104,12 +104,12 @@
 }
 
 
-void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
 
 
-void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveInternalFrame();
 }
 
@@ -7398,6 +7398,7 @@
   Load(args->at(1));
   Load(args->at(2));
   Load(args->at(3));
+
   RegExpExecStub stub;
   Result result = frame_->CallStub(&stub, 4);
   frame_->Push(&result);
@@ -7405,91 +7406,15 @@
 
 
 void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  // No stub. This code only occurs a few times in regexp.js.
-  const int kMaxInlineLength = 100;
   ASSERT_EQ(3, args->length());
+
   Load(args->at(0));  // Size of array, smi.
   Load(args->at(1));  // "index" property value.
   Load(args->at(2));  // "input" property value.
-  {
-    VirtualFrame::SpilledScope spilled_scope;
 
-    Label slowcase;
-    Label done;
-    __ mov(ebx, Operand(esp, kPointerSize * 2));
-    __ test(ebx, Immediate(kSmiTagMask));
-    __ j(not_zero, &slowcase);
-    __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
-    __ j(above, &slowcase);
-    // Smi-tagging is equivalent to multiplying by 2.
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    // Allocate RegExpResult followed by FixedArray with size in ebx.
-    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
-    // Elements:  [Map][Length][..elements..]
-    __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
-                          times_half_pointer_size,
-                          ebx,  // In: Number of elements (times 2, being a smi)
-                          eax,  // Out: Start of allocation (tagged).
-                          ecx,  // Out: End of allocation.
-                          edx,  // Scratch register
-                          &slowcase,
-                          TAG_OBJECT);
-    // eax: Start of allocated area, object-tagged.
-
-    // Set JSArray map to global.regexp_result_map().
-    // Set empty properties FixedArray.
-    // Set elements to point to FixedArray allocated right after the JSArray.
-    // Interleave operations for better latency.
-    __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
-    __ mov(ecx, Immediate(Factory::empty_fixed_array()));
-    __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
-    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
-    __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
-    __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
-    __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
-    __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
-
-    // Set input, index and length fields from arguments.
-    __ pop(FieldOperand(eax, JSRegExpResult::kInputOffset));
-    __ pop(FieldOperand(eax, JSRegExpResult::kIndexOffset));
-    __ pop(ecx);
-    __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
-
-    // Fill out the elements FixedArray.
-    // eax: JSArray.
-    // ebx: FixedArray.
-    // ecx: Number of elements in array, as smi.
-
-    // Set map.
-    __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
-           Immediate(Factory::fixed_array_map()));
-    // Set length.
-    __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
-    // Fill contents of fixed-array with the-hole.
-    __ SmiUntag(ecx);
-    __ mov(edx, Immediate(Factory::the_hole_value()));
-    __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
-    // Fill fixed array elements with hole.
-    // eax: JSArray.
-    // ecx: Number of elements to fill.
-    // ebx: Start of elements in FixedArray.
-    // edx: the hole.
-    Label loop;
-    __ test(ecx, Operand(ecx));
-    __ bind(&loop);
-    __ j(less_equal, &done);  // Jump if ecx is negative or zero.
-    __ sub(Operand(ecx), Immediate(1));
-    __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
-    __ jmp(&loop);
-
-    __ bind(&slowcase);
-    __ CallRuntime(Runtime::kRegExpConstructResult, 3);
-
-    __ bind(&done);
-  }
-  frame_->Forget(3);
-  frame_->Push(eax);
+  RegExpConstructResultStub stub;
+  Result result = frame_->CallStub(&stub, 3);
+  frame_->Push(&result);
 }
 
 
@@ -7987,7 +7912,8 @@
 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
   Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::SIN);
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::TAGGED);
   Result result = frame_->CallStub(&stub, 1);
   frame_->Push(&result);
 }
@@ -7996,7 +7922,18 @@
 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
   Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::COS);
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::TAGGED);
+  Result result = frame_->CallStub(&stub, 1);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+  Load(args->at(0));
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::TAGGED);
   Result result = frame_->CallStub(&stub, 1);
   frame_->Push(&result);
 }
@@ -8266,7 +8203,6 @@
       switch (op) {
         case Token::SUB: {
           __ neg(value.reg());
-          frame_->Push(&value);
           if (node->no_negative_zero()) {
             // -MIN_INT is MIN_INT with the overflow flag set.
             unsafe_bailout_->Branch(overflow);
@@ -8279,18 +8215,17 @@
         }
         case Token::BIT_NOT: {
           __ not_(value.reg());
-          frame_->Push(&value);
           break;
         }
         case Token::ADD: {
           // Unary plus has no effect on int32 values.
-          frame_->Push(&value);
           break;
         }
         default:
           UNREACHABLE();
           break;
       }
+      frame_->Push(&value);
     } else {
       Load(node->expression());
       bool can_overwrite = node->expression()->ResultOverwriteAllowed();
@@ -9208,7 +9143,7 @@
     case Token::INSTANCEOF: {
       if (!left_already_loaded) Load(left);
       Load(right);
-      InstanceofStub stub;
+      InstanceofStub stub(InstanceofStub::kNoFlags);
       Result answer = frame_->CallStub(&stub, 2);
       answer.ToRegister();
       __ test(answer.reg(), Operand(answer.reg()));
@@ -10082,14 +10017,15 @@
 
 #define __ masm.
 
+
+static void MemCopyWrapper(void* dest, const void* src, size_t size) {
+  memcpy(dest, src, size);
+}
+
+
 MemCopyFunction CreateMemCopyFunction() {
-  size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
-  CHECK(buffer);
-  HandleScope handles;
-  MacroAssembler masm(buffer, static_cast<int>(actual_size));
+  HandleScope scope;
+  MacroAssembler masm(NULL, 1 * KB);
 
   // Generated code is put into a fixed, unmovable, buffer, and not into
   // the V8 heap. We can't, and don't, refer to any relocatable addresses
@@ -10183,6 +10119,7 @@
       __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
       __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
 
+      __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
       __ pop(esi);
       __ pop(edi);
       __ ret(0);
@@ -10229,6 +10166,7 @@
       __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
       __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
 
+      __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
       __ pop(esi);
       __ pop(edi);
       __ ret(0);
@@ -10272,6 +10210,7 @@
     __ mov(eax, Operand(src, count, times_1, -4));
     __ mov(Operand(dst, count, times_1, -4), eax);
 
+    __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
     __ pop(esi);
     __ pop(edi);
     __ ret(0);
@@ -10279,8 +10218,15 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  // Call the function from C++.
-  return FUNCTION_CAST<MemCopyFunction>(buffer);
+  ASSERT(desc.reloc_size == 0);
+
+  // Copy the generated code into an executable chunk and return a pointer
+  // to the first instruction in it as a C++ function pointer.
+  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
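+  // If no executable chunk can be allocated, fall back to the plain
+  // memcpy wrapper defined above.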
+  if (chunk == NULL) return &MemCopyWrapper;
+  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
+  return FUNCTION_CAST<MemCopyFunction>(chunk->GetStartAddress());
 }
 
 #undef __
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index d1a2036..46b12cb 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -43,9 +43,6 @@
 class RegisterFile;
 class RuntimeCallHelper;
 
-enum InitState { CONST_INIT, NOT_CONST_INIT };
-enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
 
 // -------------------------------------------------------------------------
 // Reference support
@@ -310,6 +307,9 @@
                                        Code::Flags flags,
                                        CompilationInfo* info);
 
+  // Print the code after compiling it.
+  static void PrintCode(Handle<Code> code, CompilationInfo* info);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -398,8 +398,9 @@
   // Node visitors.
   void VisitStatements(ZoneList<Statement*>* statements);
 
+  virtual void VisitSlot(Slot* node);
 #define DEF_VISIT(type) \
-  void Visit##type(type* node);
+  virtual void Visit##type(type* node);
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
@@ -705,8 +706,9 @@
   void GenerateMathSin(ZoneList<Expression*>* args);
   void GenerateMathCos(ZoneList<Expression*>* args);
   void GenerateMathSqrt(ZoneList<Expression*>* args);
+  void GenerateMathLog(ZoneList<Expression*>* args);
 
-  // Check whether two RegExps are equivalent
+  // Check whether two RegExps are equivalent.
   void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
 
   void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
@@ -782,6 +784,7 @@
   friend class FastCodeGenerator;
   friend class FullCodeGenerator;
   friend class FullCodeGenSyntaxChecker;
+  friend class LCodeGen;
 
   friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
 
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index b15140f..d64257f 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -42,7 +42,11 @@
 namespace internal {
 
 void CPU::Setup() {
-  CpuFeatures::Probe();
+  CpuFeatures::Clear();
+  CpuFeatures::Probe(true);
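+  // The optimizing compiler relies on SSE2 on ia32 and (presumably)
+  // cannot be used while a snapshot is being serialized, so disable it
+  // in either case.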
+  if (!CpuFeatures::IsSupported(SSE2) || Serializer::enabled()) {
+    V8::DisableCrankshaft();
+  }
 }
 
 
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
new file mode 100644
index 0000000..d95df3e
--- /dev/null
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -0,0 +1,615 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+
+int Deoptimizer::table_entry_size_ = 10;
+
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+  AssertNoAllocation no_allocation;
+
+  if (!function->IsOptimized()) return;
+
+  // Get the optimized code.
+  Code* code = function->code();
+
+  // Invalidate the relocation information, as it will become stale after
+  // the code patching below and is not needed any more.
+  code->InvalidateRelocation();
+
+  // For each return after a safepoint, insert an absolute call to the
+  // corresponding deoptimization entry.
+  unsigned last_pc_offset = 0;
+  SafepointTable table(function->code());
+  for (unsigned i = 0; i < table.length(); i++) {
+    unsigned pc_offset = table.GetPcOffset(i);
+    int deoptimization_index = table.GetDeoptimizationIndex(i);
+    int gap_code_size = table.GetGapCodeSize(i);
+#ifdef DEBUG
+    // Destroy the code which is not supposed to run again.
+    unsigned instructions = pc_offset - last_pc_offset;
+    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                          instructions);
+    for (unsigned i = 0; i < instructions; i++) {
+      destroyer.masm()->int3();
+    }
+#endif
+    last_pc_offset = pc_offset;
+    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
+      CodePatcher patcher(
+          code->instruction_start() + pc_offset + gap_code_size,
+          Assembler::kCallInstructionLength);
+      patcher.masm()->call(GetDeoptimizationEntry(deoptimization_index, LAZY),
+                           RelocInfo::NONE);
+      last_pc_offset += gap_code_size + Assembler::kCallInstructionLength;
+    }
+  }
+#ifdef DEBUG
+  // Destroy the code which is not supposed to run again.
+  unsigned instructions = code->safepoint_table_start() - last_pc_offset;
+  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+                        instructions);
+  for (unsigned i = 0; i < instructions; i++) {
+    destroyer.masm()->int3();
+  }
+#endif
+
+  // Add the deoptimizing code to the list.
+  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+  node->set_next(deoptimizing_code_list_);
+  deoptimizing_code_list_ = node;
+
+  // Set the code for the function to non-optimized version.
+  function->ReplaceCode(function->shared()->code());
+
+  if (FLAG_trace_deopt) {
+    PrintF("[forced deoptimization: ");
+    function->PrintName();
+    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+  }
+}
+
+
+void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+                                      Code* replacement_code) {
+  // The stack check code matches the pattern (on ia32, for example):
+  //
+  //     cmp esp, <limit>
+  //     jae ok
+  //     call <stack guard>
+  // ok: ...
+  //
+  // We will patch the code to:
+  //
+  //     cmp esp, <limit>  ;; Not changed
+  //     nop
+  //     nop
+  //     call <on-stack replacement>
+  // ok:
+  Address call_target_address = rinfo->pc();
+  ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
+         *(call_target_address - 2) == 0x05 &&  // offset
+         *(call_target_address - 1) == 0xe8);   // call
+  *(call_target_address - 3) = 0x90;  // nop
+  *(call_target_address - 2) = 0x90;  // nop
+  rinfo->set_target_address(replacement_code->entry());
+}
+
+
+void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+  Address call_target_address = rinfo->pc();
+  ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
+         *(call_target_address - 2) == 0x90 &&  // nop
+         *(call_target_address - 1) == 0xe8);   // call
+  *(call_target_address - 3) = 0x73;  // jae
+  *(call_target_address - 2) = 0x05;  // offset
+  rinfo->set_target_address(check_code->entry());
+}
+
+
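+// Linear search of the deoptimization input data for the entry whose ast
+// id matches and whose translation describes exactly one frame.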
+static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
+  ByteArray* translations = data->TranslationByteArray();
+  int length = data->DeoptCount();
+  for (int i = 0; i < length; i++) {
+    if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
+      TranslationIterator it(translations, data->TranslationIndex(i)->value());
+      int value = it.Next();
+      ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
+      // Read the number of frames.
+      value = it.Next();
+      if (value == 1) return i;
+    }
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+void Deoptimizer::DoComputeOsrOutputFrame() {
+  DeoptimizationInputData* data = DeoptimizationInputData::cast(
+      optimized_code_->deoptimization_data());
+  unsigned ast_id = data->OsrAstId()->value();
+  // TODO(kasperl): This should not be the bailout_id_. It should be
+  // the ast id. Confusing.
+  ASSERT(bailout_id_ == ast_id);
+
+  int bailout_id = LookupBailoutId(data, ast_id);
+  unsigned translation_index = data->TranslationIndex(bailout_id)->value();
+  ByteArray* translations = data->TranslationByteArray();
+
+  TranslationIterator iterator(translations, translation_index);
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator.Next());
+  ASSERT(Translation::BEGIN == opcode);
+  USE(opcode);
+  int count = iterator.Next();
+  ASSERT(count == 1);
+  USE(count);
+
+  opcode = static_cast<Translation::Opcode>(iterator.Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  unsigned node_id = iterator.Next();
+  USE(node_id);
+  ASSERT(node_id == ast_id);
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
+  USE(function);
+  ASSERT(function == function_);
+  unsigned height = iterator.Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  USE(height_in_bytes);
+
+  unsigned fixed_size = ComputeFixedSize(function_);
+  unsigned input_frame_size = input_->GetFrameSize();
+  ASSERT(fixed_size + height_in_bytes == input_frame_size);
+
+  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+  unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
+  unsigned outgoing_size = outgoing_height * kPointerSize;
+  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
+  ASSERT(outgoing_size == 0);  // OSR does not happen in the middle of a call.
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
+           reinterpret_cast<intptr_t>(function_));
+    function_->PrintName();
+    PrintF(" => node=%u, frame=%d->%d]\n",
+           ast_id,
+           input_frame_size,
+           output_frame_size);
+  }
+
+  // There's only one output frame in the OSR case.
+  output_count_ = 1;
+  output_ = new FrameDescription*[1];
+  output_[0] = new(output_frame_size) FrameDescription(
+      output_frame_size, function_);
+
+  // Clear the incoming parameters in the optimized frame to avoid
+  // confusing the garbage collector.
+  unsigned output_offset = output_frame_size - kPointerSize;
+  int parameter_count = function_->shared()->formal_parameter_count() + 1;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_[0]->SetFrameSlot(output_offset, 0);
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the incoming parameters. This may overwrite some of the
+  // incoming argument slots we've just cleared.
+  int input_offset = input_frame_size - kPointerSize;
+  bool ok = true;
+  int limit = input_offset - (parameter_count * kPointerSize);
+  while (ok && input_offset > limit) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Set them up explicitly.
+  for (int i = 0; ok && i < 4; i++) {
+    uint32_t input_value = input_->GetFrameSlot(input_offset);
+    if (FLAG_trace_osr) {
+      PrintF("    [esp + %d] <- 0x%08x ; [esp + %d] (fixed part)\n",
+             output_offset,
+             input_value,
+             input_offset);
+    }
+    output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
+    input_offset -= kPointerSize;
+    output_offset -= kPointerSize;
+  }
+
+  // Translate the rest of the frame.
+  while (ok && input_offset >= 0) {
+    ok = DoOsrTranslateCommand(&iterator, &input_offset);
+  }
+
+  // If translation of any command failed, continue using the input frame.
+  if (!ok) {
+    delete output_[0];
+    output_[0] = input_;
+    output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
+  } else {
+    // Set up the frame pointer and the context pointer.
+    output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
+    output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
+
+    unsigned pc_offset = data->OsrPcOffset()->value();
+    uint32_t pc = reinterpret_cast<uint32_t>(
+        optimized_code_->entry() + pc_offset);
+    output_[0]->SetPc(pc);
+  }
+  Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
+  output_[0]->SetContinuation(
+      reinterpret_cast<uint32_t>(continuation->entry()));
+
+  if (FLAG_trace_osr) {
+    PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
+           ok ? "finished" : "aborted",
+           reinterpret_cast<intptr_t>(function));
+    function->PrintName();
+    PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
+  }
+}
+
+
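+// Builds one output frame from the translation stream.  The fixed part of
+// the frame (incoming parameters, caller's pc and fp, context, and function)
+// is synthesized to look exactly as the non-optimized code would have left
+// it; the remaining slots are filled in by translation commands.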
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+                                 int frame_index) {
+  // Read the ast node id, function, and frame height for this output frame.
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
+  int node_id = iterator->Next();
+  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  unsigned height = iterator->Next();
+  unsigned height_in_bytes = height * kPointerSize;
+  if (FLAG_trace_deopt) {
+    PrintF("  translating ");
+    function->PrintName();
+    PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+  }
+
+  // The 'fixed' part of the frame consists of the incoming parameters and
+  // the part described by JavaScriptFrameConstants.
+  unsigned fixed_frame_size = ComputeFixedSize(function);
+  unsigned input_frame_size = input_->GetFrameSize();
+  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+  // Allocate and store the output frame description.
+  FrameDescription* output_frame =
+      new(output_frame_size) FrameDescription(output_frame_size, function);
+
+  bool is_bottommost = (0 == frame_index);
+  bool is_topmost = (output_count_ - 1 == frame_index);
+  ASSERT(frame_index >= 0 && frame_index < output_count_);
+  ASSERT(output_[frame_index] == NULL);
+  output_[frame_index] = output_frame;
+
+  // The top address for the bottommost output frame can be computed from
+  // the input frame pointer and the output frame's height.  For all
+  // subsequent output frames, it can be computed from the previous one's
+  // top address and the current frame's size.
+  uint32_t top_address;
+  if (is_bottommost) {
+    // 2 = context and function in the frame.
+    top_address =
+        input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
+  } else {
+    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+  }
+  output_frame->SetTop(top_address);
+
+  // Compute the incoming parameter translation.
+  int parameter_count = function->shared()->formal_parameter_count() + 1;
+  unsigned output_offset = output_frame_size;
+  unsigned input_offset = input_frame_size;
+  for (int i = 0; i < parameter_count; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  input_offset -= (parameter_count * kPointerSize);
+
+  // There are no translation commands for the caller's pc and fp, the
+  // context, and the function.  Synthesize their values and set them up
+  // explicitly.
+  //
+  // The caller's pc for the bottommost output frame is the same as in the
+  // input frame.  For all subsequent output frames, it can be read from the
+  // previous one.  This frame's pc can be computed from the non-optimized
+  // function code and AST id of the bailout.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  intptr_t value;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetPc();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The caller's frame pointer for the bottommost output frame is the same
+  // as in the input frame.  For all subsequent output frames, it can be
+  // read from the previous one.  Also compute and set this frame's frame
+  // pointer.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  if (is_bottommost) {
+    value = input_->GetFrameSlot(input_offset);
+  } else {
+    value = output_[frame_index - 1]->GetFp();
+  }
+  output_frame->SetFrameSlot(output_offset, value);
+  intptr_t fp_value = top_address + output_offset;
+  ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
+  output_frame->SetFp(fp_value);
+  if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+           fp_value, output_offset, value);
+  }
+
+  // The context can be read from the function as long as we don't
+  // optimize functions that need local contexts.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(function->context());
+  // The context for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (is_topmost) output_frame->SetRegister(esi.code(), value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // The function was mentioned explicitly in the BEGIN_FRAME.
+  output_offset -= kPointerSize;
+  input_offset -= kPointerSize;
+  value = reinterpret_cast<uint32_t>(function);
+  // The function for the bottommost output frame should also agree with the
+  // input frame.
+  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  output_frame->SetFrameSlot(output_offset, value);
+  if (FLAG_trace_deopt) {
+    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
+           top_address + output_offset, output_offset, value);
+  }
+
+  // Translate the rest of the frame.
+  for (unsigned i = 0; i < height; ++i) {
+    output_offset -= kPointerSize;
+    DoTranslateCommand(iterator, frame_index, output_offset);
+  }
+  ASSERT(0 == output_offset);
+
+  // Compute this frame's PC, state, and continuation.
+  Code* non_optimized_code = function->shared()->code();
+  FixedArray* raw_data = non_optimized_code->deoptimization_data();
+  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+  Address start = non_optimized_code->instruction_start();
+  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
+  output_frame->SetPc(pc_value);
+
+  FullCodeGenerator::State state =
+      FullCodeGenerator::StateField::decode(pc_and_state);
+  output_frame->SetState(Smi::FromInt(state));
+
+  // Set the continuation for the topmost frame.
+  if (is_topmost) {
+    Code* continuation = (bailout_type_ == EAGER)
+        ? Builtins::builtin(Builtins::NotifyDeoptimized)
+        : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+    output_frame->SetContinuation(
+        reinterpret_cast<uint32_t>(continuation->entry()));
+  }
+
+  if (output_count_ - 1 == frame_index) iterator->Done();
+}
+
+
+#define __ masm()->
+
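+// Generates the common deoptimization entry code: save all general purpose
+// and XMM registers, call into the runtime to allocate a Deoptimizer and
+// capture the input frame, let it compute the output frames, then push
+// those frames onto the stack and return to the continuation.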
+void Deoptimizer::EntryGenerator::Generate() {
+  GeneratePrologue();
+  CpuFeatures::Scope scope(SSE2);
+
+  // Save all general purpose registers before messing with them.
+  const int kNumberOfRegisters = Register::kNumRegisters;
+
+  const int kDoubleRegsSize = kDoubleSize *
+                              XMMRegister::kNumAllocatableRegisters;
+  __ sub(Operand(esp), Immediate(kDoubleRegsSize));
+  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+    XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+    int offset = i * kDoubleSize;
+    __ movdbl(Operand(esp, offset), xmm_reg);
+  }
+
+  __ pushad();
+
+  const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
+                                      kDoubleRegsSize;
+
+  // Get the bailout id from the stack.
+  __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
+
+  // Get the address of the location in the code object if possible
+  // and compute the fp-to-sp delta in register edx.
+  if (type() == EAGER) {
+    __ Set(ecx, Immediate(0));
+    __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+  } else {
+    __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+    __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
+  }
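+  // For EAGER deopts only the bailout id was pushed on top of the saved
+  // registers; LAZY deopts also pushed the code address, hence the extra
+  // word both here and when the stack is cleaned up below.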
+  __ sub(edx, Operand(ebp));
+  __ neg(edx);
+
+  // Allocate a new deoptimizer object.
+  __ PrepareCallCFunction(5, eax);
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(Operand(esp, 0 * kPointerSize), eax);  // Function.
+  __ mov(Operand(esp, 1 * kPointerSize), Immediate(type()));  // Bailout type.
+  __ mov(Operand(esp, 2 * kPointerSize), ebx);  // Bailout id.
+  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Code address or 0.
+  __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
+
+  // Preserve deoptimizer object in register eax and get the input
+  // frame descriptor pointer.
+  __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
+
+  // Fill in the input registers.
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kIntSize) + FrameDescription::registers_offset();
+    __ mov(ecx, Operand(esp, (kNumberOfRegisters - 1 - i) * kPointerSize));
+    __ mov(Operand(ebx, offset), ecx);
+  }
+
+  // Fill in the double input registers.
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+    __ movdbl(xmm0, Operand(esp, src_offset));
+    __ movdbl(Operand(ebx, dst_offset), xmm0);
+  }
+
+  // Remove the bailout id and the general purpose registers from the stack.
+  if (type() == EAGER) {
+    __ add(Operand(esp), Immediate(kSavedRegistersAreaSize + kPointerSize));
+  } else {
+    __ add(Operand(esp), Immediate(kSavedRegistersAreaSize + 2 * kPointerSize));
+  }
+
+  // Compute a pointer to the unwinding limit in register ecx; that is
+  // the first stack slot not part of the input frame.
+  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+  __ add(ecx, Operand(esp));
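+  // Everything between esp and ecx now belongs to the optimized frame that
+  // is about to be unwound.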
+
+  // Unwind the stack down to - but not including - the unwinding
+  // limit and copy the contents of the activation frame to the input
+  // frame description.
+  __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
+  Label pop_loop;
+  __ bind(&pop_loop);
+  __ pop(Operand(edx, 0));
+  __ add(Operand(edx), Immediate(sizeof(uint32_t)));
+  __ cmp(ecx, Operand(esp));
+  __ j(not_equal, &pop_loop);
+
+  // Compute the output frame in the deoptimizer.
+  __ push(eax);
+  __ PrepareCallCFunction(1, ebx);
+  __ mov(Operand(esp, 0 * kPointerSize), eax);
+  __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+  __ pop(eax);
+
+  // Replace the current frame with the output frames.
+  Label outer_push_loop, inner_push_loop;
+  // Outer loop state: eax = current FrameDescription**, edx = one past the
+  // last FrameDescription**.
+  __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
+  __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
+  __ lea(edx, Operand(eax, edx, times_4, 0));
+  __ bind(&outer_push_loop);
+  // Inner loop state: ebx = current FrameDescription*, ecx = loop index.
+  __ mov(ebx, Operand(eax, 0));
+  __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+  __ bind(&inner_push_loop);
+  __ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
+  __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
+  __ test(ecx, Operand(ecx));
+  __ j(not_zero, &inner_push_loop);
+  __ add(Operand(eax), Immediate(kPointerSize));
+  __ cmp(eax, Operand(edx));
+  __ j(below, &outer_push_loop);
+
+  // In case of OSR, we have to restore the XMM registers.
+  if (type() == OSR) {
+    for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+      XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+      int src_offset = i * kDoubleSize + double_regs_offset;
+      __ movdbl(xmm_reg, Operand(ebx, src_offset));
+    }
+  }
+
+  // Push state, pc, and continuation from the last output frame.
+  if (type() != OSR) {
+    __ push(Operand(ebx, FrameDescription::state_offset()));
+  }
+  __ push(Operand(ebx, FrameDescription::pc_offset()));
+  __ push(Operand(ebx, FrameDescription::continuation_offset()));
+
+  // Push the registers from the last output frame.
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kIntSize) + FrameDescription::registers_offset();
+    __ push(Operand(ebx, offset));
+  }
+
+  // Restore the registers from the stack.
+  __ popad();
+
+  // Return to the continuation point.
+  __ ret(0);
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+  // Create a sequence of deoptimization entries.
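+  // Every entry has the same fixed size: it pushes its own index and jumps
+  // to the common code, so the index left on the stack identifies which
+  // entry was taken.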
+  Label done;
+  for (int i = 0; i < count(); i++) {
+    int start = masm()->pc_offset();
+    USE(start);
+    __ push_imm32(i);
+    __ jmp(&done);
+    ASSERT(masm()->pc_offset() - start == table_entry_size_);
+  }
+  __ bind(&done);
+}
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 52c2b38..dfbcbb7 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -733,7 +733,9 @@
             case 0xE4: mnem = "ftst"; break;
             case 0xE8: mnem = "fld1"; break;
             case 0xEB: mnem = "fldpi"; break;
+            case 0xED: mnem = "fldln2"; break;
             case 0xEE: mnem = "fldz"; break;
+            case 0xF1: mnem = "fyl2x"; break;
             case 0xF5: mnem = "fprem1"; break;
             case 0xF7: mnem = "fincstp"; break;
             case 0xF8: mnem = "fprem"; break;
@@ -1105,6 +1107,21 @@
             } else {
               UnimplementedInstruction();
             }
+          } else if (*data == 0x3A) {
+            data++;
+            if (*data == 0x16) {
+              data++;
+              int mod, regop, rm;
+              get_modrm(*data, &mod, &regop, &rm);
+              int8_t imm8 = static_cast<int8_t>(data[1]);
+              AppendToBuffer("pextrd %s,%s,%d",
+                             NameOfXMMRegister(regop),
+                             NameOfXMMRegister(rm),
+                             static_cast<int>(imm8));
+              data += 2;
+            } else {
+              UnimplementedInstruction();
+            }
           } else if (*data == 0x2E || *data == 0x2F) {
             const char* mnem = (*data == 0x2E) ? "ucomisd" : "comisd";
             data++;
@@ -1127,6 +1144,14 @@
                            NameOfCPURegister(regop),
                            NameOfXMMRegister(rm));
             data++;
+          } else if (*data == 0x54) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("andpd %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else if (*data == 0x57) {
             data++;
             int mod, regop, rm;
@@ -1147,6 +1172,25 @@
             get_modrm(*data, &mod, &regop, &rm);
             AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
             data += PrintRightOperand(data);
+          } else if (*data == 0x70) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            int8_t imm8 = static_cast<int8_t>(data[1]);
+            AppendToBuffer("pshufd %s,%s,%d",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm),
+                           static_cast<int>(imm8));
+            data += 2;
+          } else if (*data == 0x73) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            int8_t imm8 = static_cast<int8_t>(data[1]);
+            AppendToBuffer("psllq %s,%d",
+                           NameOfXMMRegister(rm),
+                           static_cast<int>(imm8));
+            data += 2;
           } else if (*data == 0x7F) {
             AppendToBuffer("movdqa ");
             data++;
@@ -1154,6 +1198,21 @@
             get_modrm(*data, &mod, &regop, &rm);
             data += PrintRightOperand(data);
             AppendToBuffer(",%s", NameOfXMMRegister(regop));
+          } else if (*data == 0x7E) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("movd ");
+            data += PrintRightOperand(data);
+            AppendToBuffer(",%s", NameOfXMMRegister(regop));
+          } else if (*data == 0xDB) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("pand %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else if (*data == 0xE7) {
             AppendToBuffer("movntdq ");
             data++;
@@ -1162,30 +1221,13 @@
             data += PrintRightOperand(data);
             AppendToBuffer(",%s", NameOfXMMRegister(regop));
           } else if (*data == 0xEF) {
-             data++;
-             int mod, regop, rm;
-             get_modrm(*data, &mod, &regop, &rm);
-             AppendToBuffer("pxor %s,%s",
-                            NameOfXMMRegister(regop),
-                            NameOfXMMRegister(rm));
-             data++;
-          } else if (*data == 0x73) {
-             data++;
-             int mod, regop, rm;
-             get_modrm(*data, &mod, &regop, &rm);
-             int8_t imm8 = static_cast<int8_t>(data[1]);
-             AppendToBuffer("psllq %s,%d",
-                            NameOfXMMRegister(rm),
-                            static_cast<int>(imm8));
-             data += 2;
-          } else if (*data == 0x54) {
-             data++;
-             int mod, regop, rm;
-             get_modrm(*data, &mod, &regop, &rm);
-             AppendToBuffer("andpd %s,%s",
-                            NameOfXMMRegister(regop),
-                            NameOfXMMRegister(rm));
-             data++;
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("pxor %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
           } else {
             UnimplementedInstruction();
           }
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index c3fe6c7..8084694 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -49,6 +49,10 @@
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
+
+// Number of registers for which space is reserved in safepoints.
+static const int kNumSafepointRegisters = 8;
+
 // ----------------------------------------------------
 
 
@@ -90,6 +94,7 @@
 
 class StandardFrameConstants : public AllStatic {
  public:
+  static const int kFixedFrameSize    =  4;
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 8d6942d..13a1177 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -41,8 +41,61 @@
 namespace v8 {
 namespace internal {
 
+
 #define __ ACCESS_MASM(masm_)
 
+
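+// Helper for patchable inlined smi checks.  It remembers where the short
+// jump following a smi test was emitted; EmitPatchInfo then encodes the
+// distance back to that jump in a test instruction so the patching code can
+// later find it and flip the condition (jc <-> jz, jnc <-> jnz).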
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+  explicit JumpPatchSite(MacroAssembler* masm)
+      : masm_(masm) {
+#ifdef DEBUG
+    info_emitted_ = false;
+#endif
+  }
+
+  ~JumpPatchSite() {
+    ASSERT(patch_site_.is_bound() == info_emitted_);
+  }
+
+  void EmitJumpIfNotSmi(Register reg, NearLabel* target) {
+    __ test(reg, Immediate(kSmiTagMask));
+    EmitJump(not_carry, target);  // Always taken before patched.
+  }
+
+  void EmitJumpIfSmi(Register reg, NearLabel* target) {
+    __ test(reg, Immediate(kSmiTagMask));
+    EmitJump(carry, target);  // Never taken before patched.
+  }
+
+  void EmitPatchInfo() {
+    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+    ASSERT(is_int8(delta_to_patch_site));
+    __ test(eax, Immediate(delta_to_patch_site));
+#ifdef DEBUG
+    info_emitted_ = true;
+#endif
+  }
+
+  bool is_bound() const { return patch_site_.is_bound(); }
+
+ private:
+  // jc will be patched with jz, jnc will become jnz.
+  void EmitJump(Condition cc, NearLabel* target) {
+    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    ASSERT(cc == carry || cc == not_carry);
+    __ bind(&patch_site_);
+    __ j(cc, target);
+  }
+
+  MacroAssembler* masm_;
+  Label patch_site_;
+#ifdef DEBUG
+  bool info_emitted_;
+#endif
+};
+
+
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right, with the
 // return address on top of them.  The actual argument count matches the
@@ -168,7 +221,12 @@
     }
   }
 
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
   { Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailout(info->function(), NO_REGISTERS);
     NearLabel ok;
     ExternalReference stack_limit =
         ExternalReference::address_of_stack_limit();
@@ -179,10 +237,6 @@
     __ bind(&ok);
   }
 
-  if (FLAG_trace) {
-    __ CallRuntime(Runtime::kTraceEnter, 0);
-  }
-
   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
     VisitStatements(function()->body());
@@ -202,6 +256,27 @@
 }
 
 
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+  Comment cmnt(masm_, "[ Stack check");
+  NearLabel ok;
+  ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
+  __ cmp(esp, Operand::StaticVariable(stack_limit));
+  __ j(above_equal, &ok, taken);
+  StackCheckStub stub;
+  __ CallStub(&stub);
+  __ bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  RecordStackCheck(stmt->OsrEntryId());
+  // Loop stack checks can be patched to perform on-stack
+  // replacement. In order to decide whether or not to perform OSR we
+  // embed the loop depth in a test instruction after the call so we
+  // can extract it from the OSR builtin.
+  ASSERT(loop_depth() > 0);
+  __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
+}
+
+
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
   if (return_label_.is_bound()) {
@@ -218,7 +293,7 @@
     Label check_exit_codesize;
     masm_->bind(&check_exit_codesize);
 #endif
-    CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+    SetSourcePosition(function()->end_position() - 1);
     __ RecordJSReturn();
     // Do not use the leave instruction here because it is too short to
     // patch with the code required by the debugger.
@@ -271,6 +346,7 @@
 void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
   // For simplicity we always test the accumulator register.
   codegen()->Move(result_register(), slot);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
@@ -314,22 +390,26 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
-    __ jmp(false_label_);
+    if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
-    __ jmp(true_label_);
+    if (true_label_ != fall_through_) __ jmp(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
-      __ jmp(false_label_);
+      if (false_label_ != fall_through_) __ jmp(false_label_);
     } else {
-      __ jmp(true_label_);
+      if (true_label_ != fall_through_) __ jmp(true_label_);
     }
   } else if (lit->IsSmi()) {
     if (Smi::cast(*lit)->value() == 0) {
-      __ jmp(false_label_);
+      if (false_label_ != fall_through_) __ jmp(false_label_);
     } else {
-      __ jmp(true_label_);
+      if (true_label_ != fall_through_) __ jmp(true_label_);
     }
   } else {
     // For simplicity we always test the accumulator register.
@@ -369,13 +449,14 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  ASSERT_EQ(materialize_true, materialize_false);
+  ASSERT(materialize_true == materialize_false);
   __ bind(materialize_true);
 }
 
@@ -408,8 +489,8 @@
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  ASSERT(materialize_false == false_label_);
   ASSERT(materialize_true == true_label_);
+  ASSERT(materialize_false == false_label_);
 }
 
 
@@ -432,6 +513,10 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   if (flag) {
     if (true_label_ != fall_through_) __ jmp(true_label_);
   } else {
@@ -523,6 +608,32 @@
 }
 
 
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+                                                     bool should_normalize,
+                                                     Label* if_true,
+                                                     Label* if_false) {
+  // Only prepare for bailouts before splits if we're in a test
+  // context. Otherwise, we let the Visit function deal with the
+  // preparation to avoid preparing with the same AST id twice.
+  if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+  NearLabel skip;
+  if (should_normalize) __ jmp(&skip);
+
+  ForwardBailoutStack* current = forward_bailout_stack_;
+  while (current != NULL) {
+    PrepareForBailout(current->expr(), state);
+    current = current->parent();
+  }
+
+  if (should_normalize) {
+    __ cmp(eax, Factory::true_value());
+    Split(equal, if_true, if_false, NULL);
+    __ bind(&skip);
+  }
+}
+
+
 void FullCodeGenerator::EmitDeclaration(Variable* variable,
                                         Variable::Mode mode,
                                         FunctionLiteral* function) {
@@ -633,8 +744,10 @@
   Comment cmnt(masm_, "[ SwitchStatement");
   Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
+
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
 
   ZoneList<CaseClause*>* clauses = stmt->cases();
   CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
@@ -659,12 +772,13 @@
     // Perform the comparison as if via '==='.
     __ mov(edx, Operand(esp, 0));  // Switch value.
     bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+    JumpPatchSite patch_site(masm_);
     if (inline_smi_code) {
       NearLabel slow_case;
       __ mov(ecx, edx);
       __ or_(ecx, Operand(eax));
-      __ test(ecx, Immediate(kSmiTagMask));
-      __ j(not_zero, &slow_case, not_taken);
+      patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
+
       __ cmp(edx, Operand(eax));
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
@@ -672,11 +786,11 @@
       __ bind(&slow_case);
     }
 
-    CompareFlags flags = inline_smi_code
-        ? NO_SMI_COMPARE_IN_STUB
-        : NO_COMPARE_FLAGS;
-    CompareStub stub(equal, true, flags);
-    __ CallStub(&stub);
+    // Record position before stub call for type feedback.
+    SetSourcePosition(clause->position());
+    Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+    EmitCallIC(ic, &patch_site);
+
     __ test(eax, Operand(eax));
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
@@ -702,6 +816,7 @@
   }
 
   __ bind(nested_statement.break_target());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
 
 
@@ -853,27 +968,20 @@
   __ bind(&update_each);
   __ mov(result_register(), ebx);
   // Perform the assignment as if via '='.
-  EmitAssignment(stmt->each());
+  { EffectContext context(this);
+    EmitAssignment(stmt->each(), stmt->AssignmentId());
+  }
 
   // Generate code for the body of the loop.
-  Label stack_limit_hit;
-  NearLabel stack_check_done;
   Visit(stmt->body());
 
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_done);
-
   // Generate code for going to the next element by incrementing the
   // index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_target());
   __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
-  __ jmp(&loop);
 
-  // Slow case for the stack limit check.
-  StackCheckStub stack_check_stub;
-  __ bind(&stack_limit_hit);
-  __ CallStub(&stack_check_stub);
-  __ jmp(&stack_check_done);
+  EmitStackCheck(stmt);
+  __ jmp(&loop);
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_target());
@@ -888,8 +996,14 @@
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
   // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning.
-  if (scope()->is_function_scope() &&
+  // space for nested functions that don't need literals cloning. If
+  // we're running with the --always-opt or the --prepare-always-opt
+  // flag, we need to use the runtime function so that the new function
+  // we are creating here gets a chance to have its code optimized and
+  // doesn't just get a copy of the existing unoptimized code.
+  if (!FLAG_always_opt &&
+      !FLAG_prepare_always_opt &&
+      scope()->is_function_scope() &&
       info->num_literals() == 0 &&
       !pretenure) {
     FastNewClosureStub stub;
@@ -1235,12 +1349,15 @@
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
-          VisitForAccumulatorValue(value);
-          __ mov(ecx, Immediate(key->handle()));
-          __ mov(edx, Operand(esp, 0));
           if (property->emit_store()) {
+            VisitForAccumulatorValue(value);
+            __ mov(ecx, Immediate(key->handle()));
+            __ mov(edx, Operand(esp, 0));
             Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
             EmitCallIC(ic, RelocInfo::CODE_TARGET);
+            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+          } else {
+            VisitForEffect(value);
           }
           break;
         }
@@ -1288,6 +1405,7 @@
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
   __ push(Immediate(expr->constant_elements()));
   if (expr->constant_elements()->map() == Heap::fixed_cow_array_map()) {
+    ASSERT(expr->depth() == 1);
     FastCloneShallowArrayStub stub(
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
     __ CallStub(&stub);
@@ -1329,6 +1447,8 @@
 
     // Update the write barrier for the array store.
     __ RecordWrite(ebx, offset, result_register(), ecx);
+
+    PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
 
   if (result_saved) {
@@ -1373,17 +1493,30 @@
         VisitForStackValue(property->obj());
       }
       break;
-    case KEYED_PROPERTY:
+    case KEYED_PROPERTY: {
       if (expr->is_compound()) {
-        VisitForStackValue(property->obj());
-        VisitForAccumulatorValue(property->key());
+        if (property->is_arguments_access()) {
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
+          __ mov(eax, Immediate(property->key()->AsLiteral()->handle()));
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForAccumulatorValue(property->key());
+        }
         __ mov(edx, Operand(esp, 0));
         __ push(eax);
       } else {
-        VisitForStackValue(property->obj());
-        VisitForStackValue(property->key());
+        if (property->is_arguments_access()) {
+          VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+          __ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
+          __ push(Immediate(property->key()->AsLiteral()->handle()));
+        } else {
+          VisitForStackValue(property->obj());
+          VisitForStackValue(property->key());
+        }
       }
       break;
+    }
   }
 
   if (expr->is_compound()) {
@@ -1401,6 +1534,12 @@
       }
     }
 
+    // For property compound assignments we need another deoptimization
+    // point after the property load.
+    if (property != NULL) {
+      PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
+    }
+
     Token::Value op = expr->binary_op();
     ConstantOperand constant = ShouldInlineSmiCase(op)
         ? GetConstantOperand(op, expr->target(), expr->value())
@@ -1426,6 +1565,9 @@
     } else {
       EmitBinaryOp(op, mode);
     }
+
+    // Deoptimization point in case the binary operation may have side effects.
+    PrepareForBailout(expr->binary_operation(), TOS_REG);
   } else {
     VisitForAccumulatorValue(expr->value());
   }
@@ -1438,6 +1580,8 @@
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
                              expr->op());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      context()->Plug(eax);
       break;
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
@@ -1469,26 +1613,25 @@
                                            OverwriteMode mode,
                                            bool left_is_constant_smi,
                                            Smi* value) {
-  NearLabel call_stub;
-  Label done;
+  NearLabel call_stub, done;
   __ add(Operand(eax), Immediate(value));
   __ j(overflow, &call_stub);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &done);
 
   // Undo the optimistic add operation and call the shared stub.
   __ bind(&call_stub);
   __ sub(Operand(eax), Immediate(value));
   Token::Value op = Token::ADD;
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+  TypeRecordingBinaryOpStub stub(op, mode);
   if (left_is_constant_smi) {
-    __ push(Immediate(value));
-    __ push(eax);
+    __ mov(edx, Immediate(value));
   } else {
-    __ push(eax);
-    __ push(Immediate(value));
+    __ mov(edx, eax);
+    __ mov(eax, Immediate(value));
   }
-  __ CallStub(&stub);
+  EmitCallIC(stub.GetCode(), &patch_site);
+
   __ bind(&done);
   context()->Plug(eax);
 }
@@ -1498,7 +1641,7 @@
                                            OverwriteMode mode,
                                            bool left_is_constant_smi,
                                            Smi* value) {
-  Label call_stub, done;
+  NearLabel call_stub, done;
   if (left_is_constant_smi) {
     __ mov(ecx, eax);
     __ mov(eax, Immediate(value));
@@ -1507,24 +1650,22 @@
     __ sub(Operand(eax), Immediate(value));
   }
   __ j(overflow, &call_stub);
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &done);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &done);
 
   __ bind(&call_stub);
-  if (left_is_constant_smi)  {
-    __ push(Immediate(value));
-    __ push(ecx);
+  if (left_is_constant_smi) {
+    __ mov(edx, Immediate(value));
+    __ mov(eax, ecx);
   } else {
-    // Undo the optimistic sub operation.
-    __ add(Operand(eax), Immediate(value));
-
-    __ push(eax);
-    __ push(Immediate(value));
+    __ add(Operand(eax), Immediate(value));  // Undo the subtraction.
+    __ mov(edx, eax);
+    __ mov(eax, Immediate(value));
   }
-
   Token::Value op = Token::SUB;
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
-  __ CallStub(&stub);
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
+
   __ bind(&done);
   context()->Plug(eax);
 }
@@ -1534,19 +1675,21 @@
                                                Token::Value op,
                                                OverwriteMode mode,
                                                Smi* value) {
-  Label call_stub, smi_case, done;
+  NearLabel call_stub, smi_case, done;
   int shift_value = value->value() & 0x1f;
 
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &smi_case);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &smi_case);
 
+  // Call stub.
   __ bind(&call_stub);
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
-  __ push(eax);
-  __ push(Immediate(value));
-  __ CallStub(&stub);
+  __ mov(edx, eax);
+  __ mov(eax, Immediate(value));
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ jmp(&done);
 
+  // Smi case.
   __ bind(&smi_case);
   switch (op) {
     case Token::SHL:
@@ -1596,18 +1739,19 @@
                                              Token::Value op,
                                              OverwriteMode mode,
                                              Smi* value) {
-  Label smi_case, done;
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &smi_case);
+  NearLabel smi_case, done;
 
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &smi_case);
+
   // The order of the arguments does not matter for bit-ops with a
   // constant operand.
-  __ push(Immediate(value));
-  __ push(eax);
-  __ CallStub(&stub);
+  __ mov(edx, Immediate(value));
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ jmp(&done);
 
+  // Smi case.
   __ bind(&smi_case);
   switch (op) {
     case Token::BIT_OR:
@@ -1675,24 +1819,20 @@
 
   // Do combined smi check of the operands. Left operand is on the
   // stack. Right operand is in eax.
-  Label done, stub_call, smi_case;
+  NearLabel done, smi_case, stub_call;
   __ pop(edx);
   __ mov(ecx, eax);
   __ or_(eax, Operand(edx));
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &smi_case);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(eax, &smi_case);
 
   __ bind(&stub_call);
-  GenericBinaryOpStub stub(op, mode, NO_SMI_CODE_IN_STUB, TypeInfo::Unknown());
-  if (stub.ArgsInRegistersSupported()) {
-    stub.GenerateCall(masm_, edx, ecx);
-  } else {
-    __ push(edx);
-    __ push(ecx);
-    __ CallStub(&stub);
-  }
+  __ mov(eax, ecx);
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ jmp(&done);
 
+  // Smi case.
   __ bind(&smi_case);
   __ mov(eax, edx);  // Copy left operand in case of a stub call.
 
@@ -1769,20 +1909,14 @@
 
 void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                      OverwriteMode mode) {
-  TypeInfo type = TypeInfo::Unknown();
-  GenericBinaryOpStub stub(op, mode, NO_GENERIC_BINARY_FLAGS, type);
-  if (stub.ArgsInRegistersSupported()) {
-    __ pop(edx);
-    stub.GenerateCall(masm_, edx, eax);
-  } else {
-    __ push(result_register());
-    __ CallStub(&stub);
-  }
+  __ pop(edx);
+  TypeRecordingBinaryOpStub stub(op, mode);
+  EmitCallIC(stub.GetCode(), NULL);  // NULL signals no inlined smi code.
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
@@ -1830,6 +1964,8 @@
       break;
     }
   }
+  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
+  context()->Plug(eax);
 }
 
 
@@ -1902,8 +2038,6 @@
     }
     __ bind(&done);
   }
-
-  context()->Plug(eax);
 }
 
 
@@ -1940,10 +2074,10 @@
     __ push(Operand(esp, kPointerSize));  // Receiver is under value.
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
-    context()->DropAndPlug(1, eax);
-  } else {
-    context()->Plug(eax);
+    __ Drop(1);
   }
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(eax);
 }
 
 
@@ -1981,6 +2115,7 @@
     __ pop(eax);
   }
 
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
   context()->Plug(eax);
 }
 
@@ -1992,13 +2127,14 @@
   if (key->IsPropertyName()) {
     VisitForAccumulatorValue(expr->obj());
     EmitNamedPropertyLoad(expr);
+    context()->Plug(eax);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
     __ pop(edx);
     EmitKeyedPropertyLoad(expr);
+    context()->Plug(eax);
   }
-  context()->Plug(eax);
 }
 
 
@@ -2008,17 +2144,18 @@
   // Code common for calls using the IC.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
-  { PreserveStatementPositionScope scope(masm()->positions_recorder());
+  { PreservePositionScope scope(masm()->positions_recorder());
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
     }
     __ Set(ecx, Immediate(name));
   }
   // Record source position of the IC call.
-  SetSourcePosition(expr->position(), FORCED_POSITION);
+  SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
   EmitCallIC(ic, mode);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   context()->Plug(eax);
@@ -2040,17 +2177,18 @@
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
-  { PreserveStatementPositionScope scope(masm()->positions_recorder());
+  { PreservePositionScope scope(masm()->positions_recorder());
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
     }
   }
   // Record source position of the IC call.
-  SetSourcePosition(expr->position(), FORCED_POSITION);
+  SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
   __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize));  // Key.
   EmitCallIC(ic, mode);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   context()->DropAndPlug(1, eax);  // Drop the key still on the stack.
@@ -2061,16 +2199,17 @@
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
-  { PreserveStatementPositionScope scope(masm()->positions_recorder());
+  { PreservePositionScope scope(masm()->positions_recorder());
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
     }
   }
   // Record source position for debugger.
-  SetSourcePosition(expr->position(), FORCED_POSITION);
+  SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
   __ CallStub(&stub);
+  RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   context()->DropAndPlug(1, eax);
@@ -2078,6 +2217,12 @@
 
 
 void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+  // We want to verify that RecordJSReturnSite gets called on all paths
+  // through this function.  Avoid early returns.
+  expr->return_is_recorded_ = false;
+#endif
+
   Comment cmnt(masm_, "[ Call");
   Expression* fun = expr->expression();
   Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -2089,7 +2234,7 @@
     // arguments.
     ZoneList<Expression*>* args = expr->arguments();
     int arg_count = args->length();
-    { PreserveStatementPositionScope pos_scope(masm()->positions_recorder());
+    { PreservePositionScope pos_scope(masm()->positions_recorder());
       VisitForStackValue(fun);
       // Reserved receiver slot.
       __ push(Immediate(Factory::undefined_value()));
@@ -2119,10 +2264,11 @@
       __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
     }
     // Record source position for debugger.
-    SetSourcePosition(expr->position(), FORCED_POSITION);
+    SetSourcePosition(expr->position());
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
     CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
     __ CallStub(&stub);
+    RecordJSReturnSite(expr);
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
     context()->DropAndPlug(1, eax);
@@ -2135,7 +2281,7 @@
     // Call to a lookup slot (dynamically introduced variable).
     Label slow, done;
 
-    { PreserveStatementPositionScope scope(masm()->positions_recorder());
+    { PreservePositionScope scope(masm()->positions_recorder());
       // Generate code for loading from variables potentially shadowed
       // by eval-introduced variables.
       EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
@@ -2181,15 +2327,15 @@
       // Call to a keyed property.
       // For a synthetic property use keyed load IC followed by function call,
       // for a regular property use keyed EmitCallIC.
-      { PreserveStatementPositionScope scope(masm()->positions_recorder());
+      { PreservePositionScope scope(masm()->positions_recorder());
         VisitForStackValue(prop->obj());
       }
       if (prop->is_synthetic()) {
-        { PreserveStatementPositionScope scope(masm()->positions_recorder());
+        { PreservePositionScope scope(masm()->positions_recorder());
           VisitForAccumulatorValue(prop->key());
         }
         // Record source code position for IC call.
-        SetSourcePosition(prop->position(), FORCED_POSITION);
+        SetSourcePosition(prop->position());
         __ pop(edx);  // We do not need to keep the receiver.
 
         Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -2214,7 +2360,7 @@
         loop_depth() == 0) {
       lit->set_try_full_codegen(true);
     }
-    { PreserveStatementPositionScope scope(masm()->positions_recorder());
+    { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(fun);
     }
     // Load global receiver object.
@@ -2223,6 +2369,11 @@
     // Emit function call.
     EmitCallWithStub(expr);
   }
+
+#ifdef DEBUG
+  // RecordJSReturnSite should have been called.
+  ASSERT(expr->return_is_recorded_);
+#endif
 }
 
 
@@ -2270,6 +2421,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2289,6 +2441,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask | 0x80000000));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2321,6 +2474,7 @@
   __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
   __ j(below, if_false);
   __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(below_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2342,6 +2496,7 @@
   __ test(eax, Immediate(kSmiTagMask));
   __ j(equal, if_false);
   __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ebx);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(above_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2365,6 +2520,7 @@
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
   __ test(ebx, Immediate(1 << Map::kIsUndetectable));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(not_zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2384,9 +2540,9 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
-  // used in a few functions in runtime.js which should not normally be hit by
-  // this compiler.
+  // TODO(3110205): Implement this.
+  // Currently unimplemented.  Emit false, a safe choice.
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ jmp(if_false);
   context()->Plug(if_true, if_false);
 }
@@ -2407,6 +2563,7 @@
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, if_false);
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2428,6 +2585,7 @@
   __ test(eax, Immediate(kSmiTagMask));
   __ j(equal, if_false);
   __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2449,6 +2607,7 @@
   __ test(eax, Immediate(kSmiTagMask));
   __ j(equal, if_false);
   __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2480,6 +2639,7 @@
   __ bind(&check_frame_marker);
   __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
          Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2502,6 +2662,7 @@
 
   __ pop(ebx);
   __ cmp(eax, Operand(ebx));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2719,7 +2880,9 @@
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  __ CallRuntime(Runtime::kMath_pow, 2);
+
+  MathPowStub stub;
+  __ CallStub(&stub);
   context()->Plug(eax);
 }
 
@@ -2904,7 +3067,8 @@
 
 void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
-  TranscendentalCacheStub stub(TranscendentalCache::SIN);
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::TAGGED);
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -2914,7 +3078,19 @@
 
 void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
-  TranscendentalCacheStub stub(TranscendentalCache::COS);
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::TAGGED);
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallStub(&stub);
+  context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::TAGGED);
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -2951,11 +3127,13 @@
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the stub.
+  RegExpConstructResultStub stub;
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
-  __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+  __ CallStub(&stub);
   context()->Plug(eax);
 }
 
@@ -2965,7 +3143,71 @@
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
+  Label done;
+  Label slow_case;
+  Register object = eax;
+  Register index_1 = ebx;
+  Register index_2 = ecx;
+  Register elements = edi;
+  Register temp = edx;
+  __ mov(object, Operand(esp, 2 * kPointerSize));
+  // Fetch the map and check if the array is in the fast case.
+  // Check that object doesn't require security checks and
+  // has no indexed interceptor.
+  __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, temp);
+  __ j(below, &slow_case);
+  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
+            KeyedLoadIC::kSlowCaseBitFieldMask);
+  __ j(not_zero, &slow_case);
+
+  // Check that the object's elements are in the fast case and writable.
+  __ mov(elements, FieldOperand(object, JSObject::kElementsOffset));
+  __ cmp(FieldOperand(elements, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  __ j(not_equal, &slow_case);
+
+  // Check that both indices are smis.
+  __ mov(index_1, Operand(esp, 1 * kPointerSize));
+  __ mov(index_2, Operand(esp, 0));
+  __ mov(temp, index_1);
+  __ or_(temp, Operand(index_2));
+  __ test(temp, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow_case);
+
+  // Check that both indices are valid.
+  __ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
+  __ cmp(temp, Operand(index_1));
+  __ j(below_equal, &slow_case);
+  __ cmp(temp, Operand(index_2));
+  __ j(below_equal, &slow_case);
+
+  // Bring the element addresses into index_1 and index_2.
+  __ lea(index_1, CodeGenerator::FixedArrayElementOperand(elements, index_1));
+  __ lea(index_2, CodeGenerator::FixedArrayElementOperand(elements, index_2));
+
+  // Swap elements.  Use object and temp as scratch registers.
+  __ mov(object, Operand(index_1, 0));
+  __ mov(temp,   Operand(index_2, 0));
+  __ mov(Operand(index_2, 0), object);
+  __ mov(Operand(index_1, 0), temp);
+
+  Label new_space;
+  __ InNewSpace(elements, temp, equal, &new_space);
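+  // The write barrier below is needed only when the elements array is in
+  // old space; stores into new-space objects are not recorded.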
+
+  __ mov(object, elements);
+  __ RecordWriteHelper(object, index_1, temp);
+  __ RecordWriteHelper(elements, index_2, temp);
+
+  __ bind(&new_space);
+  // We are done. Drop elements from the stack, and return undefined.
+  __ add(Operand(esp), Immediate(3 * kPointerSize));
+  __ mov(eax, Factory::undefined_value());
+  __ jmp(&done);
+
+  __ bind(&slow_case);
   __ CallRuntime(Runtime::kSwapElements, 3);
+
+  __ bind(&done);
   context()->Plug(eax);
 }
 
@@ -3073,6 +3315,7 @@
 
   __ test(FieldOperand(eax, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -3377,6 +3620,7 @@
       // Notice that the labels are swapped.
       context()->PrepareTest(&materialize_true, &materialize_false,
                              &if_false, &if_true, &fall_through);
+      if (context()->IsTest()) ForwardBailoutToChild(expr);
       VisitForControl(expr->expression(), if_true, if_false, fall_through);
       context()->Plug(if_false, if_true);  // Labels swapped.
       break;
@@ -3493,14 +3737,24 @@
       __ push(eax);
       EmitNamedPropertyLoad(prop);
     } else {
-      VisitForStackValue(prop->obj());
-      VisitForAccumulatorValue(prop->key());
+      if (prop->is_arguments_access()) {
+        VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
+        __ push(EmitSlotSearch(obj_proxy->var()->AsSlot(), ecx));
+        __ mov(eax, Immediate(prop->key()->AsLiteral()->handle()));
+      } else {
+        VisitForStackValue(prop->obj());
+        VisitForAccumulatorValue(prop->key());
+      }
       __ mov(edx, Operand(esp, 0));
       __ push(eax);
       EmitKeyedPropertyLoad(prop);
     }
   }
 
+  // We need a second deoptimization point after loading the value
+  // in case evaluating the property load may have a side effect.
+  PrepareForBailout(expr->increment(), TOS_REG);
+
   // Call ToNumber only if operand is not a smi.
   NearLabel no_conversion;
   if (ShouldInlineSmiCase(expr->op())) {
@@ -3532,8 +3786,9 @@
   }
 
   // Inline smi case if we are in a loop.
-  NearLabel stub_call;
-  Label done;
+  NearLabel stub_call, done;
+  JumpPatchSite patch_site(masm_);
+
   if (ShouldInlineSmiCase(expr->op())) {
     if (expr->op() == Token::INC) {
       __ add(Operand(eax), Immediate(Smi::FromInt(1)));
@@ -3543,8 +3798,8 @@
     __ j(overflow, &stub_call);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    __ test(eax, Immediate(kSmiTagMask));
-    __ j(zero, &done);
+    patch_site.EmitJumpIfSmi(eax, &done);
+
     __ bind(&stub_call);
     // Call stub. Undo operation first.
     if (expr->op() == Token::INC) {
@@ -3553,12 +3808,16 @@
       __ add(Operand(eax), Immediate(Smi::FromInt(1)));
     }
   }
+
+  // Record position before stub call.
+  SetSourcePosition(expr->position());
+
   // Call stub for +1/-1.
-  GenericBinaryOpStub stub(expr->binary_op(),
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS,
-                           TypeInfo::Unknown());
-  stub.GenerateCall(masm(), eax, Smi::FromInt(1));
+  __ mov(edx, eax);
+  __ mov(eax, Immediate(Smi::FromInt(1)));
+  TypeRecordingBinaryOpStub stub(expr->binary_op(),
+                                 NO_OVERWRITE);
+  EmitCallIC(stub.GetCode(), &patch_site);
   __ bind(&done);
 
   // Store the value returned in eax.
@@ -3569,6 +3828,8 @@
         { EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN);
+          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          context.Plug(eax);
         }
        // For all contexts except EffectContext, we have the result on
         // top of the stack.
@@ -3579,6 +3840,8 @@
         // Perform the assignment as if via '='.
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                Token::ASSIGN);
+        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        context()->Plug(eax);
       }
       break;
     case NAMED_PROPERTY: {
@@ -3586,6 +3849,7 @@
       __ pop(edx);
       Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
           context()->PlugTOS();
@@ -3600,6 +3864,7 @@
       __ pop(edx);
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         // Result is on the stack
         if (!context()->IsEffect()) {
@@ -3627,6 +3892,7 @@
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
     EmitCallIC(ic, RelocInfo::CODE_TARGET);
+    PrepareForBailout(expr, TOS_REG);
     context()->Plug(eax);
   } else if (proxy != NULL &&
              proxy->var()->AsSlot() != NULL &&
@@ -3642,12 +3908,13 @@
     __ push(esi);
     __ push(Immediate(proxy->name()));
     __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+    PrepareForBailout(expr, TOS_REG);
     __ bind(&done);
 
     context()->Plug(eax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    Visit(expr);
+    context()->HandleExpression(expr);
   }
 }
 
@@ -3672,6 +3939,7 @@
   { AccumulatorValueContext context(this);
     VisitForTypeofValue(left_unary->expression());
   }
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
 
   if (check->Equals(Heap::number_symbol())) {
     __ test(eax, Immediate(kSmiTagMask));
@@ -3767,14 +4035,17 @@
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
       __ cmp(eax, Factory::true_value());
       Split(equal, if_true, if_false, fall_through);
       break;
 
     case Token::INSTANCEOF: {
       VisitForStackValue(expr->right());
-      InstanceofStub stub;
+      __ IncrementCounter(&Counters::instance_of_full, 1);
+      InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ test(eax, Operand(eax));
       // The stub returns 0 for true.
       Split(zero, if_true, if_false, fall_through);
@@ -3820,22 +4091,23 @@
       }
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
+      JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
         NearLabel slow_case;
         __ mov(ecx, Operand(edx));
         __ or_(ecx, Operand(eax));
-        __ test(ecx, Immediate(kSmiTagMask));
-        __ j(not_zero, &slow_case, not_taken);
+        patch_site.EmitJumpIfNotSmi(ecx, &slow_case);
         __ cmp(edx, Operand(eax));
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
       }
 
-      CompareFlags flags = inline_smi_code
-          ? NO_SMI_COMPARE_IN_STUB
-          : NO_COMPARE_FLAGS;
-      CompareStub stub(cc, strict, flags);
-      __ CallStub(&stub);
+      // Record position and call the compare IC.
+      SetSourcePosition(expr->position());
+      Handle<Code> ic = CompareIC::GetUninitialized(op);
+      EmitCallIC(ic, &patch_site);
+
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ test(eax, Operand(eax));
       Split(cc, if_true, if_false, fall_through);
     }
@@ -3856,6 +4128,8 @@
                          &if_true, &if_false, &fall_through);
 
   VisitForAccumulatorValue(expr->expression());
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
   __ cmp(eax, Factory::null_value());
   if (expr->is_strict()) {
     Split(equal, if_true, if_false, fall_through);
@@ -3894,8 +4168,31 @@
 void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
   ASSERT(mode == RelocInfo::CODE_TARGET ||
          mode == RelocInfo::CODE_TARGET_CONTEXT);
+  switch (ic->kind()) {
+    case Code::LOAD_IC:
+      __ IncrementCounter(&Counters::named_load_full, 1);
+      break;
+    case Code::KEYED_LOAD_IC:
+      __ IncrementCounter(&Counters::keyed_load_full, 1);
+      break;
+    case Code::STORE_IC:
+      __ IncrementCounter(&Counters::named_store_full, 1);
+      break;
+    case Code::KEYED_STORE_IC:
+      __ IncrementCounter(&Counters::keyed_store_full, 1);
+      break;
+    default:
+      break;
+  }
+
   __ call(ic, mode);
 
+  // Crankshaft doesn't need patching of inlined loads and stores.
+  // When compiling the snapshot we need to produce code that works
+  // with and without Crankshaft.
+  if (V8::UseCrankshaft() && !Serializer::enabled()) {
+    return;
+  }
+
   // If we're calling a (keyed) load or store stub, we have to mark
   // the call as containing no inlined code so we will not attempt to
   // patch it.
@@ -3913,6 +4210,16 @@
 }
 
 
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
+  __ call(ic, RelocInfo::CODE_TARGET);
+  if (patch_site != NULL && patch_site->is_bound()) {
+    patch_site->EmitPatchInfo();
+  } else {
+    __ nop();  // Signals no inlined code.
+  }
+}
+
+
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
   __ mov(Operand(ebp, frame_offset), value);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index ddfbb91..9c9304d 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -108,9 +108,6 @@
                                            Register name,
                                            Register r0,
                                            Register r1) {
-  // Assert that name contains a string.
-  if (FLAG_debug_code) __ AbortIfNotString(name);
-
   // Compute the capacity mask.
   const int kCapacityOffset =
       StringDictionary::kHeaderSize +
@@ -713,7 +710,7 @@
   char_at_generator.GenerateFast(masm);
   __ ret(0);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
@@ -1552,14 +1549,7 @@
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
-  // Check if the name is a string.
-  Label miss;
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(zero, &miss);
-  Condition cond = masm->IsObjectStringType(ecx, eax, eax);
-  __ j(NegateCondition(cond), &miss);
   GenerateCallNormal(masm, argc);
-  __ bind(&miss);
   GenerateMiss(masm, argc);
 }
 
@@ -1639,16 +1629,15 @@
 }
 
 
-// One byte opcode for test eax,0xXXXXXXXX.
-static const byte kTestEaxByte = 0xA9;
-
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
   // If the instruction following the call is not a test eax, nothing
   // was inlined.
-  if (*test_instruction_address != kTestEaxByte) return false;
+  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
 
   Address delta_address = test_instruction_address + 1;
   // The delta to the start of the map check instruction.
@@ -1692,6 +1681,8 @@
                                         Object* map,
                                         Object* cell,
                                         bool is_dont_delete) {
+  if (V8::UseCrankshaft()) return false;
+
   // The address of the instruction following the call.
   Address mov_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1723,13 +1714,15 @@
 
 
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
 
   // If the instruction following the call is not a test eax, nothing
   // was inlined.
-  if (*test_instruction_address != kTestEaxByte) return false;
+  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
 
   // Extract the encoded deltas from the test eax instruction.
   Address encoded_offsets_address = test_instruction_address + 1;
@@ -1769,11 +1762,13 @@
 
 
 static bool PatchInlinedMapCheck(Address address, Object* map) {
+  if (V8::UseCrankshaft()) return false;
+
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
   // The keyed load has a fast inlined case if the IC call instruction
   // is immediately followed by a test instruction.
-  if (*test_instruction_address != kTestEaxByte) return false;
+  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
 
   // Fetch the offset from the test instruction to the map cmp
   // instruction.  This offset is stored in the last 4 bytes of the 5
@@ -1969,6 +1964,24 @@
 }
 
 
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  __ pop(ebx);
+  __ push(edx);
+  __ push(ecx);
+  __ push(eax);
+  __ push(ebx);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+}
+
+
 // Defined in ic.cc.
 Object* KeyedStoreIC_Miss(Arguments args);
 
@@ -2010,9 +2023,107 @@
   __ TailCallExternalReference(ref, 3, 1);
 }
 
+
 #undef __
 
 
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return less;
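+      // (x > y is compiled as y < x, so the condition remains "less".)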
+    case Token::LTE:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return greater_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+static bool HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al, nothing
+  // was inlined.
+  return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+  HandleScope scope;
+  Handle<Code> rewritten;
+  State previous_state = GetState();
+
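+  // A sketch of the intended progression through the IC states (state
+  // names assume the CompareIC declaration in ic.h): UNINITIALIZED
+  // advances to a specialized state such as SMIS, and unexpected operand
+  // kinds eventually degrade the site to GENERIC.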
+  State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
+  if (state == GENERIC) {
+    CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
+    rewritten = stub.GetCode();
+  } else {
+    ICCompareStub stub(op_, state);
+    rewritten = stub.GetCode();
+  }
+  set_target(*rewritten);
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[CompareIC (%s->%s)#%s]\n",
+           GetStateName(previous_state),
+           GetStateName(state),
+           Token::Name(op_));
+  }
+#endif
+
+  // Activate inlined smi code.
+  if (previous_state == UNINITIALIZED) {
+    PatchInlinedSmiCode(address());
+  }
+}
+
+
+void PatchInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al, nothing
+  // was inlined.
+  if (*test_instruction_address != Assembler::kTestAlByte) {
+    ASSERT(*test_instruction_address == Assembler::kNopByte);
+    return;
+  }
+
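+
+  // Shape of the sequence being patched, as laid down by JumpPatchSite in
+  // the full code generator (a sketch; offsets are illustrative):
+  //   test <reg>, kSmiTagMask
+  //   jc/jnc short <target>      <-- test_instruction_address - delta
+  //   ...
+  //   call <compare ic>
+  //   test al, <delta>           <-- encodes the distance back to the jump
+  // Since test clears CF, the jc/jnc is statically never/always taken; it
+  // is rewritten below to jz/jnz so the inlined smi check becomes live.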
+  Address delta_address = test_instruction_address + 1;
+  // The delta to the start of the short conditional jump that is
+  // rewritten below.
+  int8_t delta = *reinterpret_cast<int8_t*>(delta_address);
+  if (FLAG_trace_ic) {
+    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
+           address, test_instruction_address, delta);
+  }
+
+  // Patch with a short conditional jump. There must be a
+  // short jump-if-carry/not-carry at this position.
+  Address jmp_address = test_instruction_address - delta;
+  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
+         *jmp_address == Assembler::kJcShortOpcode);
+  Condition cc = *jmp_address == Assembler::kJncShortOpcode
+      ? not_zero
+      : zero;
+  *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
new file mode 100644
index 0000000..d64f528
--- /dev/null
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -0,0 +1,3276 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "ia32/lithium-codegen-ia32.h"
+#include "code-stubs.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator : public PostCallGenerator {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     int deoptimization_index)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deoptimization_index_(deoptimization_index) { }
+  virtual ~SafepointGenerator() { }
+
+  virtual void Generate() {
+    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  int deoptimization_index_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  HPhase phase("Code generation", chunk());
+  ASSERT(is_unused());
+  status_ = GENERATING;
+  CpuFeatures::Scope scope(SSE2);
+  return GeneratePrologue() &&
+      GenerateBody() &&
+      GenerateDeferredCode() &&
+      GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  ASSERT(is_done());
+  code->set_stack_slots(StackSlotCount());
+  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+    PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+void LCodeGen::Comment(const char* format, ...) {
+  if (!FLAG_code_comments) return;
+  char buffer[4 * KB];
+  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+  va_list arguments;
+  va_start(arguments, format);
+  builder.AddFormattedList(format, arguments);
+  va_end(arguments);
+
+  // Copy the string before recording it in the assembler to avoid
+  // issues when the stack-allocated buffer goes out of scope.
+  size_t length = builder.position();
+  Vector<char> copy = Vector<char>::New(length + 1);
+  memcpy(copy.start(), builder.Finalize(), copy.length());
+  masm()->RecordComment(copy.start());
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  ASSERT(is_generating());
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+    __ int3();
+  }
+#endif
+
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS function.
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = StackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
+      __ mov(Operand(eax), Immediate(slots));
+      Label loop;
+      __ bind(&loop);
+      __ push(Immediate(kSlotsZapValue));
+      __ dec(eax);
+      __ j(not_zero, &loop);
+    } else {
+      __ sub(Operand(esp), Immediate(slots * kPointerSize));
+    }
+  }
+
+  // Trace the call.
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateBody() {
+  ASSERT(is_generating());
+  bool emit_instructions = true;
+  for (current_instruction_ = 0;
+       !is_aborted() && current_instruction_ < instructions_->length();
+       current_instruction_++) {
+    LInstruction* instr = instructions_->at(current_instruction_);
+    if (instr->IsLabel()) {
+      LLabel* label = LLabel::cast(instr);
+      emit_instructions = !label->HasReplacement();
+    }
+
+    if (emit_instructions) {
+      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+      instr->CompileToNative(this);
+    }
+  }
+  return !is_aborted();
+}
+
+
+LInstruction* LCodeGen::GetNextInstruction() {
+  if (current_instruction_ < instructions_->length() - 1) {
+    return instructions_->at(current_instruction_ + 1);
+  } else {
+    return NULL;
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  ASSERT(is_generating());
+  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+    LDeferredCode* code = deferred_[i];
+    __ bind(code->entry());
+    code->Generate();
+    __ jmp(code->exit());
+  }
+
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  ASSERT(is_done());
+  safepoints_.Emit(masm(), StackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::FromAllocationIndex(index);
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(int index) const {
+  return XMMRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  ASSERT(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  ASSERT(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+int LCodeGen::ToInteger32(LConstantOperand* op) const {
+  Handle<Object> value = chunk_->LookupLiteral(op);
+  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
+  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
+      value->Number());
+  return static_cast<int32_t>(value->Number());
+}
+
+
+Immediate LCodeGen::ToImmediate(LOperand* op) {
+  LConstantOperand* const_op = LConstantOperand::cast(op);
+  Handle<Object> literal = chunk_->LookupLiteral(const_op);
+  Representation r = chunk_->LookupLiteralRepresentation(const_op);
+  if (r.IsInteger32()) {
+    ASSERT(literal->IsNumber());
+    return Immediate(static_cast<int32_t>(literal->Number()));
+  } else if (r.IsDouble()) {
+    Abort("unsupported double immediate");
+  }
+  ASSERT(r.IsTagged());
+  return Immediate(literal);
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) const {
+  if (op->IsRegister()) return Operand(ToRegister(op));
+  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
+  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  int index = op->index();
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, and
+    // context in the fixed part of the frame.
+    return Operand(ebp, -(index + 3) * kPointerSize);
+  } else {
+    // Incoming parameter. Skip the return address.
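+    // (index is negative here; e.g. index == -1 yields ebp + 2 words, the
+    // first argument slot above the return address and saved ebp.)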
+    return Operand(ebp, -(index - 1) * kPointerSize);
+  }
+}
+
+
+void LCodeGen::AddToTranslation(Translation* translation,
+                                LOperand* op,
+                                bool is_tagged) {
+  if (op == NULL) {
+    // TODO(twuerthinger): Introduce marker operands to indicate that this value
+    // is not present and must be reconstructed from the deoptimizer. Currently
+    // this is only used for the arguments object.
+    translation->StoreArgumentsObject();
+  } else if (op->IsStackSlot()) {
+    if (is_tagged) {
+      translation->StoreStackSlot(op->index());
+    } else {
+      translation->StoreInt32StackSlot(op->index());
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsArgument()) {
+    ASSERT(is_tagged);
+    int src_index = StackSlotCount() + op->index();
+    translation->StoreStackSlot(src_index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    XMMRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(literal);
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  if (instr != NULL) {
+    LPointerMap* pointers = instr->pointer_map();
+    RecordPosition(pointers->position());
+    __ call(code, mode);
+    RegisterLazyDeoptimization(instr);
+  } else {
+    LPointerMap no_pointers(0);
+    RecordPosition(no_pointers.position());
+    __ call(code, mode);
+    RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
+  }
+
+  // Signal that we don't inline smi code before these stubs in the
+  // optimizing code generator.
+  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
+      code->kind() == Code::COMPARE_IC) {
+    __ nop();
+  }
+}
+
+
+void LCodeGen::CallRuntime(Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr) {
+  ASSERT(instr != NULL);
+  LPointerMap* pointers = instr->pointer_map();
+  ASSERT(pointers != NULL);
+  RecordPosition(pointers->position());
+
+  __ CallRuntime(function, num_arguments);
+  // Runtime calls to Throw are not supposed to ever return at the
+  // call site, so don't register lazy deoptimization for these. We do,
+  // however, have to record a safepoint since throwing exceptions can
+  // cause garbage collections.
+  // BUG(3243555): register a lazy deoptimization point at throw. We need
+  // it to be able to inline functions containing a throw statement.
+  if (!instr->IsThrow()) {
+    RegisterLazyDeoptimization(instr);
+  } else {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
+  }
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+  // Create the environment to bail out to. If the call has side effects,
+  // execution has to continue after the call; otherwise it could resume
+  // from a previous bailout point and repeat the call.
+  LEnvironment* deoptimization_environment;
+  if (instr->HasDeoptimizationEnvironment()) {
+    deoptimization_environment = instr->deoptimization_environment();
+  } else {
+    deoptimization_environment = instr->environment();
+  }
+
+  RegisterEnvironmentForDeoptimization(deoptimization_environment);
+  RecordSafepoint(instr->pointer_map(),
+                  deoptimization_environment->deoptimization_index());
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+    }
+    Translation translation(&translations_, frame_count);
+    environment->WriteTranslation(this, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    environment->Register(deoptimization_index, translation.index());
+    deoptimizations_.Add(environment);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
+  RegisterEnvironmentForDeoptimization(environment);
+  ASSERT(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(entry != NULL);
+  if (entry == NULL) {
+    Abort("bailout was not prepared");
+    return;
+  }
+
+  if (FLAG_deopt_every_n_times != 0) {
+    Handle<SharedFunctionInfo> shared(info_->shared_info());
+    Label no_deopt;
+    __ pushfd();
+    __ push(eax);
+    __ push(ebx);
+    __ mov(ebx, shared);
+    __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
+    __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+    __ j(not_zero, &no_deopt);
+    if (FLAG_trap_on_deopt) __ int3();
+    __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
+    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
+    __ pop(ebx);
+    __ pop(eax);
+    __ popfd();
+    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+
+    __ bind(&no_deopt);
+    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
+    __ pop(ebx);
+    __ pop(eax);
+    __ popfd();
+  }
+
+  if (cc == no_condition) {
+    if (FLAG_trap_on_deopt) __ int3();
+    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+  } else {
+    if (FLAG_trap_on_deopt) {
+      NearLabel done;
+      __ j(NegateCondition(cc), &done);
+      __ int3();
+      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+      __ bind(&done);
+    } else {
+      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
+    }
+  }
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+  int length = deoptimizations_.length();
+  if (length == 0) return;
+  ASSERT(FLAG_deopt);
+  Handle<DeoptimizationInputData> data =
+      Factory::NewDeoptimizationInputData(length, TENURED);
+
+  data->SetTranslationByteArray(*translations_.CreateByteArray());
+  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+
+  Handle<FixedArray> literals =
+      Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
+  for (int i = 0; i < deoptimization_literals_.length(); i++) {
+    literals->set(i, *deoptimization_literals_[i]);
+  }
+  data->SetLiteralArray(*literals);
+
+  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
+  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+  // Populate the deoptimization entries.
+  for (int i = 0; i < length; i++) {
+    LEnvironment* env = deoptimizations_[i];
+    data->SetAstId(i, Smi::FromInt(env->ast_id()));
+    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+    data->SetArgumentsStackHeight(i,
+                                  Smi::FromInt(env->arguments_stack_height()));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = deoptimization_literals_.length();
+  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.Add(literal);
+  return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+  ASSERT(deoptimization_literals_.length() == 0);
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures =
+      chunk()->inlined_closures();
+
+  for (int i = 0, length = inlined_closures->length();
+       i < length;
+       i++) {
+    DefineDeoptimizationLiteral(inlined_closures->at(i));
+  }
+
+  inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               int deoptimization_index) {
+  const ZoneList<LOperand*>* operands = pointers->operands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+                                                    deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            int deoptimization_index) {
+  const ZoneList<LOperand*>* operands = pointers->operands();
+  Safepoint safepoint =
+      safepoints_.DefineSafepointWithRegisters(
+          masm(), arguments, deoptimization_index);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index());
+    } else if (pointer->IsRegister()) {
+      safepoint.DefinePointerRegister(ToRegister(pointer));
+    }
+  }
+  // Register esi always contains a pointer to the context.
+  safepoint.DefinePointerRegister(esi);
+}
+
+
+void LCodeGen::RecordPosition(int position) {
+  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  if (label->is_loop_header()) {
+    Comment(";;; B%d - LOOP entry", label->block_id());
+  } else {
+    Comment(";;; B%d", label->block_id());
+  }
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  LCodeGen::DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  // xmm0 must always be a scratch register.
+  XMMRegister xmm_scratch = xmm0;
+  LUnallocated marker_operand(LUnallocated::NONE);
+
+  Register cpu_scratch = esi;
+  bool destroys_cpu_scratch = false;
+
+  LGapResolver resolver(move->move_operands(), &marker_operand);
+  const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
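+  // The resolver breaks cycles among the moves by routing values through
+  // the marker operand; the cases below realize the marker as cpu_scratch
+  // for tagged/integer values and as xmm_scratch for doubles.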
+  for (int i = moves->length() - 1; i >= 0; --i) {
+    LMoveOperands move = moves->at(i);
+    LOperand* from = move.from();
+    LOperand* to = move.to();
+    ASSERT(!from->IsDoubleRegister() ||
+           !ToDoubleRegister(from).is(xmm_scratch));
+    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
+    ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
+    ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
+    if (from->IsConstantOperand()) {
+      __ mov(ToOperand(to), ToImmediate(from));
+    } else if (from == &marker_operand) {
+      if (to->IsRegister() || to->IsStackSlot()) {
+        __ mov(ToOperand(to), cpu_scratch);
+        ASSERT(destroys_cpu_scratch);
+      } else {
+        ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot());
+        __ movdbl(ToOperand(to), xmm_scratch);
+      }
+    } else if (to == &marker_operand) {
+      if (from->IsRegister() || from->IsStackSlot()) {
+        __ mov(cpu_scratch, ToOperand(from));
+        destroys_cpu_scratch = true;
+      } else {
+        ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot());
+        __ movdbl(xmm_scratch, ToOperand(from));
+      }
+    } else if (from->IsRegister()) {
+      __ mov(ToOperand(to), ToRegister(from));
+    } else if (to->IsRegister()) {
+      __ mov(ToRegister(to), ToOperand(from));
+    } else if (from->IsStackSlot()) {
+      ASSERT(to->IsStackSlot());
+      __ push(eax);
+      __ mov(eax, ToOperand(from));
+      __ mov(ToOperand(to), eax);
+      __ pop(eax);
+    } else if (from->IsDoubleRegister()) {
+      __ movdbl(ToOperand(to), ToDoubleRegister(from));
+    } else if (to->IsDoubleRegister()) {
+      __ movdbl(ToDoubleRegister(to), ToOperand(from));
+    } else {
+      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
+      __ movdbl(xmm_scratch, ToOperand(from));
+      __ movdbl(ToOperand(to), xmm_scratch);
+    }
+  }
+
+  if (destroys_cpu_scratch) {
+    __ mov(cpu_scratch, Operand(ebp, -kPointerSize));
+  }
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+
+  LInstruction* next = GetNextInstruction();
+  if (next != NULL && next->IsLazyBailout()) {
+    int pc = masm()->pc_offset();
+    safepoints_.SetPcAfterGap(pc);
+  }
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpConstructResult: {
+      RegExpConstructResultStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringCharAt: {
+      StringCharAtStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::MathPow: {
+      MathPowStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::NumberToString: {
+      NumberToStringStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringAdd: {
+      StringAddStub stub(NO_STRING_ADD_FLAGS);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringCompare: {
+      StringCompareStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::TranscendentalCache: {
+      TranscendentalCacheStub stub(instr->transcendental_type(),
+                                   TranscendentalCacheStub::TAGGED);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  LOperand* right = instr->right();
+  ASSERT(ToRegister(instr->result()).is(edx));
+  ASSERT(ToRegister(instr->left()).is(eax));
+  ASSERT(!ToRegister(instr->right()).is(eax));
+  ASSERT(!ToRegister(instr->right()).is(edx));
+
+  Register right_reg = ToRegister(right);
+
+  // Check for x % 0.
+  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(right_reg, ToOperand(right));
+    DeoptimizeIf(zero, instr->environment());
+  }
+
+  // Sign extend to edx.
+  __ cdq();
+
+  // Check for (0 % -x) that will produce negative zero.
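+  // (In JS, e.g. 0 % -1 evaluates to -0, which an untagged int32 cannot
+  // represent, hence the deoptimization below.)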
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    NearLabel positive_left;
+    NearLabel done;
+    __ test(eax, Operand(eax));
+    __ j(not_sign, &positive_left);
+    __ idiv(right_reg);
+
+    // Test the remainder for 0; a zero remainder means the result is -0.
+    __ test(edx, Operand(edx));
+    __ j(not_zero, &done);
+
+    DeoptimizeIf(no_condition, instr->environment());
+    __ bind(&positive_left);
+    __ idiv(right_reg);
+    __ bind(&done);
+  } else {
+    __ idiv(right_reg);
+  }
+}
+
+
+void LCodeGen::DoDivI(LDivI* instr) {
+  LOperand* right = instr->right();
+  ASSERT(ToRegister(instr->result()).is(eax));
+  ASSERT(ToRegister(instr->left()).is(eax));
+  ASSERT(!ToRegister(instr->right()).is(eax));
+  ASSERT(!ToRegister(instr->right()).is(edx));
+
+  Register left_reg = eax;
+
+  // Check for x / 0.
+  Register right_reg = ToRegister(right);
+  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(right_reg, ToOperand(right));
+    DeoptimizeIf(zero, instr->environment());
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    NearLabel left_not_zero;
+    __ test(left_reg, Operand(left_reg));
+    __ j(not_zero, &left_not_zero);
+    __ test(right_reg, ToOperand(right));
+    DeoptimizeIf(sign, instr->environment());
+    __ bind(&left_not_zero);
+  }
+
+  // Check for (-kMinInt / -1).
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    NearLabel left_not_min_int;
+    __ cmp(left_reg, kMinInt);
+    __ j(not_zero, &left_not_min_int);
+    __ cmp(right_reg, -1);
+    DeoptimizeIf(zero, instr->environment());
+    __ bind(&left_not_min_int);
+  }
+
+  // Sign extend to edx.
+  __ cdq();
+  __ idiv(right_reg);
+
+  // Deoptimize if remainder is not 0.
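+  // (This instruction only covers divisions with an integer result; a
+  // nonzero remainder means the optimized code cannot produce the exact
+  // JS value, so execution falls back to unoptimized code.)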
+  __ test(edx, Operand(edx));
+  DeoptimizeIf(not_zero, instr->environment());
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register left = ToRegister(instr->left());
+  LOperand* right = instr->right();
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ mov(ToRegister(instr->temp()), left);
+  }
+
+  if (right->IsConstantOperand()) {
+    __ imul(left, left, ToInteger32(LConstantOperand::cast(right)));
+  } else {
+    __ imul(left, ToOperand(right));
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr->environment());
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Bail out if the result is supposed to be negative zero.
+    NearLabel done;
+    __ test(left, Operand(left));
+    __ j(not_zero, &done);
+    if (right->IsConstantOperand()) {
+      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
+        DeoptimizeIf(no_condition, instr->environment());
+      }
+    } else {
+      // Test the non-zero operand for negative sign.
+      __ or_(ToRegister(instr->temp()), ToOperand(right));
+      DeoptimizeIf(sign, instr->environment());
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+  ASSERT(left->IsRegister());
+
+  if (right->IsConstantOperand()) {
+    int right_operand = ToInteger32(LConstantOperand::cast(right));
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ and_(ToRegister(left), right_operand);
+        break;
+      case Token::BIT_OR:
+        __ or_(ToRegister(left), right_operand);
+        break;
+      case Token::BIT_XOR:
+        __ xor_(ToRegister(left), right_operand);
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ and_(ToRegister(left), ToOperand(right));
+        break;
+      case Token::BIT_OR:
+        __ or_(ToRegister(left), ToOperand(right));
+        break;
+      case Token::BIT_XOR:
+        __ xor_(ToRegister(left), ToOperand(right));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+  ASSERT(left->IsRegister());
+  if (right->IsRegister()) {
+    ASSERT(ToRegister(right).is(ecx));
+
+    switch (instr->op()) {
+      case Token::SAR:
+        __ sar_cl(ToRegister(left));
+        break;
+      case Token::SHR:
+        __ shr_cl(ToRegister(left));
+        if (instr->can_deopt()) {
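+          // A logical right shift yields an unsigned result; if the top
+          // bit is set it is not representable as a signed 32-bit
+          // integer, so deoptimize rather than produce a negative value.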
+          __ test(ToRegister(left), Immediate(0x80000000));
+          DeoptimizeIf(not_zero, instr->environment());
+        }
+        break;
+      case Token::SHL:
+        __ shl_cl(ToRegister(left));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    int value = ToInteger32(LConstantOperand::cast(right));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ sar(ToRegister(left), shift_count);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count == 0 && instr->can_deopt()) {
+          __ test(ToRegister(left), Immediate(0x80000000));
+          DeoptimizeIf(not_zero, instr->environment());
+        } else {
+          __ shr(ToRegister(left), shift_count);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          __ shl(ToRegister(left), shift_count);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+
+  if (right->IsConstantOperand()) {
+    __ sub(ToOperand(left), ToImmediate(right));
+  } else {
+    __ sub(ToRegister(left), ToOperand(right));
+  }
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ mov(ToRegister(instr->result()), instr->value());
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  ASSERT(instr->result()->IsDoubleRegister());
+  XMMRegister res = ToDoubleRegister(instr->result());
+  double v = instr->value();
+  // Use xor to produce +0.0 in a fast and compact way, but avoid
+  // doing so if the constant is -0.0.
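+  // (-0.0 has only the sign bit set, so its bit pattern is nonzero and it
+  // takes the general path below.)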
+  if (BitCast<uint64_t, double>(v) == 0) {
+    __ xorpd(res, res);
+  } else {
+    int32_t v_int32 = static_cast<int32_t>(v);
+    if (static_cast<double>(v_int32) == v) {
+      __ push_imm32(v_int32);
+      __ cvtsi2sd(res, Operand(esp, 0));
+      __ add(Operand(esp), Immediate(kPointerSize));
+    } else {
+      uint64_t int_val = BitCast<uint64_t, double>(v);
+      int32_t lower = static_cast<int32_t>(int_val);
+      int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+      __ push_imm32(upper);
+      __ push_imm32(lower);
+      __ movdbl(res, Operand(esp, 0));
+      __ add(Operand(esp), Immediate(2 * kPointerSize));
+    }
+  }
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  ASSERT(instr->result()->IsRegister());
+  __ mov(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoArrayLength(LArrayLength* instr) {
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->value()->IsLoadElements()) {
+    // We load the length directly from the elements array.
+    Register elements = ToRegister(instr->input());
+    __ mov(result, FieldOperand(elements, FixedArray::kLengthOffset));
+  } else {
+    // Check that the receiver really is an array.
+    Register array = ToRegister(instr->input());
+    Register temporary = ToRegister(instr->temporary());
+    __ CmpObjectType(array, JS_ARRAY_TYPE, temporary);
+    DeoptimizeIf(not_equal, instr->environment());
+
+    // Load length directly from the array.
+    __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
+  }
+}
+
+
+void LCodeGen::DoValueOf(LValueOf* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->temporary());
+  ASSERT(input.is(result));
+  NearLabel done;
+  // If the object is a smi, return the object.
+  __ test(input, Immediate(kSmiTagMask));
+  __ j(zero, &done);
+
+  // If the object is not a value type, return the object.
+  __ CmpObjectType(input, JS_VALUE_TYPE, map);
+  __ j(not_equal, &done);
+  __ mov(result, FieldOperand(input, JSValue::kValueOffset));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoBitNotI(LBitNotI* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->Equals(instr->result()));
+  __ not_(ToRegister(input));
+}
+
+
+void LCodeGen::DoThrow(LThrow* instr) {
+  __ push(ToOperand(instr->input()));
+  CallRuntime(Runtime::kThrow, 1, instr);
+
+  if (FLAG_debug_code) {
+    Comment("Unreachable code.");
+    __ int3();
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  ASSERT(left->Equals(instr->result()));
+
+  if (right->IsConstantOperand()) {
+    __ add(ToOperand(left), ToImmediate(right));
+  } else {
+    __ add(ToRegister(left), ToOperand(right));
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  // Modulo uses a fixed result register.
+  ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
+  switch (instr->op()) {
+    case Token::ADD:
+      __ addsd(ToDoubleRegister(left), ToDoubleRegister(right));
+      break;
+    case Token::SUB:
+       __ subsd(ToDoubleRegister(left), ToDoubleRegister(right));
+       break;
+    case Token::MUL:
+      __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right));
+      break;
+    case Token::DIV:
+      __ divsd(ToDoubleRegister(left), ToDoubleRegister(right));
+      break;
+    case Token::MOD: {
+      // Pass two doubles as arguments on the stack.
+      __ PrepareCallCFunction(4, eax);
+      __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+      __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
+      __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
+
+      // Return value is in st(0) on ia32.
+      // Store it into the (fixed) result register.
+      __ sub(Operand(esp), Immediate(kDoubleSize));
+      __ fstp_d(Operand(esp, 0));
+      __ movdbl(ToDoubleRegister(instr->result()), Operand(esp, 0));
+      __ add(Operand(esp), Immediate(kDoubleSize));
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  ASSERT(ToRegister(instr->left()).is(edx));
+  ASSERT(ToRegister(instr->right()).is(eax));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+int LCodeGen::GetNextEmittedBlock(int block) {
+  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
+    LLabel* label = chunk_->GetLabel(i);
+    if (!label->HasReplacement()) return i;
+  }
+  return -1;
+}
+
+
+void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
+  int next_block = GetNextEmittedBlock(current_block_);
+  right_block = chunk_->LookupDestination(right_block);
+  left_block = chunk_->LookupDestination(left_block);
+
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+  } else if (right_block == next_block) {
+    __ j(cc, chunk_->GetAssemblyLabel(left_block));
+  } else {
+    __ j(cc, chunk_->GetAssemblyLabel(left_block));
+    __ jmp(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Representation r = instr->hydrogen()->representation();
+  if (r.IsInteger32()) {
+    Register reg = ToRegister(instr->input());
+    __ test(reg, Operand(reg));
+    EmitBranch(true_block, false_block, not_zero);
+  } else if (r.IsDouble()) {
+    XMMRegister reg = ToDoubleRegister(instr->input());
+    __ xorpd(xmm0, xmm0);
+    __ ucomisd(reg, xmm0);
+    EmitBranch(true_block, false_block, not_equal);
+  } else {
+    ASSERT(r.IsTagged());
+    Register reg = ToRegister(instr->input());
+    if (instr->hydrogen()->type().IsBoolean()) {
+      __ cmp(reg, Factory::true_value());
+      EmitBranch(true_block, false_block, equal);
+    } else {
+      Label* true_label = chunk_->GetAssemblyLabel(true_block);
+      Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
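+      // Inline the common ToBoolean cases: undefined, false, and the smi
+      // zero are false; true and all other smis are true. Heap numbers
+      // compare against 0.0 (NaN compares unordered and is also false);
+      // everything else is handled by the ToBoolean stub.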
+      __ cmp(reg, Factory::undefined_value());
+      __ j(equal, false_label);
+      __ cmp(reg, Factory::true_value());
+      __ j(equal, true_label);
+      __ cmp(reg, Factory::false_value());
+      __ j(equal, false_label);
+      __ test(reg, Operand(reg));
+      __ j(equal, false_label);
+      __ test(reg, Immediate(kSmiTagMask));
+      __ j(zero, true_label);
+
+      // Test for double values. Zero is false.
+      NearLabel call_stub;
+      __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+             Factory::heap_number_map());
+      __ j(not_equal, &call_stub);
+      __ fldz();
+      __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
+      __ FCmp();
+      __ j(zero, false_label);
+      __ jmp(true_label);
+
+      // The conversion stub doesn't cause garbage collections so it's
+      // safe to not record a safepoint after the call.
+      __ bind(&call_stub);
+      ToBooleanStub stub;
+      __ pushad();
+      __ push(reg);
+      __ CallStub(&stub);
+      __ test(eax, Operand(eax));
+      __ popad();
+      EmitBranch(true_block, false_block, not_zero);
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+  block = chunk_->LookupDestination(block);
+  int next_block = GetNextEmittedBlock(current_block_);
+  if (block != next_block) {
+    // Perform stack overflow check if this goto needs it before jumping.
+    if (deferred_stack_check != NULL) {
+      ExternalReference stack_limit =
+          ExternalReference::address_of_stack_limit();
+      __ cmp(esp, Operand::StaticVariable(stack_limit));
+      __ j(above_equal, chunk_->GetAssemblyLabel(block));
+      __ jmp(deferred_stack_check->entry());
+      deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
+    } else {
+      __ jmp(chunk_->GetAssemblyLabel(block));
+    }
+  }
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
+  __ pushad();
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  __ popad();
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  class DeferredStackCheck: public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+   private:
+    LGoto* instr_;
+  };
+
+  DeferredStackCheck* deferred = NULL;
+  if (instr->include_stack_check()) {
+    deferred = new DeferredStackCheck(this, instr);
+  }
+  EmitGoto(instr->block_id(), deferred);
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = no_condition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = equal;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? below : less;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? above : greater;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? below_equal : less_equal;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? above_equal : greater_equal;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
+  if (right->IsConstantOperand()) {
+    __ cmp(ToOperand(left), ToImmediate(right));
+  } else {
+    __ cmp(ToRegister(left), ToOperand(right));
+  }
+}
+
+
+void LCodeGen::DoCmpID(LCmpID* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+
+  NearLabel unordered;
+  if (instr->is_double()) {
+    // Don't base result on EFLAGS when a NaN is involved. Instead
+    // jump to the unordered case, which produces a false value.
+    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+    __ j(parity_even, &unordered, not_taken);
+  } else {
+    EmitCmpI(left, right);
+  }
+
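+  // Materialize the result: load true_value first and overwrite it with
+  // false_value when the condition does not hold.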
+  NearLabel done;
+  Condition cc = TokenToCondition(instr->op(), instr->is_double());
+  __ mov(ToRegister(result), Handle<Object>(Heap::true_value()));
+  __ j(cc, &done);
+
+  __ bind(&unordered);
+  __ mov(ToRegister(result), Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  if (instr->is_double()) {
+    // Don't base result on EFLAGS when a NaN is involved. Instead
+    // jump to the false block.
+    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+    __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+  } else {
+    EmitCmpI(left, right);
+  }
+
+  Condition cc = TokenToCondition(instr->op(), instr->is_double());
+  EmitBranch(true_block, false_block, cc);
+}
+
+
+void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+  Register result = ToRegister(instr->result());
+
+  __ cmp(left, Operand(right));
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  NearLabel done;
+  __ j(equal, &done);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+  __ cmp(left, Operand(right));
+  EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::DoIsNull(LIsNull* instr) {
+  Register reg = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+
+  // TODO(fsc): If the expression is known to be a smi, then it's
+  // definitely not null. Materialize false.
+
+  __ cmp(reg, Factory::null_value());
+  if (instr->is_strict()) {
+    __ mov(result, Handle<Object>(Heap::true_value()));
+    NearLabel done;
+    __ j(equal, &done);
+    __ mov(result, Handle<Object>(Heap::false_value()));
+    __ bind(&done);
+  } else {
+    NearLabel true_value, false_value, done;
+    __ j(equal, &true_value);
+    __ cmp(reg, Factory::undefined_value());
+    __ j(equal, &true_value);
+    __ test(reg, Immediate(kSmiTagMask));
+    __ j(zero, &false_value);
+    // Check for undetectable objects by looking in the bit field in
+    // the map. The object has already been smi checked.
+    Register scratch = result;
+    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
+    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
+    __ j(not_zero, &true_value);
+    __ bind(&false_value);
+    __ mov(result, Handle<Object>(Heap::false_value()));
+    __ jmp(&done);
+    __ bind(&true_value);
+    __ mov(result, Handle<Object>(Heap::true_value()));
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+  Register reg = ToRegister(instr->input());
+
+  // TODO(fsc): If the expression is known to be a smi, then it's
+  // definitely not null. Jump to the false block.
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ cmp(reg, Factory::null_value());
+  if (instr->is_strict()) {
+    EmitBranch(true_block, false_block, equal);
+  } else {
+    Label* true_label = chunk_->GetAssemblyLabel(true_block);
+    Label* false_label = chunk_->GetAssemblyLabel(false_block);
+    __ j(equal, true_label);
+    __ cmp(reg, Factory::undefined_value());
+    __ j(equal, true_label);
+    __ test(reg, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    // Check for undetectable objects by looking in the bit field in
+    // the map. The object has already been smi checked.
+    Register scratch = ToRegister(instr->temp());
+    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
+    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
+    EmitBranch(true_block, false_block, not_zero);
+  }
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+                                 Register temp1,
+                                 Register temp2,
+                                 Label* is_not_object,
+                                 Label* is_object) {
+  ASSERT(!input.is(temp1));
+  ASSERT(!input.is(temp2));
+  ASSERT(!temp1.is(temp2));
+
+  __ test(input, Immediate(kSmiTagMask));
+  __ j(equal, is_not_object);
+
+  __ cmp(input, Factory::null_value());
+  __ j(equal, is_object);
+
+  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined.
+  __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
+  __ test(temp2, Immediate(1 << Map::kIsUndetectable));
+  __ j(not_zero, is_not_object);
+
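+  // Finally, test that the instance type is within the JS object range.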
+  __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
+  __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
+  __ j(below, is_not_object);
+  __ cmp(temp2, LAST_JS_OBJECT_TYPE);
+  return below_equal;
+}
+
+
+void LCodeGen::DoIsObject(LIsObject* instr) {
+  Register reg = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  Register temp = ToRegister(instr->temp());
+  Label is_false, is_true, done;
+
+  Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
+  __ j(true_cond, &is_true);
+
+  __ bind(&is_false);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ jmp(&done);
+
+  __ bind(&is_true);
+  __ mov(result, Handle<Object>(Heap::true_value()));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+  Register reg = ToRegister(instr->input());
+  Register temp = ToRegister(instr->temp());
+  Register temp2 = ToRegister(instr->temp2());
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
+
+  EmitBranch(true_block, false_block, true_cond);
+}
+
+
+void LCodeGen::DoIsSmi(LIsSmi* instr) {
+  Operand input = ToOperand(instr->input());
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  __ test(input, Immediate(kSmiTagMask));
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  NearLabel done;
+  __ j(zero, &done);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Operand input = ToOperand(instr->input());
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ test(input, Immediate(kSmiTagMask));
+  EmitBranch(true_block, false_block, zero);
+}
+
+
+InstanceType LHasInstanceType::TestType() {
+  InstanceType from = hydrogen()->from();
+  InstanceType to = hydrogen()->to();
+  if (from == FIRST_TYPE) return to;
+  ASSERT(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+Condition LHasInstanceType::BranchCondition() {
+  InstanceType from = hydrogen()->from();
+  InstanceType to = hydrogen()->to();
+  if (from == to) return equal;
+  if (to == LAST_TYPE) return above_equal;
+  if (from == FIRST_TYPE) return below_equal;
+  UNREACHABLE();
+  return equal;
+}
+
+
+void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  __ test(input, Immediate(kSmiTagMask));
+  NearLabel done, is_false;
+  __ j(zero, &is_false);
+  __ CmpObjectType(input, instr->TestType(), result);
+  __ j(NegateCondition(instr->BranchCondition()), &is_false);
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ jmp(&done);
+  __ bind(&is_false);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+  Register temp = ToRegister(instr->temp());
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  __ test(input, Immediate(kSmiTagMask));
+  __ j(zero, false_label);
+
+  __ CmpObjectType(input, instr->TestType(), temp);
+  EmitBranch(true_block, false_block, instr->BranchCondition());
+}
+
+
+void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  // The mask bits are all zero when the string contains a cached array index.
+  __ test(FieldOperand(input, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  NearLabel done;
+  __ j(zero, &done);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  // The mask bits are all zero when the string contains a cached array index.
+  __ test(FieldOperand(input, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  EmitBranch(true_block, false_block, equal);
+}
+
+
+// Branches to a label or falls through with the answer in the z flag.  Trashes
+// the temp registers, but not the input.  Only input and temp2 may alias.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  ASSERT(!input.is(temp));
+  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
+  __ test(input, Immediate(kSmiTagMask));
+  __ j(zero, is_false);
+  __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+  __ j(below, is_false);
+
+  // Map is now in temp.
+  // Functions have class 'Function'.
+  __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+  if (class_name->IsEqualTo(CStrVector("Function"))) {
+    __ j(equal, is_true);
+  } else {
+    __ j(equal, is_false);
+  }
+
+  // Check if the constructor in the map is a function.
+  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
+  if (class_name->IsEqualTo(CStrVector("Object"))) {
+    __ j(not_equal, is_true);
+  } else {
+    __ j(not_equal, is_false);
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(temp, FieldOperand(temp,
+                            SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is a symbol because it's a literal.
+  // The name in the constructor is a symbol because of the way the context is
+  // booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are symbols it is sufficient to use an identity
+  // comparison.
+  __ cmp(temp, class_name);
+  // End with the answer in the z flag.
+}
+
+
+void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  ASSERT(input.is(result));
+  Register temp = ToRegister(instr->temporary());
+  Handle<String> class_name = instr->hydrogen()->class_name();
+  NearLabel done;
+  Label is_true, is_false;
+
+  EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
+
+  __ j(not_equal, &is_false);
+
+  __ bind(&is_true);
+  __ mov(result, Handle<Object>(Heap::true_value()));
+  __ jmp(&done);
+
+  __ bind(&is_false);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+  Register temp = ToRegister(instr->temporary());
+  Register temp2 = ToRegister(instr->temporary2());
+  if (input.is(temp)) {
+    // Swap.
+    Register swapper = temp;
+    temp = temp2;
+    temp2 = swapper;
+  }
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+
+  EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->input());
+  int true_block = instr->true_block_id();
+  int false_block = instr->false_block_id();
+
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
+  EmitBranch(true_block, false_block, equal);
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  // Object and function are in fixed registers eax and edx.
+  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
+  NearLabel true_value, done;
+  __ test(eax, Operand(eax));
+  __ j(zero, &true_value);
+  __ mov(ToRegister(instr->result()), Factory::false_value());
+  __ jmp(&done);
+  __ bind(&true_value);
+  __ mov(ToRegister(instr->result()), Factory::true_value());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ test(eax, Operand(eax));
+  EmitBranch(true_block, false_block, zero);
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      return greater;
+    case Token::LTE:
+      return less_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
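+  // The compare stub expects compare condition and the input operands
+  // reversed for GT and LTE.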
+  Condition condition = ComputeCompareCondition(op);
+  if (op == Token::GT || op == Token::LTE) {
+    condition = ReverseCondition(condition);
+  }
+  NearLabel true_value, done;
+  __ test(eax, Operand(eax));
+  __ j(condition, &true_value);
+  __ mov(ToRegister(instr->result()), Factory::false_value());
+  __ jmp(&done);
+  __ bind(&true_value);
+  __ mov(ToRegister(instr->result()), Factory::true_value());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  // The compare stub expects compare condition and the input operands
+  // reversed for GT and LTE.
+  Condition condition = ComputeCompareCondition(op);
+  if (op == Token::GT || op == Token::LTE) {
+    condition = ReverseCondition(condition);
+  }
+  __ test(eax, Operand(eax));
+  EmitBranch(true_block, false_block, condition);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace) {
+    // Preserve the return value on the stack and rely on the runtime
+    // call to return the value in the same register.
+    __ push(eax);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+  __ mov(esp, ebp);
+  __ pop(ebp);
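+  // Pop the parameters and the receiver off the stack.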
+  __ ret((ParameterCount() + 1) * kPointerSize);
+}
+
+
+void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
+  if (instr->hydrogen()->check_hole_value()) {
+    __ cmp(result, Factory::the_hole_value());
+    DeoptimizeIf(equal, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+  Register value = ToRegister(instr->input());
+  __ mov(Operand::Cell(instr->hydrogen()->cell()), value);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  Register object = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  if (instr->hydrogen()->is_in_object()) {
+    __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
+  } else {
+    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
+    __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
+  }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(eax));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  __ mov(ecx, instr->name());
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadElements(LLoadElements* instr) {
+  ASSERT(instr->result()->Equals(instr->input()));
+  Register reg = ToRegister(instr->input());
+  __ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
+  if (FLAG_debug_code) {
+    NearLabel done;
+    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Immediate(Factory::fixed_array_map()));
+    __ j(equal, &done);
+    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Immediate(Factory::fixed_cow_array_map()));
+    __ Check(equal, "Check for fast elements failed.");
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register length = ToRegister(instr->length());
+  Operand index = ToOperand(instr->index());
+  Register result = ToRegister(instr->result());
+
+  __ sub(length, index);
+  DeoptimizeIf(below_equal, instr->environment());
+
+  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
+}
+
+
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+  Register elements = ToRegister(instr->elements());
+  Register key = ToRegister(instr->key());
+  Register result;
+  if (instr->load_result() != NULL) {
+    result = ToRegister(instr->load_result());
+  } else {
+    result = ToRegister(instr->result());
+    ASSERT(result.is(elements));
+  }
+
+  // Load the result.
+  __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+
+  Representation r = instr->hydrogen()->representation();
+  if (r.IsInteger32()) {
+    // Untag and check for smi.
+    __ SmiUntag(result);
+    DeoptimizeIf(carry, instr->environment());
+  } else if (r.IsDouble()) {
+    EmitNumberUntagD(result,
+                     ToDoubleRegister(instr->result()),
+                     instr->environment());
+  } else {
+    // Check for the hole value.
+    ASSERT(r.IsTagged());
+    __ cmp(result, Factory::the_hole_value());
+    DeoptimizeIf(equal, instr->environment());
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(edx));
+  ASSERT(ToRegister(instr->key()).is(eax));
+
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register result = ToRegister(instr->result());
+
+  // Check for arguments adapter frame.
+  Label done, adapted;
+  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+  __ cmp(Operand(result),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &adapted);
+
+  // No arguments adaptor frame.
+  __ mov(result, Operand(ebp));
+  __ jmp(&done);
+
+  // Arguments adaptor frame present.
+  __ bind(&adapted);
+  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+  // Done. Pointer to topmost argument is in result.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Operand elem = ToOperand(instr->input());
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // No arguments adaptor frame. Number of arguments is fixed.
+  __ cmp(ebp, elem);
+  __ mov(result, Immediate(scope()->num_parameters()));
+  __ j(equal, &done);
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(result, Operand(result,
+                         ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Done. Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  ASSERT(ToRegister(instr->function()).is(edi));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  // If the receiver is null or undefined, we have to pass the
+  // global object as a receiver.
+  NearLabel global_receiver, receiver_ok;
+  __ cmp(receiver, Factory::null_value());
+  __ j(equal, &global_receiver);
+  __ cmp(receiver, Factory::undefined_value());
+  __ j(not_equal, &receiver_ok);
+  __ bind(&global_receiver);
+  __ mov(receiver, GlobalObjectOperand());
+  __ bind(&receiver_ok);
+
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+
+  Label invoke;
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
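+  // Deoptimize for unreasonably large argument counts instead of growing
+  // the stack without bound.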
+  __ cmp(length, kArgumentsLimit);
+  DeoptimizeIf(above, instr->environment());
+
+  __ push(receiver);
+  __ mov(receiver, length);
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label loop;
+  // length is a small non-negative integer, due to the test above.
+  __ test(length, Operand(length));
+  __ j(zero, &invoke);
+  __ bind(&loop);
+  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
+  __ dec(length);
+  __ j(not_zero, &loop);
+
+  // Invoke the function.
+  __ bind(&invoke);
+  ASSERT(receiver.is(eax));
+  v8::internal::ParameterCount actual(eax);
+  SafepointGenerator safepoint_generator(this,
+                                         instr->pointer_map(),
+                                         Safepoint::kNoDeoptimizationIndex);
+  __ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->input();
+  if (argument->IsConstantOperand()) {
+    __ push(ToImmediate(argument));
+  } else {
+    __ push(ToOperand(argument));
+  }
+}
+
+
+void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+}
+
+
+void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int arity,
+                                 LInstruction* instr) {
+  // Change context if needed.
+  bool change_context =
+      (graph()->info()->closure()->context() != function->context()) ||
+      scope()->contains_with() ||
+      (scope()->num_heap_slots() > 0);
+  if (change_context) {
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+  }
+
+  // Set eax to arguments count if adaptation is not needed. Assumes that eax
+  // is available to write to at this point.
+  if (!function->NeedsArgumentsAdaption()) {
+    __ mov(eax, arity);
+  }
+
+  LPointerMap* pointers = instr->pointer_map();
+  RecordPosition(pointers->position());
+
+  // Invoke function.
+  if (*function == *graph()->info()->closure()) {
+    __ CallSelf();
+  } else {
+    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  }
+
+  // Set up deoptimization.
+  RegisterLazyDeoptimization(instr);
+
+  // Restore context.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+  __ mov(edi, instr->function());
+  CallKnownFunction(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+  Register input_reg = ToRegister(instr->input());
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  DeoptimizeIf(not_equal, instr->environment());
+
+  Label done;
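+  // Choose two temporaries that are distinct from input_reg and from each
+  // other.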
+  Register tmp = input_reg.is(eax) ? ecx : eax;
+  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
+
+  // Preserve the value of all registers.
+  __ PushSafepointRegisters();
+
+  Label negative;
+  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive,
+  // just return it.
+  __ test(tmp, Immediate(HeapNumber::kSignMask));
+  __ j(not_zero, &negative);
+  __ mov(tmp, input_reg);
+  __ jmp(&done);
+
+  __ bind(&negative);
+
+  Label allocated, slow;
+  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
+  __ jmp(&allocated);
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  // Set the pointer to the new heap number in tmp.
+  if (!tmp.is(eax)) __ mov(tmp, eax);
+
+  // Restore input_reg after call to runtime.
+  __ mov(input_reg, Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize));
+
+  __ bind(&allocated);
+  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  __ and_(tmp2, ~HeapNumber::kSignMask);
+  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
+  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
+
+  __ bind(&done);
+  __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
+
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+                                    LUnaryMathOperation* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+   private:
+    LUnaryMathOperation* instr_;
+  };
+
+  ASSERT(instr->input()->Equals(instr->result()));
+  Representation r = instr->hydrogen()->value()->representation();
+
+  if (r.IsDouble()) {
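+    // x and -x differ only in the sign bit, so ANDing them clears the sign
+    // and yields abs(x).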
+    XMMRegister scratch = xmm0;
+    XMMRegister input_reg = ToDoubleRegister(instr->input());
+    __ pxor(scratch, scratch);
+    __ subsd(scratch, input_reg);
+    __ pand(input_reg, scratch);
+  } else if (r.IsInteger32()) {
+    Register input_reg = ToRegister(instr->input());
+    __ test(input_reg, Operand(input_reg));
+    Label is_positive;
+    __ j(not_sign, &is_positive);
+    __ neg(input_reg);
+    __ test(input_reg, Operand(input_reg));
+    DeoptimizeIf(negative, instr->environment());
+    __ bind(&is_positive);
+  } else {  // Tagged case.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new DeferredMathAbsTaggedHeapNumber(this, instr);
+    Label not_smi;
+    Register input_reg = ToRegister(instr->input());
+    // Smi check.
+    __ test(input_reg, Immediate(kSmiTagMask));
+    __ j(not_zero, deferred->entry());
+    __ test(input_reg, Operand(input_reg));
+    Label is_positive;
+    __ j(not_sign, &is_positive);
+    __ neg(input_reg);
+
+    __ test(input_reg, Operand(input_reg));
+    DeoptimizeIf(negative, instr->environment());
+
+    __ bind(&is_positive);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+  XMMRegister xmm_scratch = xmm0;
+  Register output_reg = ToRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+  __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
+  __ ucomisd(input_reg, xmm_scratch);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(below_equal, instr->environment());
+  } else {
+    DeoptimizeIf(below, instr->environment());
+  }
+
+  // Use truncating instruction (OK because input is positive).
+  __ cvttsd2si(output_reg, Operand(input_reg));
+
+  // Overflow is signalled with minint.
+  __ cmp(output_reg, 0x80000000u);
+  DeoptimizeIf(equal, instr->environment());
+}
+
+
+void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+  XMMRegister xmm_scratch = xmm0;
+  Register output_reg = ToRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+
+  // xmm_scratch = 0.5
+  ExternalReference one_half = ExternalReference::address_of_one_half();
+  __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+
+  // input = input + 0.5
+  __ addsd(input_reg, xmm_scratch);
+
+  // We need to return -0 for the input range [-0.5, 0), otherwise
+  // compute Math.floor(value + 0.5).
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(below_equal, instr->environment());
+  } else {
+    // If we don't need to bail out on -0, we only bail out on
+    // negative inputs.
+    __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(below, instr->environment());
+  }
+
+  // Compute Math.floor(value + 0.5).
+  // Use truncating instruction (OK because input is positive).
+  __ cvttsd2si(output_reg, Operand(input_reg));
+
+  // Overflow is signalled with minint.
+  __ cmp(output_reg, 0x80000000u);
+  DeoptimizeIf(equal, instr->environment());
+}
+
+
+void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+  __ sqrtsd(input_reg, input_reg);
+}
+
+
+void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+  XMMRegister xmm_scratch = xmm0;
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
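+  // Math.pow(-Infinity, 0.5) is +Infinity, but sqrtsd of -Infinity yields
+  // NaN, so deoptimize for that input.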
+  ExternalReference negative_infinity =
+      ExternalReference::address_of_negative_infinity();
+  __ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity));
+  __ ucomisd(xmm_scratch, input_reg);
+  DeoptimizeIf(equal, instr->environment());
+  __ sqrtsd(input_reg, input_reg);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DoubleRegister result_reg = ToDoubleRegister(instr->result());
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  if (exponent_type.IsDouble()) {
+    // It is safe to use ebx directly since the instruction is marked
+    // as a call.
+    __ PrepareCallCFunction(4, ebx);
+    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+    __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
+    __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+  } else if (exponent_type.IsInteger32()) {
+    // It is safe to use ebx directly since the instruction is marked
+    // as a call.
+    ASSERT(!ToRegister(right).is(ebx));
+    __ PrepareCallCFunction(4, ebx);
+    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+    __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
+    __ CallCFunction(ExternalReference::power_double_int_function(), 4);
+  } else {
+    ASSERT(exponent_type.IsTagged());
+    CpuFeatures::Scope scope(SSE2);
+    Register right_reg = ToRegister(right);
+
+    Label non_smi, call;
+    __ test(right_reg, Immediate(kSmiTagMask));
+    __ j(not_zero, &non_smi);
+    __ SmiUntag(right_reg);
+    __ cvtsi2sd(result_reg, Operand(right_reg));
+    __ jmp(&call);
+
+    __ bind(&non_smi);
+    // It is safe to use ebx directly since the instruction is marked
+    // as a call.
+    ASSERT(!right_reg.is(ebx));
+    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, ebx);
+    DeoptimizeIf(not_equal, instr->environment());
+    __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
+
+    __ bind(&call);
+    __ PrepareCallCFunction(4, ebx);
+    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+    __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
+    __ CallCFunction(ExternalReference::power_double_double_function(), 4);
+  }
+
+  // Return value is in st(0) on ia32.
+  // Store it into the (fixed) result register.
+  __ sub(Operand(esp), Immediate(kDoubleSize));
+  __ fstp_d(Operand(esp, 0));
+  __ movdbl(result_reg, Operand(esp, 0));
+  __ add(Operand(esp), Immediate(kDoubleSize));
+}
+
+
+void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::COS,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::SIN,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathAbs:
+      DoMathAbs(instr);
+      break;
+    case kMathFloor:
+      DoMathFloor(instr);
+      break;
+    case kMathRound:
+      DoMathRound(instr);
+      break;
+    case kMathSqrt:
+      DoMathSqrt(instr);
+      break;
+    case kMathPowHalf:
+      DoMathPowHalf(instr);
+      break;
+    case kMathCos:
+      DoMathCos(instr);
+      break;
+    case kMathSin:
+      DoMathSin(instr);
+      break;
+    case kMathLog:
+      DoMathLog(instr);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallNamed(LCallNamed* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+  __ mov(ecx, instr->name());
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ Drop(1);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
+  __ mov(ecx, instr->name());
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
+  ASSERT(ToRegister(instr->result()).is(eax));
+  __ mov(edi, instr->target());
+  CallKnownFunction(instr->target(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+  ASSERT(ToRegister(instr->input()).is(edi));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
+  __ Set(eax, Immediate(instr->arity()));
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Register object = ToRegister(instr->object());
+  Register value = ToRegister(instr->value());
+  int offset = instr->offset();
+
+  if (!instr->transition().is_null()) {
+    __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
+  }
+
+  // Do the store.
+  if (instr->is_in_object()) {
+    __ mov(FieldOperand(object, offset), value);
+    if (instr->needs_write_barrier()) {
+      Register temp = ToRegister(instr->temp());
+      // Update the write barrier for the object for in-object properties.
+      __ RecordWrite(object, offset, value, temp);
+    }
+  } else {
+    Register temp = ToRegister(instr->temp());
+    __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
+    __ mov(FieldOperand(temp, offset), value);
+    if (instr->needs_write_barrier()) {
+      // Update the write barrier for the properties array.
+      // object is used as a scratch register.
+      __ RecordWrite(temp, offset, value, object);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(edx));
+  ASSERT(ToRegister(instr->value()).is(eax));
+
+  __ mov(ecx, instr->name());
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
+  DeoptimizeIf(above_equal, instr->environment());
+}
+
+
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->object());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    int offset =
+        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
+    __ mov(FieldOperand(elements, offset), value);
+  } else {
+    __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
+           value);
+  }
+
+  // Update the write barrier unless we're certain that we're storing a smi.
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    // Compute address of modified element and store it into key register.
+    __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
+    __ RecordWrite(elements, key, value);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->object()).is(edx));
+  ASSERT(ToRegister(instr->key()).is(ecx));
+  ASSERT(ToRegister(instr->value()).is(eax));
+
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  ASSERT(output->IsDoubleRegister());
+  __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI: public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+   private:
+    LNumberTagI* instr_;
+  };
+
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
+
+  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
+  __ SmiTag(reg);
+  __ j(overflow, deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
+  Label slow;
+  Register reg = ToRegister(instr->input());
+  Register tmp = reg.is(eax) ? ecx : eax;
+
+  // Preserve the value of all registers.
+  __ PushSafepointRegisters();
+
+  // There was overflow, so bits 30 and 31 of the original integer
+  // disagree. Try to allocate a heap number in new space and store
+  // the value in there. If that fails, call the runtime system.
+  NearLabel done;
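+  // Undo the tagging shift and flip the top bit to recover the original
+  // int32 value.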
+  __ SmiUntag(reg);
+  __ xor_(reg, 0x80000000);
+  __ cvtsi2sd(xmm0, Operand(reg));
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
+    __ jmp(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+
+  // TODO(3095996): Put a valid pointer value in the stack slot where the result
+  // register is stored, as this register is in the pointer map, but contains an
+  // integer value.
+  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));
+
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  if (!reg.is(eax)) __ mov(reg, eax);
+
+  // Done. Put the value in xmm0 into the value of the allocated heap
+  // number.
+  __ bind(&done);
+  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), reg);
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD: public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+   private:
+    LNumberTagD* instr_;
+  };
+
+  XMMRegister input_reg = ToDoubleRegister(instr->input());
+  Register reg = ToRegister(instr->result());
+  Register tmp = ToRegister(instr->temp());
+
+  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
+  } else {
+    __ jmp(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ Set(reg, Immediate(0));
+
+  __ PushSafepointRegisters();
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), eax);
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
+  __ SmiTag(ToRegister(input));
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  if (instr->needs_check()) {
+    __ test(ToRegister(input), Immediate(kSmiTagMask));
+    DeoptimizeIf(not_zero, instr->environment());
+  }
+  __ SmiUntag(ToRegister(input));
+}
+
+
+void LCodeGen::EmitNumberUntagD(Register input_reg,
+                                XMMRegister result_reg,
+                                LEnvironment* env) {
+  NearLabel load_smi, heap_number, done;
+
+  // Smi check.
+  __ test(input_reg, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi, not_taken);
+
+  // Heap number map check.
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+  __ j(equal, &heap_number);
+
+  __ cmp(input_reg, Factory::undefined_value());
+  DeoptimizeIf(not_equal, env);
+
+  // Convert undefined to NaN.
+  __ push(input_reg);
+  __ mov(input_reg, Factory::nan_value());
+  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ pop(input_reg);
+  __ jmp(&done);
+
+  // Heap number to XMM conversion.
+  __ bind(&heap_number);
+  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  // Smi to XMM conversion.
+  __ bind(&load_smi);
+  __ SmiUntag(input_reg);  // Untag smi before converting to float.
+  __ cvtsi2sd(result_reg, Operand(input_reg));
+  __ SmiTag(input_reg);  // Retag smi.
+  __ bind(&done);
+}
+
+
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+      : LDeferredCode(codegen), instr_(instr) { }
+  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+  LTaggedToI* instr_;
+};
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  NearLabel done, heap_number;
+  Register input_reg = ToRegister(instr->input());
+
+  // Heap number map check.
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         Factory::heap_number_map());
+
+  if (instr->truncating()) {
+    __ j(equal, &heap_number);
+    // Check for undefined. Undefined is converted to zero for truncating
+    // conversions.
+    __ cmp(input_reg, Factory::undefined_value());
+    DeoptimizeIf(not_equal, instr->environment());
+    __ mov(input_reg, 0);
+    __ jmp(&done);
+
+    __ bind(&heap_number);
+    if (CpuFeatures::IsSupported(SSE3)) {
+      CpuFeatures::Scope scope(SSE3);
+      NearLabel convert;
+      // Use more powerful conversion when sse3 is available.
+      // Load x87 register with heap number.
+      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+      // Get exponent alone and check for too-big exponent.
+      __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+      __ and_(input_reg, HeapNumber::kExponentMask);
+      const uint32_t kTooBigExponent =
+          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+      __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
+      __ j(less, &convert);
+      // Pop FPU stack before deoptimizing.
+      __ ffree(0);
+      __ fincstp();
+      DeoptimizeIf(no_condition, instr->environment());
+
+      // Reserve space for 64 bit answer.
+      __ bind(&convert);
+      __ sub(Operand(esp), Immediate(kDoubleSize));
+      // Do conversion, which cannot fail because we checked the exponent.
+      __ fisttp_d(Operand(esp, 0));
+      __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
+      __ add(Operand(esp), Immediate(kDoubleSize));
+    } else {
+      NearLabel deopt;
+      XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
+      __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+      __ cvttsd2si(input_reg, Operand(xmm0));
+      __ cmp(input_reg, 0x80000000u);
+      __ j(not_equal, &done);
+      // Check if the input was 0x80000000 (kMinInt).
+      // If no, then we got an overflow and we deoptimize.
+      ExternalReference min_int = ExternalReference::address_of_min_int();
+      __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
+      __ ucomisd(xmm_temp, xmm0);
+      DeoptimizeIf(not_equal, instr->environment());
+      DeoptimizeIf(parity_even, instr->environment());  // NaN.
+    }
+  } else {
+    // Deoptimize if we don't have a heap number.
+    DeoptimizeIf(not_equal, instr->environment());
+
+    XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
+    __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    __ cvttsd2si(input_reg, Operand(xmm0));
+    __ cvtsi2sd(xmm_temp, Operand(input_reg));
+    __ ucomisd(xmm0, xmm_temp);
+    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(parity_even, instr->environment());  // NaN.
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ test(input_reg, Operand(input_reg));
+      __ j(not_zero, &done);
+      __ movmskpd(input_reg, xmm0);
+      __ and_(input_reg, 1);
+      DeoptimizeIf(not_zero, instr->environment());
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  ASSERT(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
+
+  // Smi check.
+  __ test(input_reg, Immediate(kSmiTagMask));
+  __ j(not_zero, deferred->entry());
+
+  // Smi to int32 conversion.
+  __ SmiUntag(input_reg);  // Untag smi.
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  LOperand* result = instr->result();
+  ASSERT(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  XMMRegister result_reg = ToDoubleRegister(result);
+
+  EmitNumberUntagD(input_reg, result_reg, instr->environment());
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsDoubleRegister());
+  LOperand* result = instr->result();
+  ASSERT(result->IsRegister());
+
+  XMMRegister input_reg = ToDoubleRegister(input);
+  Register result_reg = ToRegister(result);
+
+  if (instr->truncating()) {
+    // Performs a truncating conversion of a floating point number as used by
+    // the JS bitwise operations.
+    __ cvttsd2si(result_reg, Operand(input_reg));
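+    // cvttsd2si produces 0x80000000 when the input cannot be represented
+    // as an int32.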
+    __ cmp(result_reg, 0x80000000u);
+    if (CpuFeatures::IsSupported(SSE3)) {
+      // This will deoptimize if the exponent of the input is out of range.
+      CpuFeatures::Scope scope(SSE3);
+      NearLabel convert, done;
+      __ j(not_equal, &done);
+      __ sub(Operand(esp), Immediate(kDoubleSize));
+      __ movdbl(Operand(esp, 0), input_reg);
+      // Get exponent alone and check for too-big exponent.
+      __ mov(result_reg, Operand(esp, sizeof(int32_t)));
+      __ and_(result_reg, HeapNumber::kExponentMask);
+      const uint32_t kTooBigExponent =
+          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+      __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
+      __ j(less, &convert);
+      __ add(Operand(esp), Immediate(kDoubleSize));
+      DeoptimizeIf(no_condition, instr->environment());
+      __ bind(&convert);
+      // Do conversion, which cannot fail because we checked the exponent.
+      __ fld_d(Operand(esp, 0));
+      __ fisttp_d(Operand(esp, 0));
+      __ mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
+      __ add(Operand(esp), Immediate(kDoubleSize));
+      __ bind(&done);
+    } else {
+      // This will bail out if the input was not in the int32 range (or,
+      // unfortunately, if the input was 0x80000000).
+      DeoptimizeIf(equal, instr->environment());
+    }
+  } else {
+    NearLabel done;
+    __ cvttsd2si(result_reg, Operand(input_reg));
+    __ cvtsi2sd(xmm0, Operand(result_reg));
+    __ ucomisd(xmm0, input_reg);
+    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(parity_even, instr->environment());  // NaN.
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // The integer converted back is equal to the original. We
+      // only have to test if we got -0 as an input.
+      __ test(result_reg, Operand(result_reg));
+      __ j(not_zero, &done);
+      __ movmskpd(result_reg, input_reg);
+      // Bit 0 contains the sign of the double in input_reg.
+      // If input was positive, we are ok and return 0, otherwise
+      // deoptimize.
+      __ and_(result_reg, 1);
+      DeoptimizeIf(not_zero, instr->environment());
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  __ test(ToRegister(input), Immediate(kSmiTagMask));
+  DeoptimizeIf(instr->condition(), instr->environment());
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->input());
+  Register temp = ToRegister(instr->temp());
+  InstanceType first = instr->hydrogen()->first();
+  InstanceType last = instr->hydrogen()->last();
+
+  __ test(input, Immediate(kSmiTagMask));
+  DeoptimizeIf(zero, instr->environment());
+
+  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+          static_cast<int8_t>(first));
+
+  // If there is only one type in the interval, check for equality.
+  if (first == last) {
+    DeoptimizeIf(not_equal, instr->environment());
+  } else {
+    DeoptimizeIf(below, instr->environment());
+    // The upper-bound check is unnecessary if the interval ends at LAST_TYPE.
+    if (last != LAST_TYPE) {
+      __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+              static_cast<int8_t>(last));
+      DeoptimizeIf(above, instr->environment());
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
+  ASSERT(instr->input()->IsRegister());
+  Register reg = ToRegister(instr->input());
+  __ cmp(reg, instr->hydrogen()->target());
+  DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoCheckMap(LCheckMap* instr) {
+  LOperand* input = instr->input();
+  ASSERT(input->IsRegister());
+  Register reg = ToRegister(input);
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         instr->hydrogen()->map());
+  DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
+  if (Heap::InNewSpace(*prototype)) {
+    Handle<JSGlobalPropertyCell> cell =
+        Factory::NewJSGlobalPropertyCell(prototype);
+    __ mov(result, Operand::Cell(cell));
+  } else {
+    __ mov(result, prototype);
+  }
+}
+
+
+void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  Register reg = ToRegister(instr->temp());
+
+  Handle<JSObject> holder = instr->holder();
+  Handle<Map> receiver_map = instr->receiver_map();
+  Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
+
+  // Load prototype object.
+  LoadPrototype(reg, current_prototype);
+
+  // Check prototype maps up to the holder.
+  while (!current_prototype.is_identical_to(holder)) {
+    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Handle<Map>(current_prototype->map()));
+    DeoptimizeIf(not_equal, instr->environment());
+    current_prototype =
+        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
+    // Load next prototype object.
+    LoadPrototype(reg, current_prototype);
+  }
+
+  // Check the holder map.
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         Handle<Map>(current_prototype->map()));
+  DeoptimizeIf(not_equal, instr->environment());
+}
+
+
+void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
+  // Set up the parameters to the stub/runtime call.
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ push(Immediate(instr->hydrogen()->constant_elements()));
+
+  // Pick the right runtime function or stub to call.
+  int length = instr->hydrogen()->length();
+  if (instr->hydrogen()->IsCopyOnWrite()) {
+    ASSERT(instr->hydrogen()->depth() == 1);
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
+  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
+  } else {
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    FastCloneShallowArrayStub stub(mode, length);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+  // Set up the parameters to the stub/runtime call.
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ push(Immediate(instr->hydrogen()->constant_properties()));
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+
+  // Pick the right runtime function or stub to call.
+  if (instr->hydrogen()->depth() > 1) {
+    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+  } else {
+    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+  }
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  NearLabel materialized;
+  // Registers will be used as follows:
+  // edi = JS function.
+  // ecx = literals array.
+  // ebx = regexp literal.
+  // eax = regexp literal clone.
+  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+  int literal_offset = FixedArray::kHeaderSize +
+      instr->hydrogen()->literal_index() * kPointerSize;
+  __ mov(ebx, FieldOperand(ecx, literal_offset));
+  __ cmp(ebx, Factory::undefined_value());
+  __ j(not_equal, &materialized);
+
+  // Create regexp literal using runtime function.
+  // Result will be in eax.
+  __ push(ecx);
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ push(Immediate(instr->hydrogen()->pattern()));
+  __ push(Immediate(instr->hydrogen()->flags()));
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+  __ mov(ebx, eax);
+
+  __ bind(&materialized);
+  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+  Label allocated, runtime_allocate;
+  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ push(ebx);
+  __ push(Immediate(Smi::FromInt(size)));
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+  __ pop(ebx);
+
+  __ bind(&allocated);
+  // Copy the content into the newly allocated memory.
+  // (Unroll copy loop once for better throughput).
+  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+    __ mov(edx, FieldOperand(ebx, i));
+    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
+    __ mov(FieldOperand(eax, i), edx);
+    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
+  }
+  if ((size % (2 * kPointerSize)) != 0) {
+    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
+    __ mov(FieldOperand(eax, size - kPointerSize), edx);
+  }
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning.
+  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
+  bool pretenure = instr->hydrogen()->pretenure();
+  if (shared_info->num_literals() == 0 && !pretenure) {
+    FastNewClosureStub stub;
+    __ push(Immediate(shared_info));
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ push(esi);
+    __ push(Immediate(shared_info));
+    __ push(Immediate(pretenure
+                      ? Factory::true_value()
+                      : Factory::false_value()));
+    CallRuntime(Runtime::kNewClosure, 3, instr);
+  }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  LOperand* input = instr->input();
+  if (input->IsConstantOperand()) {
+    __ push(ToImmediate(input));
+  } else {
+    __ push(ToOperand(input));
+  }
+  CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
+  Register input = ToRegister(instr->input());
+  Register result = ToRegister(instr->result());
+  Label true_label;
+  Label false_label;
+  NearLabel done;
+
+  Condition final_branch_condition = EmitTypeofIs(&true_label,
+                                                  &false_label,
+                                                  input,
+                                                  instr->type_literal());
+  __ j(final_branch_condition, &true_label);
+  __ bind(&false_label);
+  __ mov(result, Handle<Object>(Heap::false_value()));
+  __ jmp(&done);
+
+  __ bind(&true_label);
+  __ mov(result, Handle<Object>(Heap::true_value()));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->input());
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* true_label = chunk_->GetAssemblyLabel(true_block);
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition final_branch_condition = EmitTypeofIs(true_label,
+                                                  false_label,
+                                                  input,
+                                                  instr->type_literal());
+
+  EmitBranch(true_block, false_block, final_branch_condition);
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+                                 Label* false_label,
+                                 Register input,
+                                 Handle<String> type_name) {
+  Condition final_branch_condition = no_condition;
+  if (type_name->Equals(Heap::number_symbol())) {
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, true_label);
+    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+           Factory::heap_number_map());
+    final_branch_condition = equal;
+
+  } else if (type_name->Equals(Heap::string_symbol())) {
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(not_zero, false_label);
+    __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
+    final_branch_condition = below;
+
+  } else if (type_name->Equals(Heap::boolean_symbol())) {
+    __ cmp(input, Handle<Object>(Heap::true_value()));
+    __ j(equal, true_label);
+    __ cmp(input, Handle<Object>(Heap::false_value()));
+    final_branch_condition = equal;
+
+  } else if (type_name->Equals(Heap::undefined_symbol())) {
+    __ cmp(input, Factory::undefined_value());
+    __ j(equal, true_label);
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    // Check for undetectable objects => true.
+    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    final_branch_condition = not_zero;
+
+  } else if (type_name->Equals(Heap::function_symbol())) {
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+    __ j(equal, true_label);
+    // Regular expressions => 'function' (they are callable).
+    __ CmpInstanceType(input, JS_REGEXP_TYPE);
+    final_branch_condition = equal;
+
+  } else if (type_name->Equals(Heap::object_symbol())) {
+    __ test(input, Immediate(kSmiTagMask));
+    __ j(zero, false_label);
+    __ cmp(input, Factory::null_value());
+    __ j(equal, true_label);
+    // Regular expressions => 'function', not 'object'.
+    __ CmpObjectType(input, JS_REGEXP_TYPE, input);
+    __ j(equal, false_label);
+    // Check for undetectable objects => false.
+    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(not_zero, false_label);
+    // Check for JS objects => true.
+    __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
+    __ j(below, false_label);
+    __ CmpInstanceType(input, LAST_JS_OBJECT_TYPE);
+    final_branch_condition = below_equal;
+
+  } else {
+    final_branch_condition = not_equal;
+    __ jmp(false_label);
+    // A dead branch instruction will be generated after this point.
+  }
+
+  return final_branch_condition;
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  // No code is emitted for the lazy bailout instruction; it only captures
+  // the environment after a call so the safepoint data can be populated
+  // with deoptimization data.
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  DeoptimizeIf(no_condition, instr->environment());
+}
+
+
+void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
+  LOperand* obj = instr->object();
+  LOperand* key = instr->key();
+  __ push(ToOperand(obj));
+  if (key->IsConstantOperand()) {
+    __ push(ToImmediate(key));
+  } else {
+    __ push(ToOperand(key));
+  }
+  RecordPosition(instr->pointer_map()->position());
+  SafepointGenerator safepoint_generator(this,
+                                         instr->pointer_map(),
+                                         Safepoint::kNoDeoptimizationIndex);
+  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  // Perform stack overflow check.
+  NearLabel done;
+  ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
+  __ cmp(esp, Operand::StaticVariable(stack_limit));
+  __ j(above_equal, &done);
+
+  StackCheckStub stub;
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
+                                   instr->SpilledDoubleRegisterArray());
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  ASSERT(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment);
+  ASSERT(osr_pc_offset_ == -1);
+  osr_pc_offset_ = masm()->pc_offset();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
new file mode 100644
index 0000000..6d8173a
--- /dev/null
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -0,0 +1,265 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_LITHIUM_CODEGEN_IA32_H_
+#define V8_IA32_LITHIUM_CODEGEN_IA32_H_
+
+#include "ia32/lithium-ia32.h"
+
+#include "checks.h"
+#include "deoptimizer.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+
+class LCodeGen BASE_EMBEDDED {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : chunk_(chunk),
+        masm_(assembler),
+        info_(info),
+        current_block_(-1),
+        current_instruction_(-1),
+        instructions_(chunk->instructions()),
+        deoptimizations_(4),
+        deoptimization_literals_(8),
+        inlined_function_count_(0),
+        scope_(chunk->graph()->info()->scope()),
+        status_(UNUSED),
+        deferred_(8),
+        osr_pc_offset_(-1) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+  void DoDeferredNumberTagI(LNumberTagI* instr);
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+  void DoDeferredStackCheck(LGoto* instr);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  enum Status {
+    UNUSED,
+    GENERATING,
+    DONE,
+    ABORTED
+  };
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_generating() const { return status_ == GENERATING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  LChunk* chunk() const { return chunk_; }
+  Scope* scope() const { return scope_; }
+  HGraph* graph() const { return chunk_->graph(); }
+  MacroAssembler* masm() const { return masm_; }
+
+  int GetNextEmittedBlock(int block);
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int StackSlotCount() const { return chunk()->spill_slot_count(); }
+  int ParameterCount() const { return scope()->num_parameters(); }
+
+  void Abort(const char* format, ...);
+  void Comment(const char* format, ...);
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  bool GeneratePrologue();
+  bool GenerateBody();
+  bool GenerateDeferredCode();
+  bool GenerateSafepointTable();
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+  void CallRuntime(Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in edi.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int arity,
+                         LInstruction* instr);
+
+  void LoadPrototype(Register result, Handle<JSObject> prototype);
+
+  void RegisterLazyDeoptimization(LInstruction* instr);
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+  void DeoptimizeIf(Condition cc, LEnvironment* environment);
+
+  void AddToTranslation(Translation* translation,
+                        LOperand* op,
+                        bool is_tagged);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+  Register ToRegister(int index) const;
+  XMMRegister ToDoubleRegister(int index) const;
+  Register ToRegister(LOperand* op) const;
+  XMMRegister ToDoubleRegister(LOperand* op) const;
+  int ToInteger32(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op) const;
+  Immediate ToImmediate(LOperand* op);
+
+  // Specific math operations - used from DoUnaryMathOperation.
+  void DoMathAbs(LUnaryMathOperation* instr);
+  void DoMathFloor(LUnaryMathOperation* instr);
+  void DoMathRound(LUnaryMathOperation* instr);
+  void DoMathSqrt(LUnaryMathOperation* instr);
+  void DoMathPowHalf(LUnaryMathOperation* instr);
+  void DoMathLog(LUnaryMathOperation* instr);
+  void DoMathCos(LUnaryMathOperation* instr);
+  void DoMathSin(LUnaryMathOperation* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    int deoptimization_index);
+  void RecordPosition(int position);
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+  void EmitBranch(int left_block, int right_block, Condition cc);
+  void EmitCmpI(LOperand* left, LOperand* right);
+  void EmitNumberUntagD(Register input, XMMRegister result, LEnvironment* env);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitTypeofIs(Label* true_label, Label* false_label,
+                         Register input, Handle<String> type_name);
+
+  // Emits optimized code for %_IsObject(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsObject(Register input,
+                         Register temp1,
+                         Register temp2,
+                         Label* is_not_object,
+                         Label* is_object);
+
+  LChunk* const chunk_;
+  MacroAssembler* const masm_;
+  CompilationInfo* const info_;
+
+  int current_block_;
+  int current_instruction_;
+  const ZoneList<LInstruction*>* instructions_;
+  ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<Handle<Object> > deoptimization_literals_;
+  int inlined_function_count_;
+  Scope* const scope_;
+  Status status_;
+  TranslationBuffer translations_;
+  ZoneList<LDeferredCode*> deferred_;
+  int osr_pc_offset_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode: public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen), external_exit_(NULL) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() { }
+  virtual void Generate() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_LITHIUM_CODEGEN_IA32_H_
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
new file mode 100644
index 0000000..3b272d0
--- /dev/null
+++ b/src/ia32/lithium-ia32.cc
@@ -0,0 +1,2128 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "ia32/lithium-ia32.h"
+#include "ia32/lithium-codegen-ia32.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+LOsrEntry::LOsrEntry() {
+  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+    register_spills_[i] = NULL;
+  }
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+    double_register_spills_[i] = NULL;
+  }
+}
+
+
+void LOsrEntry::MarkSpilledRegister(int allocation_index,
+                                    LOperand* spill_operand) {
+  ASSERT(spill_operand->IsStackSlot());
+  ASSERT(register_spills_[allocation_index] == NULL);
+  register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
+                                          LOperand* spill_operand) {
+  ASSERT(spill_operand->IsDoubleStackSlot());
+  ASSERT(double_register_spills_[allocation_index] == NULL);
+  double_register_spills_[allocation_index] = spill_operand;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) const {
+  stream->Add("%s ", this->Mnemonic());
+  if (HasResult()) {
+    result()->PrintTo(stream);
+    stream->Add(" ");
+  }
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) const {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LParallelMove::IsRedundant() const {
+  for (int i = 0; i < move_operands_.length(); ++i) {
+    if (!move_operands_[i].IsRedundant()) return false;
+  }
+  return true;
+}
+
+
+void LParallelMove::PrintDataTo(StringStream* stream) const {
+  for (int i = move_operands_.length() - 1; i >= 0; --i) {
+    if (!move_operands_[i].IsEliminated()) {
+      LOperand* from = move_operands_[i].from();
+      LOperand* to = move_operands_[i].to();
+      if (from->Equals(to)) {
+        to->PrintTo(stream);
+      } else {
+        to->PrintTo(stream);
+        stream->Add(" = ");
+        from->PrintTo(stream);
+      }
+      stream->Add("; ");
+    }
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) const {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+void LBinaryOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("= ");
+  left()->PrintTo(stream);
+  stream->Add(" ");
+  right()->PrintTo(stream);
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) const {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  input()->PrintTo(stream);
+}
+
+
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if ");
+  input()->PrintTo(stream);
+  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if is_object(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if is_smi(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if has_instance_type(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if has_cached_array_index(");
+  input()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if class_of_test(");
+  input()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIs::PrintDataTo(StringStream* stream) const {
+  input()->PrintTo(stream);
+  stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const {
+  stream->Add("if typeof ");
+  input()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              *hydrogen()->type_literal()->ToCString(),
+              true_block_id(), false_block_id());
+}
+
+
+void LCallConstantFunction::PrintDataTo(StringStream* stream) const {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("/%s ", hydrogen()->OpName());
+  input()->PrintTo(stream);
+}
+
+
+void LCallKeyed::PrintDataTo(StringStream* stream) const {
+  stream->Add("[ecx] #%d / ", arity());
+}
+
+
+void LCallNamed::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallGlobal::PrintDataTo(StringStream* stream) const {
+  SmartPointer<char> name_string = name()->ToCString();
+  stream->Add("%s #%d / ", *name_string, arity());
+}
+
+
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) const {
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) const {
+  LUnaryOperation::PrintDataTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LClassOfTest::PrintDataTo(StringStream* stream) const {
+  stream->Add("= class_of_test(");
+  input()->PrintTo(stream);
+  stream->Add(", \"%o\")", *hydrogen()->class_name());
+}
+
+
+void LUnaryOperation::PrintDataTo(StringStream* stream) const {
+  stream->Add("= ");
+  input()->PrintTo(stream);
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+  arguments()->PrintTo(stream);
+
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+LChunk::LChunk(HGraph* graph)
+    : spill_slot_count_(0),
+      graph_(graph),
+      instructions_(32),
+      pointer_maps_(8),
+      inlined_closures_(1) {
+}
+
+
+void LChunk::Verify() const {
+  // TODO(twuerthinger): Implement verification for chunk.
+}
+
+
+int LChunk::GetNextSpillIndex(bool is_double) {
+  // A double-width slot occupies two spill slots, so skip one extra slot.
+  if (is_double) spill_slot_count_++;
+  return spill_slot_count_++;
+}
+
+
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+  int index = GetNextSpillIndex(is_double);
+  if (is_double) {
+    return LDoubleStackSlot::Create(index);
+  } else {
+    return LStackSlot::Create(index);
+  }
+}
+
+
+void LChunk::MarkEmptyBlocks() {
+  HPhase phase("Mark empty blocks", this);
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    int first = block->first_instruction_index();
+    int last = block->last_instruction_index();
+    LInstruction* first_instr = instructions()->at(first);
+    LInstruction* last_instr = instructions()->at(last);
+
+    LLabel* label = LLabel::cast(first_instr);
+    if (last_instr->IsGoto()) {
+      LGoto* goto_instr = LGoto::cast(last_instr);
+      if (!goto_instr->include_stack_check() &&
+          label->IsRedundant() &&
+          !label->is_loop_header()) {
+        bool can_eliminate = true;
+        for (int j = first + 1; j < last && can_eliminate; ++j) {
+          LInstruction* cur = instructions()->at(j);
+          if (cur->IsGap()) {
+            LGap* gap = LGap::cast(cur);
+            if (!gap->IsRedundant()) {
+              can_eliminate = false;
+            }
+          } else {
+            can_eliminate = false;
+          }
+        }
+
+        if (can_eliminate) {
+          label->set_replacement(GetLabel(goto_instr->block_id()));
+        }
+      }
+    }
+  }
+}
+
+
+void LStoreNamed::PrintDataTo(StringStream* stream) const {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) const {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+  LGap* gap = new LGap(block);
+  int index = -1;
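+  // The gap goes before a control instruction, so that its parallel moves
+  // execute before the branch, and after any other instruction.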
+  if (instr->IsControl()) {
+    instructions_.Add(gap);
+    index = instructions_.length();
+    instructions_.Add(instr);
+  } else {
+    index = instructions_.length();
+    instructions_.Add(instr);
+    instructions_.Add(gap);
+  }
+  if (instr->HasPointerMap()) {
+    pointer_maps_.Add(instr->pointer_map());
+    instr->pointer_map()->set_lithium_position(index);
+  }
+  return index;
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+  return LConstantOperand::Create(constant->id());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+  // The receiver is at index 0, the first parameter at index 1, so we
+  // shift all parameter indexes down by the number of parameters, and
+  // make sure they end up negative so they are distinguishable from
+  // spill slots.
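+  // With two parameters, for example, the receiver (index 0) maps to
+  // slot -3 and the parameters to slots -2 and -1.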
+  int result = index - graph()->info()->scope()->num_parameters() - 1;
+  ASSERT(result < 0);
+  return result;
+}
+
+
+// A parameter's offset relative to ebp in the arguments stub.
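+// With n parameters, the parameter at index (where -1 is the receiver)
+// lives (1 + n - index) * kPointerSize above ebp.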
+int LChunk::ParameterAt(int index) {
+  ASSERT(-1 <= index);  // -1 is the receiver.
+  return (1 + graph()->info()->scope()->num_parameters() - index) *
+      kPointerSize;
+}
+
+
+LGap* LChunk::GetGapAt(int index) const {
+  return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+  return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+  while (!IsGapAt(index)) index--;
+  return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+  GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
+}
+
+
+class LGapNode: public ZoneObject {
+ public:
+  explicit LGapNode(LOperand* operand)
+      : operand_(operand), resolved_(false), visited_id_(-1) { }
+
+  LOperand* operand() const { return operand_; }
+  bool IsResolved() const { return !IsAssigned() || resolved_; }
+  void MarkResolved() {
+    ASSERT(!IsResolved());
+    resolved_ = true;
+  }
+  int visited_id() const { return visited_id_; }
+  void set_visited_id(int id) {
+    ASSERT(id > visited_id_);
+    visited_id_ = id;
+  }
+
+  bool IsAssigned() const { return assigned_from_.is_set(); }
+  LGapNode* assigned_from() const { return assigned_from_.get(); }
+  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
+
+ private:
+  LOperand* operand_;
+  SetOncePointer<LGapNode> assigned_from_;
+  bool resolved_;
+  int visited_id_;
+};
+
+
+LGapResolver::LGapResolver(const ZoneList<LMoveOperands>* moves,
+                           LOperand* marker_operand)
+    : nodes_(4),
+      identified_cycles_(4),
+      result_(4),
+      marker_operand_(marker_operand),
+      next_visited_id_(0) {
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) RegisterMove(move);
+  }
+}
+
+
+const ZoneList<LMoveOperands>* LGapResolver::ResolveInReverseOrder() {
+  for (int i = 0; i < identified_cycles_.length(); ++i) {
+    ResolveCycle(identified_cycles_[i]);
+  }
+
+  int unresolved_nodes;
+  do {
+    unresolved_nodes = 0;
+    for (int j = 0; j < nodes_.length(); j++) {
+      LGapNode* node = nodes_[j];
+      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
+        AddResultMove(node->assigned_from(), node);
+        node->MarkResolved();
+      }
+      if (!node->IsResolved()) ++unresolved_nodes;
+    }
+  } while (unresolved_nodes > 0);
+  return &result_;
+}
+
+
+void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
+  AddResultMove(from->operand(), to->operand());
+}
+
+
+void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
+  result_.Add(LMoveOperands(from, to));
+}
+
+
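+// Breaks a cycle by routing one value through marker_operand. Because the
+// resulting move list is consumed back to front, a swap A <-> B comes out
+// as: marker = A; A = B; B = marker.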
+void LGapResolver::ResolveCycle(LGapNode* start) {
+  ZoneList<LOperand*> circle_operands(8);
+  circle_operands.Add(marker_operand_);
+  LGapNode* cur = start;
+  do {
+    cur->MarkResolved();
+    circle_operands.Add(cur->operand());
+    cur = cur->assigned_from();
+  } while (cur != start);
+  circle_operands.Add(marker_operand_);
+
+  for (int i = circle_operands.length() - 1; i > 0; --i) {
+    LOperand* from = circle_operands[i];
+    LOperand* to = circle_operands[i - 1];
+    AddResultMove(from, to);
+  }
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
+  ASSERT(a != b);
+  LGapNode* cur = a;
+  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
+    cur->set_visited_id(visited_id);
+    cur = cur->assigned_from();
+  }
+
+  return cur == b;
+}
+
+
+bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
+  ASSERT(a != b);
+  return CanReach(a, b, next_visited_id_++);
+}
+
+
+void LGapResolver::RegisterMove(LMoveOperands move) {
+  if (move.from()->IsConstantOperand()) {
+    // Constant moves should be last in the machine code. Therefore add them
+    // first to the result set.
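+    // (The result list is consumed in reverse order, so moves added first
+    // are emitted last.)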
+    AddResultMove(move.from(), move.to());
+  } else {
+    LGapNode* from = LookupNode(move.from());
+    LGapNode* to = LookupNode(move.to());
+    if (to->IsAssigned() && to->assigned_from() == from) {
+      move.Eliminate();
+      return;
+    }
+    ASSERT(!to->IsAssigned());
+    if (CanReach(from, to)) {
+      // This move introduces a cycle, so remember it for later resolution.
+      identified_cycles_.Add(from);
+    }
+    to->set_assigned_from(from);
+  }
+}
+
+
+LGapNode* LGapResolver::LookupNode(LOperand* operand) {
+  for (int i = 0; i < nodes_.length(); ++i) {
+    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
+  }
+
+  // No node found => create a new one.
+  LGapNode* result = new LGapNode(operand);
+  nodes_.Add(result);
+  return result;
+}
+
+
+Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
+  return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+    LConstantOperand* operand) const {
+  return graph_->LookupValue(operand->index())->representation();
+}
+
+
+LChunk* LChunkBuilder::Build() {
+  ASSERT(is_unused());
+  chunk_ = new LChunk(graph());
+  HPhase phase("Building chunk", chunk_);
+  status_ = BUILDING;
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+void LChunkBuilder::Abort(const char* format, ...) {
+  if (FLAG_trace_bailout) {
+    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
+    PrintF("Aborting LChunk building in @\"%s\": ", *debug_name);
+    va_list arguments;
+    va_start(arguments, format);
+    OS::VPrint(format, arguments);
+    va_end(arguments);
+    PrintF("\n");
+  }
+  status_ = ABORTED;
+}
+
+
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+  return LRegister::Create(Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new LUnallocated(LUnallocated::FIXED_REGISTER,
+                          Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
+  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                          XMMRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                              LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new LUnallocated(LUnallocated::NONE,
+                                     LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  allocator_->RecordUse(value, operand);
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) {
+  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsAny(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_ANY_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LInstruction* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(LInstruction* instr,
+                                               XMMRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  instr->set_environment(CreateEnvironment(hydrogen_env));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
+    LInstruction* instr, int ast_id) {
+  ASSERT(instructions_pending_deoptimization_environment_ == NULL);
+  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+  instructions_pending_deoptimization_environment_ = instr;
+  pending_deoptimization_ast_id_ = ast_id;
+  return instr;
+}
+
+
+void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
+  instructions_pending_deoptimization_environment_ = NULL;
+  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  allocator_->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  if (hinstr->HasSideEffects()) {
+    ASSERT(hinstr->next()->IsSimulate());
+    HSimulate* sim = HSimulate::cast(hinstr->next());
+    instr = SetInstructionPendingDeoptimizationEnvironment(
+        instr, sim->ast_id());
+  }
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to the call even if the
+  // call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  ASSERT(!instr->HasPointerMap());
+  instr->set_pointer_map(new LPointerMap(position_));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) {
+  allocator_->RecordDefinition(current_instruction_, result);
+  instr->set_result(result);
+  return instr;
+}
+
+
+LOperand* LChunkBuilder::Temp() {
+  LUnallocated* operand = new LUnallocated(LUnallocated::NONE);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  allocator_->RecordTemporary(operand);
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoBit(Token::Value op,
+                                   HBitwiseBinaryOperation* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().IsInteger32());
+  ASSERT(instr->right()->representation().IsInteger32());
+
+  LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+  LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+  return DefineSameAsFirst(new LBitI(op, left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->OperandAt(0)->representation().IsInteger32());
+  ASSERT(instr->OperandAt(1)->representation().IsInteger32());
+  LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+
+  HValue* right_value = instr->OperandAt(1);
+  LOperand* right = NULL;
+  int constant_value = 0;
+  if (right_value->IsConstant()) {
+    HConstant* constant = HConstant::cast(right_value);
+    right = chunk_->DefineConstantOperand(constant);
+    constant_value = constant->Integer32Value() & 0x1f;
+  } else {
+    right = UseFixed(right_value, ecx);
+  }
+
+  // Shift operations can only deoptimize if we do a logical shift
+  // by 0 and the result cannot be truncated to int32.
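+  // (For example, -1 >>> 0 is 4294967295, which does not fit in a signed
+  // 32-bit result.)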
+  bool can_deopt = (op == Token::SHR && constant_value == 0);
+  if (can_deopt) {
+    bool can_truncate = true;
+    for (int i = 0; i < instr->uses()->length(); i++) {
+      if (!instr->uses()->at(i)->CheckFlag(HValue::kTruncatingToInt32)) {
+        can_truncate = false;
+        break;
+      }
+    }
+    can_deopt = !can_truncate;
+  }
+
+  LInstruction* result =
+      DefineSameAsFirst(new LShiftI(op, left, right, can_deopt));
+  if (can_deopt) AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(instr->representation().IsDouble());
+  ASSERT(instr->left()->representation().IsDouble());
+  ASSERT(instr->right()->representation().IsDouble());
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LArithmeticD* result = new LArithmeticD(op, left, right);
+  return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  ASSERT(op == Token::ADD ||
+         op == Token::DIV ||
+         op == Token::MOD ||
+         op == Token::MUL ||
+         op == Token::SUB);
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  ASSERT(left->representation().IsTagged());
+  ASSERT(right->representation().IsTagged());
+  LOperand* left_operand = UseFixed(left, edx);
+  LOperand* right_operand = UseFixed(right, eax);
+  LInstruction* result = new LArithmeticT(op, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  ASSERT(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    ASSERT(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    ASSERT(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      ASSERT(pred->end()->FirstSuccessor() == block);
+    } else {
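+      // A successor with a higher block id has not been processed yet and
+      // will read the predecessor's environment again, so work on a copy.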
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    ASSERT(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment, it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      last_environment->SetValueAt(phi->merged_index(), phi);
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                   graph_->GetConstantUndefined());
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    if (FLAG_trace_environment) {
+      PrintF("Process instruction %d\n", current->id());
+    }
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+  allocator_->BeginInstruction();
+  if (current->has_position()) position_ = current->position();
+  LInstruction* instr = current->CompileToLithium(this);
+
+  if (instr != NULL) {
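+    // With the stress flags set, every instruction is given a pointer map
+    // and an environment, presumably to flush out missing annotations
+    // during testing.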
+    if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+      instr = AssignPointerMap(instr);
+    }
+    if (FLAG_stress_environments && !instr->HasEnvironment()) {
+      instr = AssignEnvironment(instr);
+    }
+    if (current->IsBranch()) {
+      instr->set_hydrogen_value(HBranch::cast(current)->value());
+    } else {
+      instr->set_hydrogen_value(current);
+    }
+
+    int index = chunk_->AddInstruction(instr, current_block_);
+    allocator_->SummarizeInstruction(index);
+  } else {
+    // This instruction should be omitted.
+    allocator_->OmitInstruction();
+  }
+  current_instruction_ = old_current;
+}
+
+
+void LEnvironment::WriteTranslation(LCodeGen* cgen,
+                                    Translation* translation) const {
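+  // NOTE: this null check on |this| terminates the recursion over the
+  // outer() chain below. Calling a member function through a null pointer
+  // is undefined behavior in standard C++; the code relies on the
+  // compilers V8 targets tolerating it.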
+  if (this == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = values()->length();
+  // The output frame height does not include the parameters.
+  int height = translation_size - parameter_count();
+
+  outer()->WriteTranslation(cgen, translation);
+  int closure_id = cgen->DefineDeoptimizationLiteral(closure());
+  translation->BeginFrame(ast_id(), closure_id, height);
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = values()->at(i);
+    // spilled_registers_ and spilled_double_registers_ are either
+    // both NULL or both set.
+    if (spilled_registers_ != NULL && value != NULL) {
+      if (value->IsRegister() &&
+          spilled_registers_[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        cgen->AddToTranslation(translation,
+                               spilled_registers_[value->index()],
+                               HasTaggedValueAt(i));
+      } else if (value->IsDoubleRegister() &&
+                 spilled_double_registers_[value->index()] != NULL) {
+        translation->MarkDuplicate();
+        cgen->AddToTranslation(translation,
+                               spilled_double_registers_[value->index()],
+                               false);
+      }
+    }
+
+    cgen->AddToTranslation(translation, value, HasTaggedValueAt(i));
+  }
+}
+
+
+void LEnvironment::PrintTo(StringStream* stream) const {
+  stream->Add("[id=%d|", ast_id());
+  stream->Add("parameters=%d|", parameter_count());
+  stream->Add("arguments_stack_height=%d|", arguments_stack_height());
+  for (int i = 0; i < values_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    if (values_[i] == NULL) {
+      stream->Add("[hole]");
+    } else {
+      values_[i]->PrintTo(stream);
+    }
+  }
+  stream->Add("]");
+}
+
+
+LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+  if (hydrogen_env == NULL) return NULL;
+
+  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  int ast_id = hydrogen_env->ast_id();
+  ASSERT(ast_id != AstNode::kNoNumber);
+  int value_count = hydrogen_env->values()->length();
+  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+                                          ast_id,
+                                          hydrogen_env->parameter_count(),
+                                          argument_count_,
+                                          value_count,
+                                          outer);
+  int argument_index = 0;
+  for (int i = 0; i < value_count; ++i) {
+    HValue* value = hydrogen_env->values()->at(i);
+    LOperand* op = NULL;
+    if (value->IsArgumentsObject()) {
+      op = NULL;
+    } else if (value->IsPushArgument()) {
+      op = new LArgument(argument_index++);
+    } else {
+      op = UseOrConstant(value);
+      if (op->IsUnallocated()) {
+        LUnallocated* unalloc = LUnallocated::cast(op);
+        unalloc->set_policy(LUnallocated::ANY);
+      }
+    }
+    result->AddValue(op, value->representation());
+  }
+
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(),
+                                   instr->include_stack_check());
+  if (instr->include_stack_check()) result = AssignPointerMap(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* v = instr->value();
+  HBasicBlock* first = instr->FirstSuccessor();
+  HBasicBlock* second = instr->SecondSuccessor();
+  ASSERT(first != NULL && second != NULL);
+  int first_id = first->block_id();
+  int second_id = second->block_id();
+
+  if (v->EmitAtUses()) {
+    if (v->IsClassOfTest()) {
+      HClassOfTest* compare = HClassOfTest::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+                                       TempRegister(),
+                                       TempRegister(),
+                                       first_id,
+                                       second_id);
+    } else if (v->IsCompare()) {
+      HCompare* compare = HCompare::cast(v);
+      Token::Value op = compare->token();
+      HValue* left = compare->left();
+      HValue* right = compare->right();
+      if (left->representation().IsInteger32()) {
+        ASSERT(right->representation().IsInteger32());
+        return new LCmpIDAndBranch(op,
+                                   UseRegisterAtStart(left),
+                                   UseOrConstantAtStart(right),
+                                   first_id,
+                                   second_id,
+                                   false);
+      } else if (left->representation().IsDouble()) {
+        ASSERT(right->representation().IsDouble());
+        return new LCmpIDAndBranch(op,
+                                   UseRegisterAtStart(left),
+                                   UseRegisterAtStart(right),
+                                   first_id,
+                                   second_id,
+                                   true);
+      } else {
+        ASSERT(left->representation().IsTagged());
+        ASSERT(right->representation().IsTagged());
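+        // GT and LTE are presumably emitted as the reversed LT and GTE,
+        // so the operands swap their fixed registers.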
+        bool reversed = op == Token::GT || op == Token::LTE;
+        LOperand* left_operand = UseFixed(left, reversed ? eax : edx);
+        LOperand* right_operand = UseFixed(right, reversed ? edx : eax);
+        LInstruction* result = new LCmpTAndBranch(left_operand,
+                                                  right_operand,
+                                                  first_id,
+                                                  second_id);
+        return MarkAsCall(result, instr);
+      }
+    } else if (v->IsIsSmi()) {
+      HIsSmi* compare = HIsSmi::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LIsSmiAndBranch(Use(compare->value()),
+                                 first_id,
+                                 second_id);
+    } else if (v->IsHasInstanceType()) {
+      HHasInstanceType* compare = HHasInstanceType::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
+                                           TempRegister(),
+                                           first_id,
+                                           second_id);
+    } else if (v->IsHasCachedArrayIndex()) {
+      HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      return new LHasCachedArrayIndexAndBranch(
+          UseRegisterAtStart(compare->value()), first_id, second_id);
+    } else if (v->IsIsNull()) {
+      HIsNull* compare = HIsNull::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      // We only need a temp register for non-strict compare.
+      LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+      return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
+                                  compare->is_strict(),
+                                  temp,
+                                  first_id,
+                                  second_id);
+    } else if (v->IsIsObject()) {
+      HIsObject* compare = HIsObject::cast(v);
+      ASSERT(compare->value()->representation().IsTagged());
+
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+      return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
+                                    temp1,
+                                    temp2,
+                                    first_id,
+                                    second_id);
+    } else if (v->IsCompareJSObjectEq()) {
+      HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
+      return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+                                         UseRegisterAtStart(compare->right()),
+                                         first_id,
+                                         second_id);
+    } else if (v->IsInstanceOf()) {
+      HInstanceOf* instance_of = HInstanceOf::cast(v);
+      LInstruction* result =
+          new LInstanceOfAndBranch(UseFixed(instance_of->left(), eax),
+                                   UseFixed(instance_of->right(), edx),
+                                   first_id,
+                                   second_id);
+      return MarkAsCall(result, instr);
+    } else if (v->IsTypeofIs()) {
+      HTypeofIs* typeof_is = HTypeofIs::cast(v);
+      return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()),
+                                    first_id,
+                                    second_id);
+    } else {
+      if (v->IsConstant()) {
+        if (HConstant::cast(v)->handle()->IsTrue()) {
+          return new LGoto(first_id);
+        } else if (HConstant::cast(v)->handle()->IsFalse()) {
+          return new LGoto(second_id);
+        }
+      }
+      Abort("Undefined compare before branch");
+      return NULL;
+    }
+  }
+  return new LBranch(UseRegisterAtStart(v), first_id, second_id);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMapAndBranch(
+    HCompareMapAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  HBasicBlock* first = instr->FirstSuccessor();
+  HBasicBlock* second = instr->SecondSuccessor();
+  return new LCmpMapAndBranch(value,
+                              instr->map(),
+                              first->block_id(),
+                              second->block_id());
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  return DefineAsRegister(new LArgumentsLength(Use(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  return DefineAsRegister(new LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LInstruction* result =
+      new LInstanceOf(UseFixed(instr->left(), eax),
+                      UseFixed(instr->right(), edx));
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), edi);
+  LOperand* receiver = UseFixed(instr->receiver(), eax);
+  LOperand* length = UseRegisterAtStart(instr->length());
+  LOperand* elements = UseRegisterAtStart(instr->elements());
+  LInstruction* result = new LApplyArguments(function,
+                                             receiver,
+                                             length,
+                                             elements);
+  return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
+  ++argument_count_;
+  LOperand* argument = UseOrConstant(instr->argument());
+  return new LPushArgument(argument);
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
+  return DefineAsRegister(new LGlobalObject);
+}
+
+
+LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
+  return DefineAsRegister(new LGlobalReceiver);
+}
+
+
+LInstruction* LChunkBuilder::DoCallConstantFunction(
+    HCallConstantFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallConstantFunction, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  BuiltinFunctionId op = instr->op();
+  if (op == kMathLog || op == kMathSin || op == kMathCos) {
+    LOperand* input = UseFixedDouble(instr->value(), xmm1);
+    LInstruction* result = new LUnaryMathOperation(input);
+    return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+  } else {
+    LOperand* input = UseRegisterAtStart(instr->value());
+    LInstruction* result = new LUnaryMathOperation(input);
+    switch (op) {
+      case kMathAbs:
+        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+      case kMathFloor:
+        return AssignEnvironment(DefineAsRegister(result));
+      case kMathRound:
+        return AssignEnvironment(DefineAsRegister(result));
+      case kMathSqrt:
+        return DefineSameAsFirst(result);
+      case kMathPowHalf:
+        return AssignEnvironment(DefineSameAsFirst(result));
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
+  ASSERT(instr->key()->representation().IsTagged());
+  argument_count_ -= instr->argument_count();
+  UseFixed(instr->key(), ecx);
+  return MarkAsCall(DefineFixed(new LCallKeyed, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallNamed, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallGlobal, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallKnownGlobal, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  LOperand* constructor = UseFixed(instr->constructor(), edi);
+  argument_count_ -= instr->argument_count();
+  LInstruction* result = new LCallNew(constructor);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallFunction, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallRuntime, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
+  return DoBit(Token::BIT_AND, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
+  ASSERT(instr->value()->representation().IsInteger32());
+  ASSERT(instr->representation().IsInteger32());
+  return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
+  return DoBit(Token::BIT_OR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
+  return DoBit(Token::BIT_XOR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else if (instr->representation().IsInteger32()) {
+    // The temporary operand is necessary to ensure that right is not allocated
+    // into edx.
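+    // (On ia32, idiv divides edx:eax by its operand, leaving the quotient
+    // in eax and the remainder in edx, hence the fixed registers here.)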
+    FixedTemp(edx);
+    LOperand* value = UseFixed(instr->left(), eax);
+    LOperand* divisor = UseRegister(instr->right());
+    return AssignEnvironment(DefineFixed(new LDivI(value, divisor), eax));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    // The temporary operand is necessary to ensure that right is not allocated
+    // into edx.
+    FixedTemp(edx);
+    LOperand* value = UseFixed(instr->left(), eax);
+    LOperand* divisor = UseRegister(instr->right());
+    LInstruction* result = DefineFixed(new LModI(value, divisor), edx);
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+        instr->CheckFlag(HValue::kCanBeDivByZero)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsTagged()) {
+    return DoArithmeticT(Token::MOD, instr);
+  } else {
+    ASSERT(instr->representation().IsDouble());
+    // We call a C function for double modulo. It can't trigger a GC.
+    // We need to use a fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
+    LOperand* left = UseFixedDouble(instr->left(), xmm1);
+    LOperand* right = UseFixedDouble(instr->right(), xmm2);
+    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
+    return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstant(instr->MostConstantOperand());
+    LOperand* temp = NULL;
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      temp = TempRegister();
+    }
+    LMulI* mul = new LMulI(left, right, temp);
+    return AssignEnvironment(DefineSameAsFirst(mul));
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LSubI* sub = new LSubI(left, right);
+    LInstruction* result = DefineSameAsFirst(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    LAddI* add = new LAddI(left, right);
+    LInstruction* result = DefineSameAsFirst(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  ASSERT(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  ASSERT(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), xmm1);
+  LOperand* right = exponent_type.IsDouble() ?
+      UseFixedDouble(instr->right(), xmm2) :
+      UseFixed(instr->right(), eax);
+  LPower* result = new LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+  Token::Value op = instr->token();
+  if (instr->left()->representation().IsInteger32()) {
+    ASSERT(instr->right()->representation().IsInteger32());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    return DefineAsRegister(new LCmpID(op, left, right, false));
+  } else if (instr->left()->representation().IsDouble()) {
+    ASSERT(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return DefineAsRegister(new LCmpID(op, left, right, true));
+  } else {
+    bool reversed = (op == Token::GT || op == Token::LTE);
+    LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
+    LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
+    LInstruction* result = new LCmpT(left, right);
+    return MarkAsCall(DefineFixed(result, eax), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareJSObjectEq(
+    HCompareJSObjectEq* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LInstruction* result = new LCmpJSObjectEq(left, right);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LIsNull(value,
+                                      instr->is_strict()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+
+  return DefineAsRegister(new LIsObject(value, TempRegister()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseAtStart(instr->value());
+
+  return DefineAsRegister(new LIsSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LHasInstanceType(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
+    HHasCachedArrayIndex* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+
+  return DefineAsRegister(new LHasCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseTempRegister(instr->value());
+
+  return DefineSameAsFirst(new LClassOfTest(value, TempRegister()));
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLength(HArrayLength* instr) {
+  LOperand* array = NULL;
+  LOperand* temporary = NULL;
+
+  if (instr->value()->IsLoadElements()) {
+    array = UseRegisterAtStart(instr->value());
+  } else {
+    array = UseRegister(instr->value());
+    temporary = TempRegister();
+  }
+
+  LInstruction* result = new LArrayLength(array, temporary);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
+  LOperand* object = UseRegister(instr->value());
+  LInstruction* result = new LValueOf(object, TempRegister());
+  return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+                                            Use(instr->length())));
+}
+
+
+LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+  LOperand* value = UseFixed(instr->value(), eax);
+  return MarkAsCall(new LThrow(value), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(instr->value());
+      LInstruction* res = new LNumberUntagD(value);
+      return AssignEnvironment(DefineAsRegister(res));
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      bool needs_check = !instr->value()->type().IsSmi();
+      if (needs_check) {
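+        // When truncation is allowed and SSE3 is available the conversion
+        // needs no XMM scratch register (presumably via fisttp); otherwise
+        // xmm1 is reserved as a temp.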
+        LOperand* xmm_temp =
+            (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
+            ? NULL
+            : FixedTemp(xmm1);
+        LInstruction* res = new LTaggedToI(value, xmm_temp);
+        return AssignEnvironment(DefineSameAsFirst(res));
+      } else {
+        return DefineSameAsFirst(new LSmiUntag(value, needs_check));
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(instr->value());
+      LOperand* temp = TempRegister();
+
+      // Make sure that temp and result_temp are different registers.
+      LUnallocated* result_temp = TempRegister();
+      LInstruction* result = new LNumberTagD(value, temp);
+      return AssignPointerMap(Define(result, result_temp));
+    } else {
+      ASSERT(to.IsInteger32());
+      LOperand* value = UseRegister(instr->value());
+      return AssignEnvironment(DefineAsRegister(new LDoubleToI(value)));
+    }
+  } else if (from.IsInteger32()) {
+    if (to.IsTagged()) {
+      HValue* val = instr->value();
+      LOperand* value = UseRegister(val);
+      if (val->HasRange() && val->range()->IsInSmiRange()) {
+        return DefineSameAsFirst(new LSmiTag(value));
+      } else {
+        LInstruction* result = new LNumberTagI(value);
+        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+      }
+    } else {
+      ASSERT(to.IsDouble());
+      return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value, zero));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LInstruction* result = new LCheckInstanceType(value, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
+  LOperand* temp = TempRegister();
+  LInstruction* result =
+      new LCheckPrototypeMaps(temp,
+                              instr->holder(),
+                              instr->receiver_map());
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckSmi(value, not_zero));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new LCheckFunction(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new LCheckMap(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  return new LReturn(UseFixed(instr->value(), eax));
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsInteger32()) {
+    int32_t value = instr->Integer32Value();
+    return DefineAsRegister(new LConstantI(value));
+  } else if (r.IsDouble()) {
+    double value = instr->DoubleValue();
+    return DefineAsRegister(new LConstantD(value));
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new LConstantT(instr->handle()));
+  } else {
+    Abort("unsupported constant representation");
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
+  LInstruction* result = new LLoadGlobal;
+  return instr->check_hole_value()
+      ? AssignEnvironment(DefineAsRegister(result))
+      : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
+  return new LStoreGlobal(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  return DefineAsRegister(
+      new LLoadNamedField(UseRegisterAtStart(instr->object())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), eax);
+  LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), eax);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return DefineSameAsFirst(new LLoadElements(input));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+    HLoadKeyedFastElement* instr) {
+  Representation r = instr->representation();
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  ASSERT(instr->key()->representation().IsInteger32());
+  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* load_result = NULL;
+  // Double needs an extra temp, because the result is converted from a
+  // heap number to a double register.
+  if (r.IsDouble()) load_result = TempRegister();
+  LInstruction* result = new LLoadKeyedFastElement(obj,
+                                                   key,
+                                                   load_result);
+  if (r.IsDouble()) {
+    result = DefineAsRegister(result);
+  } else {
+    result = DefineSameAsFirst(result);
+  }
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* object = UseFixed(instr->object(), edx);
+  LOperand* key = UseFixed(instr->key(), eax);
+
+  LInstruction* result =
+      DefineFixed(new LLoadKeyedGeneric(object, key), eax);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+    HStoreKeyedFastElement* instr) {
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  ASSERT(instr->value()->representation().IsTagged());
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsInteger32());
+
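+  // When a write barrier is needed the operands are forced into temp
+  // registers, since the barrier code clobbers its register inputs.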
+  LOperand* obj = UseTempRegister(instr->object());
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegisterAtStart(instr->value());
+  LOperand* key = needs_write_barrier
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+
+  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), edx);
+  LOperand* key = UseFixed(instr->key(), ecx);
+  LOperand* val = UseFixed(instr->value(), eax);
+
+  ASSERT(instr->object()->representation().IsTagged());
+  ASSERT(instr->key()->representation().IsTagged());
+  ASSERT(instr->value()->representation().IsTagged());
+
+  return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool needs_write_barrier = !instr->value()->type().IsSmi();
+
+  LOperand* obj = needs_write_barrier
+      ? UseTempRegister(instr->object())
+      : UseRegisterAtStart(instr->object());
+
+  LOperand* val = needs_write_barrier
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+
+  // We only need a scratch register if we have a write barrier or a store
+  // into the properties array (not an in-object property).
+  LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
+      ? TempRegister() : NULL;
+
+  return new LStoreNamedField(obj,
+                              instr->name(),
+                              val,
+                              instr->is_in_object(),
+                              instr->offset(),
+                              temp,
+                              needs_write_barrier,
+                              instr->transition());
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* obj = UseFixed(instr->object(), edx);
+  LOperand* val = UseFixed(instr->value(), eax);
+
+  LInstruction* result = new LStoreNamedGeneric(obj, instr->name(), val);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LArrayLiteral, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LObjectLiteral, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LRegExpLiteral, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+  return MarkAsCall(DefineFixed(new LFunctionLiteral, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
+  LInstruction* result = new LDeleteProperty(Use(instr->object()),
+                                             UseOrConstant(instr->key()));
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  int spill_index = chunk()->GetParameterStackSlot(instr->index());
+  return DefineAsSpilled(new LParameter, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  int spill_index = chunk()->GetNextSpillIndex(false);  // Not double-width.
+  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  argument_count_ -= instr->argument_count();
+  return MarkAsCall(DefineFixed(new LCallStub, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object (we bail out in all other
+  // cases).
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  LOperand* arguments = UseRegister(instr->arguments());
+  LOperand* length = UseTempRegister(instr->length());
+  LOperand* index = Use(instr->index());
+  LInstruction* result = new LAccessArgumentsAt(arguments, length, index);
+  return DefineAsRegister(AssignEnvironment(result));
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LInstruction* result = new LTypeof(Use(instr->value()));
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
+  return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  HEnvironment* env = current_block_->last_environment();
+  ASSERT(env != NULL);
+
+  env->set_ast_id(instr->ast_id());
+
+  env->Drop(instr->pop_count());
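+  // Replay the simulate on the environment: values with an assigned index
+  // overwrite that slot, all others are pushed.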
+  for (int i = 0; i < instr->values()->length(); ++i) {
+    HValue* value = instr->values()->at(i);
+    if (instr->HasAssignedIndexAt(i)) {
+      env->Bind(instr->GetAssignedIndexAt(i), value);
+    } else {
+      env->Push(value);
+    }
+  }
+
+  if (FLAG_trace_environment) {
+    PrintF("Reconstructed environment ast_id=%d, instr_id=%d\n",
+           instr->ast_id(),
+           instr->id());
+    env->PrintToStd();
+  }
+  ASSERT(env->values()->length() == instr->environment_height());
+
+  // If there is an instruction pending a deoptimization environment, create
+  // a lazy bailout instruction to capture the environment.
+  if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+    LInstruction* result = new LLazyBailout;
+    result = AssignEnvironment(result);
+    instructions_pending_deoptimization_environment_->
+        set_deoptimization_environment(result->environment());
+    ClearInstructionPendingDeoptimizationEnvironment();
+    return result;
+  }
+
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  return MarkAsCall(new LStackCheck, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->function(),
+                                               false,
+                                               undefined);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedClosure(instr->closure());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment()->outer();
+  current_block_->UpdateEnvironment(outer);
+  return NULL;
+}
+
+
+void LPointerMap::RecordPointer(LOperand* op) {
+  // Do not record arguments as pointers.
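+  // (Arguments live at negative stack slot indices.)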
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  pointer_operands_.Add(op);
+}
+
+
+void LPointerMap::PrintTo(StringStream* stream) const {
+  stream->Add("{");
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    pointer_operands_[i]->PrintTo(stream);
+  }
+  stream->Add("} @%d", position());
+}
+
+} }  // namespace v8::internal
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
new file mode 100644
index 0000000..3f48e50
--- /dev/null
+++ b/src/ia32/lithium-ia32.h
@@ -0,0 +1,2128 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_LITHIUM_IA32_H_
+#define V8_IA32_LITHIUM_IA32_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+class LEnvironment;
+class Translation;
+class LGapNode;
+
+
+// Type hierarchy:
+//
+// LInstruction
+//   LAccessArgumentsAt
+//   LArgumentsElements
+//   LArgumentsLength
+//   LBinaryOperation
+//     LAddI
+//     LApplyArguments
+//     LArithmeticD
+//     LArithmeticT
+//     LBitI
+//     LBoundsCheck
+//     LCmpID
+//     LCmpIDAndBranch
+//     LCmpJSObjectEq
+//     LCmpJSObjectEqAndBranch
+//     LCmpT
+//     LDivI
+//     LInstanceOf
+//     LInstanceOfAndBranch
+//     LLoadKeyedFastElement
+//     LLoadKeyedGeneric
+//     LModI
+//     LMulI
+//     LPower
+//     LShiftI
+//     LSubI
+//   LCallConstantFunction
+//   LCallFunction
+//   LCallGlobal
+//   LCallKeyed
+//   LCallKnownGlobal
+//   LCallNamed
+//   LCallRuntime
+//   LCallStub
+//   LConstant
+//     LConstantD
+//     LConstantI
+//     LConstantT
+//   LDeoptimize
+//   LFunctionLiteral
+//   LGlobalObject
+//   LGlobalReceiver
+//   LLabel
+//   LLazyBailout
+//   LLoadGlobal
+//   LMaterializedLiteral
+//     LArrayLiteral
+//     LObjectLiteral
+//     LRegExpLiteral
+//   LOsrEntry
+//   LParameter
+//   LRegExpConstructResult
+//   LStackCheck
+//   LStoreKeyed
+//     LStoreKeyedFastElement
+//     LStoreKeyedGeneric
+//   LStoreNamed
+//     LStoreNamedField
+//     LStoreNamedGeneric
+//   LUnaryOperation
+//     LArrayLength
+//     LBitNotI
+//     LBranch
+//     LCallNew
+//     LCheckFunction
+//     LCheckInstanceType
+//     LCheckMap
+//     LCheckPrototypeMaps
+//     LCheckSmi
+//     LClassOfTest
+//     LClassOfTestAndBranch
+//     LDeleteProperty
+//     LDoubleToI
+//     LHasCachedArrayIndex
+//     LHasCachedArrayIndexAndBranch
+//     LHasInstanceType
+//     LHasInstanceTypeAndBranch
+//     LInteger32ToDouble
+//     LIsNull
+//     LIsNullAndBranch
+//     LIsObject
+//     LIsObjectAndBranch
+//     LIsSmi
+//     LIsSmiAndBranch
+//     LLoadNamedField
+//     LLoadNamedGeneric
+//     LNumberTagD
+//     LNumberTagI
+//     LPushArgument
+//     LReturn
+//     LSmiTag
+//     LStoreGlobal
+//     LTaggedToI
+//     LThrow
+//     LTypeof
+//     LTypeofIs
+//     LTypeofIsAndBranch
+//     LUnaryMathOperation
+//     LValueOf
+//   LUnknownOSRValue
+
+#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
+  V(BinaryOperation)                            \
+  V(Constant)                                   \
+  V(Call)                                       \
+  V(MaterializedLiteral)                        \
+  V(StoreKeyed)                                 \
+  V(StoreNamed)                                 \
+  V(UnaryOperation)                             \
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
+
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
+  V(AccessArgumentsAt)                          \
+  V(AddI)                                       \
+  V(ApplyArguments)                             \
+  V(ArgumentsElements)                          \
+  V(ArgumentsLength)                            \
+  V(ArithmeticD)                                \
+  V(ArithmeticT)                                \
+  V(ArrayLength)                                \
+  V(ArrayLiteral)                               \
+  V(BitI)                                       \
+  V(BitNotI)                                    \
+  V(BoundsCheck)                                \
+  V(Branch)                                     \
+  V(CallConstantFunction)                       \
+  V(CallFunction)                               \
+  V(CallGlobal)                                 \
+  V(CallKeyed)                                  \
+  V(CallKnownGlobal)                            \
+  V(CallNamed)                                  \
+  V(CallNew)                                    \
+  V(CallRuntime)                                \
+  V(CallStub)                                   \
+  V(CheckFunction)                              \
+  V(CheckInstanceType)                          \
+  V(CheckMap)                                   \
+  V(CheckPrototypeMaps)                         \
+  V(CheckSmi)                                   \
+  V(CmpID)                                      \
+  V(CmpIDAndBranch)                             \
+  V(CmpJSObjectEq)                              \
+  V(CmpJSObjectEqAndBranch)                     \
+  V(CmpMapAndBranch)                            \
+  V(CmpT)                                       \
+  V(CmpTAndBranch)                              \
+  V(ConstantD)                                  \
+  V(ConstantI)                                  \
+  V(ConstantT)                                  \
+  V(DeleteProperty)                             \
+  V(Deoptimize)                                 \
+  V(DivI)                                       \
+  V(DoubleToI)                                  \
+  V(FunctionLiteral)                            \
+  V(Gap)                                        \
+  V(GlobalObject)                               \
+  V(GlobalReceiver)                             \
+  V(Goto)                                       \
+  V(InstanceOf)                                 \
+  V(InstanceOfAndBranch)                        \
+  V(Integer32ToDouble)                          \
+  V(IsNull)                                     \
+  V(IsNullAndBranch)                            \
+  V(IsObject)                                   \
+  V(IsObjectAndBranch)                          \
+  V(IsSmi)                                      \
+  V(IsSmiAndBranch)                             \
+  V(HasInstanceType)                            \
+  V(HasInstanceTypeAndBranch)                   \
+  V(HasCachedArrayIndex)                        \
+  V(HasCachedArrayIndexAndBranch)               \
+  V(ClassOfTest)                                \
+  V(ClassOfTestAndBranch)                       \
+  V(Label)                                      \
+  V(LazyBailout)                                \
+  V(LoadElements)                               \
+  V(LoadGlobal)                                 \
+  V(LoadKeyedFastElement)                       \
+  V(LoadKeyedGeneric)                           \
+  V(LoadNamedField)                             \
+  V(LoadNamedGeneric)                           \
+  V(ModI)                                       \
+  V(MulI)                                       \
+  V(NumberTagD)                                 \
+  V(NumberTagI)                                 \
+  V(NumberUntagD)                               \
+  V(ObjectLiteral)                              \
+  V(OsrEntry)                                   \
+  V(Parameter)                                  \
+  V(Power)                                      \
+  V(PushArgument)                               \
+  V(RegExpLiteral)                              \
+  V(Return)                                     \
+  V(ShiftI)                                     \
+  V(SmiTag)                                     \
+  V(SmiUntag)                                   \
+  V(StackCheck)                                 \
+  V(StoreGlobal)                                \
+  V(StoreKeyedFastElement)                      \
+  V(StoreKeyedGeneric)                          \
+  V(StoreNamedField)                            \
+  V(StoreNamedGeneric)                          \
+  V(SubI)                                       \
+  V(TaggedToI)                                  \
+  V(Throw)                                      \
+  V(Typeof)                                     \
+  V(TypeofIs)                                   \
+  V(TypeofIsAndBranch)                          \
+  V(UnaryMathOperation)                         \
+  V(UnknownOSRValue)                            \
+  V(ValueOf)
+
+
+#define DECLARE_INSTRUCTION(type)                \
+  virtual bool Is##type() const { return true; } \
+  static L##type* cast(LInstruction* instr) {    \
+    ASSERT(instr->Is##type());                   \
+    return reinterpret_cast<L##type*>(instr);    \
+  }
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)        \
+  virtual void CompileToNative(LCodeGen* generator);        \
+  virtual const char* Mnemonic() const { return mnemonic; } \
+  DECLARE_INSTRUCTION(type)
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
+
+
+class LInstruction: public ZoneObject {
+ public:
+  LInstruction()
+      : hydrogen_value_(NULL) { }
+  virtual ~LInstruction() { }
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream) const;
+  virtual void PrintDataTo(StringStream* stream) const { }
+
+  // Declare virtual type testers.
+#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
+  LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+  virtual bool IsControl() const { return false; }
+
+  void set_environment(LEnvironment* env) { environment_.set(env); }
+  LEnvironment* environment() const { return environment_.get(); }
+  bool HasEnvironment() const { return environment_.is_set(); }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_result(LOperand* operand) { result_.set(operand); }
+  LOperand* result() const { return result_.get(); }
+  bool HasResult() const { return result_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void set_deoptimization_environment(LEnvironment* env) {
+    deoptimization_environment_.set(env);
+  }
+  LEnvironment* deoptimization_environment() const {
+    return deoptimization_environment_.get();
+  }
+  bool HasDeoptimizationEnvironment() const {
+    return deoptimization_environment_.is_set();
+  }
+
+ private:
+  SetOncePointer<LEnvironment> environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  SetOncePointer<LOperand> result_;
+  HValue* hydrogen_value_;
+  SetOncePointer<LEnvironment> deoptimization_environment_;
+};
+
+
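+// Resolves the parallel moves of a gap into a sequentially executable
+// order, presumably using the marker operand as scratch to break cycles.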
+class LGapResolver BASE_EMBEDDED {
+ public:
+  LGapResolver(const ZoneList<LMoveOperands>* moves, LOperand* marker_operand);
+  const ZoneList<LMoveOperands>* ResolveInReverseOrder();
+
+ private:
+  LGapNode* LookupNode(LOperand* operand);
+  bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
+  bool CanReach(LGapNode* a, LGapNode* b);
+  void RegisterMove(LMoveOperands move);
+  void AddResultMove(LOperand* from, LOperand* to);
+  void AddResultMove(LGapNode* from, LGapNode* to);
+  void ResolveCycle(LGapNode* start);
+
+  ZoneList<LGapNode*> nodes_;
+  ZoneList<LGapNode*> identified_cycles_;
+  ZoneList<LMoveOperands> result_;
+  LOperand* marker_operand_;
+  int next_visited_id_;
+  int bailout_after_ast_id_;
+};
+
+
+class LParallelMove : public ZoneObject {
+ public:
+  LParallelMove() : move_operands_(4) { }
+
+  void AddMove(LOperand* from, LOperand* to) {
+    move_operands_.Add(LMoveOperands(from, to));
+  }
+
+  bool IsRedundant() const;
+
+  const ZoneList<LMoveOperands>* move_operands() const {
+    return &move_operands_;
+  }
+
+  void PrintDataTo(StringStream* stream) const;
+
+ private:
+  ZoneList<LMoveOperands> move_operands_;
+};
+
+
+class LGap: public LInstruction {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
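+  // Positions within the gap at which parallel moves may be inserted, in
+  // the order in which they are performed.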
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+    if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+class LGoto: public LInstruction {
+ public:
+  explicit LGoto(int block_id, bool include_stack_check = false)
+    : block_id_(block_id), include_stack_check_(include_stack_check) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int block_id() const { return block_id_; }
+  bool include_stack_check() const { return include_stack_check_; }
+
+ private:
+  int block_id_;
+  bool include_stack_check_;
+};
+
+
+class LLazyBailout: public LInstruction {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDeoptimize: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+};
+
+
+class LLabel: public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+
+  TranscendentalCache::Type transcendental_type() {
+    return hydrogen()->transcendental_type();
+  }
+};
+
+
+class LUnknownOSRValue: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+class LUnaryOperation: public LInstruction {
+ public:
+  explicit LUnaryOperation(LOperand* input) : input_(input) { }
+
+  DECLARE_INSTRUCTION(UnaryOperation)
+
+  LOperand* input() const { return input_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* input_;
+};
+
+
+class LBinaryOperation: public LInstruction {
+ public:
+  LBinaryOperation(LOperand* left, LOperand* right)
+      : left_(left), right_(right) { }
+
+  DECLARE_INSTRUCTION(BinaryOperation)
+
+  LOperand* left() const { return left_; }
+  LOperand* right() const { return right_; }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* left_;
+  LOperand* right_;
+};
+
+
+class LApplyArguments: public LBinaryOperation {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements)
+      : LBinaryOperation(function, receiver),
+        length_(length),
+        elements_(elements) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() const { return left(); }
+  LOperand* receiver() const { return right(); }
+  LOperand* length() const { return length_; }
+  LOperand* elements() const { return elements_; }
+
+ private:
+  LOperand* length_;
+  LOperand* elements_;
+};
+
+
+class LAccessArgumentsAt: public LInstruction {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index)
+      : arguments_(arguments), length_(length), index_(index) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() const { return arguments_; }
+  LOperand* length() const { return length_; }
+  LOperand* index() const { return index_; }
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+ private:
+  LOperand* arguments_;
+  LOperand* length_;
+  LOperand* index_;
+};
+
+
+class LArgumentsLength: public LUnaryOperation {
+ public:
+  explicit LArgumentsLength(LOperand* elements) : LUnaryOperation(elements) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements: public LInstruction {
+ public:
+  LArgumentsElements() { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+};
+
+
+class LModI: public LBinaryOperation {
+ public:
+  LModI(LOperand* left, LOperand* right) : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivI: public LBinaryOperation {
+ public:
+  LDivI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+};
+
+
+class LMulI: public LBinaryOperation {
+ public:
+  LMulI(LOperand* left, LOperand* right, LOperand* temp)
+      : LBinaryOperation(left, right), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LCmpID: public LBinaryOperation {
+ public:
+  LCmpID(Token::Value op, LOperand* left, LOperand* right, bool is_double)
+      : LBinaryOperation(left, right), op_(op), is_double_(is_double) { }
+
+  Token::Value op() const { return op_; }
+  bool is_double() const { return is_double_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
+
+ private:
+  Token::Value op_;
+  bool is_double_;
+};
+
+
+class LCmpIDAndBranch: public LCmpID {
+ public:
+  LCmpIDAndBranch(Token::Value op,
+                  LOperand* left,
+                  LOperand* right,
+                  int true_block_id,
+                  int false_block_id,
+                  bool is_double)
+      : LCmpID(op, left, right, is_double),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
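+
+
+// Note: many of the value-producing tests in this file, like LCmpID
+// above, also come in a fused "...AndBranch" form that branches directly
+// to its true and false successor blocks instead of materializing a
+// boolean value.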
+
+
+class LUnaryMathOperation: public LUnaryOperation {
+ public:
+  explicit LUnaryMathOperation(LOperand* value)
+      : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  BuiltinFunctionId op() const { return hydrogen()->op(); }
+};
+
+
+class LCmpJSObjectEq: public LBinaryOperation {
+ public:
+  LCmpJSObjectEq(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+};
+
+
+class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq {
+ public:
+  LCmpJSObjectEqAndBranch(LOperand* left,
+                          LOperand* right,
+                          int true_block_id,
+                          int false_block_id)
+      : LCmpJSObjectEq(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
+                               "cmp-jsobject-eq-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsNull: public LUnaryOperation {
+ public:
+  LIsNull(LOperand* value, bool is_strict)
+      : LUnaryOperation(value), is_strict_(is_strict) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
+
+  bool is_strict() const { return is_strict_; }
+
+ private:
+  bool is_strict_;
+};
+
+
+class LIsNullAndBranch: public LIsNull {
+ public:
+  LIsNullAndBranch(LOperand* value,
+                   bool is_strict,
+                   LOperand* temp,
+                   int true_block_id,
+                   int false_block_id)
+      : LIsNull(value, is_strict),
+        temp_(temp),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsObject: public LUnaryOperation {
+ public:
+  LIsObject(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temp_(temp) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LIsObjectAndBranch: public LIsObject {
+ public:
+  LIsObjectAndBranch(LOperand* value,
+                     LOperand* temp,
+                     LOperand* temp2,
+                     int true_block_id,
+                     int false_block_id)
+      : LIsObject(value, temp),
+        temp2_(temp2),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp2() const { return temp2_; }
+
+ private:
+  LOperand* temp2_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LIsSmi: public LUnaryOperation {
+ public:
+  explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmi)
+};
+
+
+class LIsSmiAndBranch: public LIsSmi {
+ public:
+  LIsSmiAndBranch(LOperand* value,
+                  int true_block_id,
+                  int false_block_id)
+      : LIsSmi(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LHasInstanceType: public LUnaryOperation {
+ public:
+  explicit LHasInstanceType(LOperand* value)
+      : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+
+  InstanceType TestType();  // The type to test against when generating code.
+  Condition BranchCondition();  // The branch condition for 'true'.
+};
+
+
+class LHasInstanceTypeAndBranch: public LHasInstanceType {
+ public:
+  LHasInstanceTypeAndBranch(LOperand* value,
+                            LOperand* temporary,
+                            int true_block_id,
+                            int false_block_id)
+      : LHasInstanceType(value),
+        temp_(temporary),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+  LOperand* temp() { return temp_; }
+
+ private:
+  LOperand* temp_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LHasCachedArrayIndex: public LUnaryOperation {
+ public:
+  explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex {
+ public:
+  LHasCachedArrayIndexAndBranch(LOperand* value,
+                                int true_block_id,
+                                int false_block_id)
+      : LHasCachedArrayIndex(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LClassOfTest: public LUnaryOperation {
+ public:
+  LClassOfTest(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temporary_(temp) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* temporary() { return temporary_; }
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LClassOfTestAndBranch: public LClassOfTest {
+ public:
+  LClassOfTestAndBranch(LOperand* value,
+                        LOperand* temporary,
+                        LOperand* temporary2,
+                        int true_block_id,
+                        int false_block_id)
+      : LClassOfTest(value, temporary),
+        temporary2_(temporary2),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+  LOperand* temporary2() { return temporary2_; }
+
+ private:
+  LOperand* temporary2_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LCmpT: public LBinaryOperation {
+ public:
+  LCmpT(LOperand* left, LOperand* right) : LBinaryOperation(left, right) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(Compare)
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LCmpTAndBranch: public LCmpT {
+ public:
+  LCmpTAndBranch(LOperand* left,
+                 LOperand* right,
+                 int true_block_id,
+                 int false_block_id)
+      : LCmpT(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LInstanceOf: public LBinaryOperation {
+ public:
+  LInstanceOf(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfAndBranch: public LInstanceOf {
+ public:
+  LInstanceOfAndBranch(LOperand* left,
+                       LOperand* right,
+                       int true_block_id,
+                       int false_block_id)
+      : LInstanceOf(left, right),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LBoundsCheck: public LBinaryOperation {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length)
+      : LBinaryOperation(index, length) { }
+
+  LOperand* index() const { return left(); }
+  LOperand* length() const { return right(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+};
+
+
+class LBitI: public LBinaryOperation {
+ public:
+  LBitI(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  Token::Value op() const { return op_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+
+ private:
+  Token::Value op_;
+};
+
+
+class LShiftI: public LBinaryOperation {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : LBinaryOperation(left, right), op_(op), can_deopt_(can_deopt) { }
+
+  Token::Value op() const { return op_; }
+
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI: public LBinaryOperation {
+ public:
+  LSubI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstant: public LInstruction {
+  DECLARE_INSTRUCTION(Constant)
+};
+
+
+class LConstantI: public LConstant {
+ public:
+  explicit LConstantI(int32_t value) : value_(value) { }
+  int32_t value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+
+ private:
+  int32_t value_;
+};
+
+
+class LConstantD: public LConstant {
+ public:
+  explicit LConstantD(double value) : value_(value) { }
+  double value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+
+ private:
+  double value_;
+};
+
+
+class LConstantT: public LConstant {
+ public:
+  explicit LConstantT(Handle<Object> value) : value_(value) { }
+  Handle<Object> value() const { return value_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+
+ private:
+  Handle<Object> value_;
+};
+
+
+class LBranch: public LUnaryOperation {
+ public:
+  LBranch(LOperand* input, int true_block_id, int false_block_id)
+      : LUnaryOperation(input),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Value)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LCmpMapAndBranch: public LUnaryOperation {
+ public:
+  LCmpMapAndBranch(LOperand* value,
+                   Handle<Map> map,
+                   int true_block_id,
+                   int false_block_id)
+      : LUnaryOperation(value),
+        map_(map),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+
+  virtual bool IsControl() const { return true; }
+
+  Handle<Map> map() const { return map_; }
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  Handle<Map> map_;
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LArrayLength: public LUnaryOperation {
+ public:
+  LArrayLength(LOperand* input, LOperand* temporary)
+      : LUnaryOperation(input), temporary_(temporary) { }
+
+  LOperand* temporary() const { return temporary_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLength, "array-length")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLength)
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LValueOf: public LUnaryOperation {
+ public:
+  LValueOf(LOperand* input, LOperand* temporary)
+      : LUnaryOperation(input), temporary_(temporary) { }
+
+  LOperand* temporary() const { return temporary_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
+  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
+
+ private:
+  LOperand* temporary_;
+};
+
+
+class LThrow: public LUnaryOperation {
+ public:
+  explicit LThrow(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
+};
+
+
+class LBitNotI: public LUnaryOperation {
+ public:
+  explicit LBitNotI(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
+};
+
+
+class LAddI: public LBinaryOperation {
+ public:
+  LAddI(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LPower: public LBinaryOperation {
+ public:
+  LPower(LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD: public LBinaryOperation {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  Token::Value op() const { return op_; }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT: public LBinaryOperation {
+ public:
+  LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
+      : LBinaryOperation(left, right), op_(op) { }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const;
+
+  Token::Value op() const { return op_; }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn: public LUnaryOperation {
+ public:
+  explicit LReturn(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField: public LUnaryOperation {
+ public:
+  explicit LLoadNamedField(LOperand* object) : LUnaryOperation(object) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric: public LUnaryOperation {
+ public:
+  explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation(object) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  LOperand* object() const { return input(); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadElements: public LUnaryOperation {
+ public:
+  explicit LLoadElements(LOperand* obj) : LUnaryOperation(obj) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
+};
+
+
+class LLoadKeyedFastElement: public LBinaryOperation {
+ public:
+  LLoadKeyedFastElement(LOperand* elements,
+                        LOperand* key,
+                        LOperand* load_result)
+      : LBinaryOperation(elements, key),
+        load_result_(load_result) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+  LOperand* elements() const { return left(); }
+  LOperand* key() const { return right(); }
+  LOperand* load_result() const { return load_result_; }
+
+ private:
+  LOperand* load_result_;
+};
+
+
+class LLoadKeyedGeneric: public LBinaryOperation {
+ public:
+  LLoadKeyedGeneric(LOperand* obj, LOperand* key)
+      : LBinaryOperation(obj, key) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+
+  LOperand* object() const { return left(); }
+  LOperand* key() const { return right(); }
+};
+
+
+class LLoadGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+};
+
+
+class LStoreGlobal: public LUnaryOperation {
+ public:
+  explicit LStoreGlobal(LOperand* value) : LUnaryOperation(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+};
+
+
+class LPushArgument: public LUnaryOperation {
+ public:
+  explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LGlobalObject: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
+};
+
+
+class LGlobalReceiver: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
+};
+
+
+class LCallConstantFunction: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<JSFunction> function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKeyed: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNamed: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
+  DECLARE_HYDROGEN_ACCESSOR(CallNamed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 2; }
+};
+
+
+class LCallGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<String> name() const { return hydrogen()->name(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallKnownGlobal: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  Handle<JSFunction> target() const { return hydrogen()->target(); }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew: public LUnaryOperation {
+ public:
+  explicit LCallNew(LOperand* constructor) : LUnaryOperation(constructor) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+  DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+};
+
+
+class LInteger32ToDouble: public LUnaryOperation {
+ public:
+  explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LNumberTagI: public LUnaryOperation {
+ public:
+  explicit LNumberTagI(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagD: public LUnaryOperation {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI: public LUnaryOperation {
+ public:
+  explicit LDoubleToI(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Sometimes truncating conversion from a tagged value to an int32.
+class LTaggedToI: public LUnaryOperation {
+ public:
+  LTaggedToI(LOperand* value, LOperand* temp)
+      : LUnaryOperation(value), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
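+
+
+// Note on the truncating() predicate above: whether a conversion may
+// truncate is decided on the hydrogen HChange node.  Truncation is
+// typically permitted when the result feeds an operation with ToInt32
+// semantics, e.g. "x | 0", where the fractional part is discarded anyway
+// (illustrative, not exhaustive).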
+
+
+class LSmiTag: public LUnaryOperation {
+ public:
+  explicit LSmiTag(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+};
+
+
+class LNumberUntagD: public LUnaryOperation {
+ public:
+  explicit LNumberUntagD(LOperand* value) : LUnaryOperation(value) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+};
+
+
+class LSmiUntag: public LUnaryOperation {
+ public:
+  LSmiUntag(LOperand* use, bool needs_check)
+      : LUnaryOperation(use), needs_check_(needs_check) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+  bool needs_check() const { return needs_check_; }
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamed: public LInstruction {
+ public:
+  LStoreNamed(LOperand* obj, Handle<Object> name, LOperand* val)
+      : object_(obj), name_(name), value_(val) { }
+
+  DECLARE_INSTRUCTION(StoreNamed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* object() const { return object_; }
+  Handle<Object> name() const { return name_; }
+  LOperand* value() const { return value_; }
+
+ private:
+  LOperand* object_;
+  Handle<Object> name_;
+  LOperand* value_;
+};
+
+
+class LStoreNamedField: public LStoreNamed {
+ public:
+  LStoreNamedField(LOperand* obj,
+                   Handle<Object> name,
+                   LOperand* val,
+                   bool in_object,
+                   int offset,
+                   LOperand* temp,
+                   bool needs_write_barrier,
+                   Handle<Map> transition)
+      : LStoreNamed(obj, name, val),
+        is_in_object_(in_object),
+        offset_(offset),
+        temp_(temp),
+        needs_write_barrier_(needs_write_barrier),
+        transition_(transition) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+
+  bool is_in_object() { return is_in_object_; }
+  int offset() { return offset_; }
+  LOperand* temp() { return temp_; }
+  bool needs_write_barrier() { return needs_write_barrier_; }
+  Handle<Map> transition() const { return transition_; }
+  void set_transition(Handle<Map> map) { transition_ = map; }
+
+ private:
+  bool is_in_object_;
+  int offset_;
+  LOperand* temp_;
+  bool needs_write_barrier_;
+  Handle<Map> transition_;
+};
+
+
+class LStoreNamedGeneric: public LStoreNamed {
+ public:
+  LStoreNamedGeneric(LOperand* obj,
+                     Handle<Object> name,
+                     LOperand* val)
+      : LStoreNamed(obj, name, val) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+};
+
+
+class LStoreKeyed: public LInstruction {
+ public:
+  LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val)
+      : object_(obj), key_(key), value_(val) { }
+
+  DECLARE_INSTRUCTION(StoreKeyed)
+
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  LOperand* object() const { return object_; }
+  LOperand* key() const { return key_; }
+  LOperand* value() const { return value_; }
+
+ private:
+  LOperand* object_;
+  LOperand* key_;
+  LOperand* value_;
+};
+
+
+class LStoreKeyedFastElement: public LStoreKeyed {
+ public:
+  LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
+      : LStoreKeyed(obj, key, val) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+                               "store-keyed-fast-element")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+};
+
+
+class LStoreKeyedGeneric: public LStoreKeyed {
+ public:
+  LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
+      : LStoreKeyed(obj, key, val) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+};
+
+
+class LCheckFunction: public LUnaryOperation {
+ public:
+  explicit LCheckFunction(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
+  DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
+};
+
+
+class LCheckInstanceType: public LUnaryOperation {
+ public:
+  LCheckInstanceType(LOperand* use, LOperand* temp)
+      : LUnaryOperation(use), temp_(temp) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+
+  LOperand* temp() const { return temp_; }
+
+ private:
+  LOperand* temp_;
+};
+
+
+class LCheckMap: public LUnaryOperation {
+ public:
+  explicit LCheckMap(LOperand* use) : LUnaryOperation(use) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
+};
+
+
+class LCheckPrototypeMaps: public LInstruction {
+ public:
+  LCheckPrototypeMaps(LOperand* temp,
+                      Handle<JSObject> holder,
+                      Handle<Map> receiver_map)
+      : temp_(temp),
+        holder_(holder),
+        receiver_map_(receiver_map) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
+
+  LOperand* temp() const { return temp_; }
+  Handle<JSObject> holder() const { return holder_; }
+  Handle<Map> receiver_map() const { return receiver_map_; }
+
+ private:
+  LOperand* temp_;
+  Handle<JSObject> holder_;
+  Handle<Map> receiver_map_;
+};
+
+
+class LCheckSmi: public LUnaryOperation {
+ public:
+  LCheckSmi(LOperand* use, Condition condition)
+      : LUnaryOperation(use), condition_(condition) { }
+
+  Condition condition() const { return condition_; }
+
+  virtual void CompileToNative(LCodeGen* generator);
+  virtual const char* Mnemonic() const {
+    return (condition_ == zero) ? "check-non-smi" : "check-smi";
+  }
+
+ private:
+  Condition condition_;
+};
+
+
+class LMaterializedLiteral: public LInstruction {
+ public:
+  DECLARE_INSTRUCTION(MaterializedLiteral)
+};
+
+
+class LArrayLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
+};
+
+
+class LObjectLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
+  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+};
+
+
+class LRegExpLiteral: public LMaterializedLiteral {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+
+  Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
+};
+
+
+class LTypeof: public LUnaryOperation {
+ public:
+  explicit LTypeof(LOperand* input) : LUnaryOperation(input) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIs: public LUnaryOperation {
+ public:
+  explicit LTypeofIs(LOperand* input) : LUnaryOperation(input) { }
+  virtual void PrintDataTo(StringStream* stream) const;
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+};
+
+
+class LTypeofIsAndBranch: public LTypeofIs {
+ public:
+  LTypeofIsAndBranch(LOperand* value,
+                     int true_block_id,
+                     int false_block_id)
+      : LTypeofIs(value),
+        true_block_id_(true_block_id),
+        false_block_id_(false_block_id) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+
+  virtual void PrintDataTo(StringStream* stream) const;
+  virtual bool IsControl() const { return true; }
+
+  int true_block_id() const { return true_block_id_; }
+  int false_block_id() const { return false_block_id_; }
+
+ private:
+  int true_block_id_;
+  int false_block_id_;
+};
+
+
+class LDeleteProperty: public LBinaryOperation {
+ public:
+  LDeleteProperty(LOperand* obj, LOperand* key) : LBinaryOperation(obj, key) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
+
+  LOperand* object() const { return left(); }
+  LOperand* key() const { return right(); }
+};
+
+
+class LOsrEntry: public LInstruction {
+ public:
+  LOsrEntry();
+
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+
+  LOperand** SpilledRegisterArray() { return register_spills_; }
+  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
+
+  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
+  void MarkSpilledDoubleRegister(int allocation_index,
+                                 LOperand* spill_operand);
+
+ private:
+  // Arrays of spill slot operands for registers with an assigned spill
+  // slot, i.e., that must also be restored to the spill slot on OSR entry.
+  // NULL if the register has no assigned spill slot.  Indexed by allocation
+  // index.
+  LOperand* register_spills_[Register::kNumAllocatableRegisters];
+  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+};
+
+
+class LStackCheck: public LInstruction {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+};
+
+
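+// A pointer map records which operands hold tagged pointers at a given
+// safepoint, so that the garbage collector can find (and update) all
+// such pointers while optimized code is on the stack.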
+class LPointerMap: public ZoneObject {
+ public:
+  explicit LPointerMap(int position)
+      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
+
+  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
+  int position() const { return position_; }
+  int lithium_position() const { return lithium_position_; }
+
+  void set_lithium_position(int pos) {
+    ASSERT(lithium_position_ == -1);
+    lithium_position_ = pos;
+  }
+
+  void RecordPointer(LOperand* op);
+  void PrintTo(StringStream* stream) const;
+
+ private:
+  ZoneList<LOperand*> pointer_operands_;
+  int position_;
+  int lithium_position_;
+};
+
+
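+// An LEnvironment captures what the deoptimizer needs to rebuild an
+// unoptimized frame at a given AST id; for inlined calls, outer() points
+// to the environment of the calling function.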
+class LEnvironment: public ZoneObject {
+ public:
+  LEnvironment(Handle<JSFunction> closure,
+               int ast_id,
+               int parameter_count,
+               int argument_count,
+               int value_count,
+               LEnvironment* outer)
+      : closure_(closure),
+        arguments_stack_height_(argument_count),
+        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
+        translation_index_(-1),
+        ast_id_(ast_id),
+        parameter_count_(parameter_count),
+        values_(value_count),
+        representations_(value_count),
+        spilled_registers_(NULL),
+        spilled_double_registers_(NULL),
+        outer_(outer) {
+  }
+
+  Handle<JSFunction> closure() const { return closure_; }
+  int arguments_stack_height() const { return arguments_stack_height_; }
+  int deoptimization_index() const { return deoptimization_index_; }
+  int translation_index() const { return translation_index_; }
+  int ast_id() const { return ast_id_; }
+  int parameter_count() const { return parameter_count_; }
+  const ZoneList<LOperand*>* values() const { return &values_; }
+  LEnvironment* outer() const { return outer_; }
+
+  void AddValue(LOperand* operand, Representation representation) {
+    values_.Add(operand);
+    representations_.Add(representation);
+  }
+
+  bool HasTaggedValueAt(int index) const {
+    return representations_[index].IsTagged();
+  }
+
+  void Register(int deoptimization_index, int translation_index) {
+    ASSERT(!HasBeenRegistered());
+    deoptimization_index_ = deoptimization_index;
+    translation_index_ = translation_index;
+  }
+  bool HasBeenRegistered() const {
+    return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
+  }
+
+  void SetSpilledRegisters(LOperand** registers,
+                           LOperand** double_registers) {
+    spilled_registers_ = registers;
+    spilled_double_registers_ = double_registers;
+  }
+
+  // Emit frame translation commands for this environment.
+  void WriteTranslation(LCodeGen* cgen, Translation* translation) const;
+
+  void PrintTo(StringStream* stream) const;
+
+ private:
+  Handle<JSFunction> closure_;
+  int arguments_stack_height_;
+  int deoptimization_index_;
+  int translation_index_;
+  int ast_id_;
+  int parameter_count_;
+  ZoneList<LOperand*> values_;
+  ZoneList<Representation> representations_;
+
+  // Arrays, indexed by allocation index, of spill slot operands for
+  // registers that are also in spill slots at an OSR entry.  NULL for
+  // environments that do not correspond to an OSR entry.
+  LOperand** spilled_registers_;
+  LOperand** spilled_double_registers_;
+
+  LEnvironment* outer_;
+};
+
+class LChunkBuilder;
+class LChunk: public ZoneObject {
+ public:
+  explicit LChunk(HGraph* graph);
+
+  int AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  LConstantOperand* DefineConstantOperand(HConstant* constant);
+  Handle<Object> LookupLiteral(LConstantOperand* operand) const;
+  Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+
+  int GetNextSpillIndex(bool is_double);
+  LOperand* GetNextSpillSlot(bool is_double);
+
+  int ParameterAt(int index);
+  int GetParameterStackSlot(int index) const;
+  int spill_slot_count() const { return spill_slot_count_; }
+  HGraph* graph() const { return graph_; }
+  const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+  void AddGapMove(int index, LOperand* from, LOperand* to);
+  LGap* GetGapAt(int index) const;
+  bool IsGapAt(int index) const;
+  int NearestGapPos(int index) const;
+  void MarkEmptyBlocks();
+  const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
+  LLabel* GetLabel(int block_id) const {
+    HBasicBlock* block = graph_->blocks()->at(block_id);
+    int first_instruction = block->first_instruction_index();
+    return LLabel::cast(instructions_[first_instruction]);
+  }
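+  // Returns the id of the block that will actually be emitted for
+  // block_id, following the label replacement chain: e.g. if
+  // MarkEmptyBlocks() replaced block 2's label with block 5's, then
+  // LookupDestination(2) returns 5.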
+  int LookupDestination(int block_id) const {
+    LLabel* cur = GetLabel(block_id);
+    while (cur->replacement() != NULL) {
+      cur = cur->replacement();
+    }
+    return cur->block_id();
+  }
+  Label* GetAssemblyLabel(int block_id) const {
+    LLabel* label = GetLabel(block_id);
+    ASSERT(!label->HasReplacement());
+    return label->label();
+  }
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures() const {
+    return &inlined_closures_;
+  }
+
+  void AddInlinedClosure(Handle<JSFunction> closure) {
+    inlined_closures_.Add(closure);
+  }
+
+  void Verify() const;
+
+ private:
+  int spill_slot_count_;
+  HGraph* const graph_;
+  ZoneList<LInstruction*> instructions_;
+  ZoneList<LPointerMap*> pointer_maps_;
+  ZoneList<Handle<JSFunction> > inlined_closures_;
+};
+
+
+class LChunkBuilder BASE_EMBEDDED {
+ public:
+  LChunkBuilder(HGraph* graph, LAllocator* allocator)
+      : chunk_(NULL),
+        graph_(graph),
+        status_(UNUSED),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        argument_count_(0),
+        allocator_(allocator),
+        position_(RelocInfo::kNoPosition),
+        instructions_pending_deoptimization_environment_(NULL),
+        pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+
+  // Build the sequence for the graph.
+  LChunk* Build();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
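+  // Each expansion of DECLARE_DO above declares one handler, e.g.
+  //   LInstruction* DoAdd(HAdd* node);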
+
+ private:
+  enum Status {
+    UNUSED,
+    BUILDING,
+    DONE,
+    ABORTED
+  };
+
+  LChunk* chunk() const { return chunk_; }
+  HGraph* graph() const { return graph_; }
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_building() const { return status_ == BUILDING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  void Abort(const char* format, ...);
+
+  // Methods for getting operands for Use / Define / Temp.
+  LRegister* ToOperand(Register reg);
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(XMMRegister reg);
+
+  // Methods for setting up define-use relationships.
+  LOperand* Use(HValue* value, LUnallocated* operand);
+  LOperand* UseFixed(HValue* value, Register fixed_register);
+  LOperand* UseFixedDouble(HValue* value, XMMRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the
+  // end of the instruction, so the register allocator will not reuse its
+  // register for any other operand inside that instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only
+  // at the instruction start, so the register allocator is free to assign
+  // the same register to some other operand used inside the instruction
+  // (i.e. a temporary or the output).
+  LOperand* UseRegister(HValue* value);
+  LOperand* UseRegisterAtStart(HValue* value);
+
+  // A value in a register that may be trashed.
+  LOperand* UseTempRegister(HValue* value);
+  LOperand* Use(HValue* value);
+  LOperand* UseAtStart(HValue* value);
+  LOperand* UseOrConstant(HValue* value);
+  LOperand* UseOrConstantAtStart(HValue* value);
+  LOperand* UseRegisterOrConstant(HValue* value);
+  LOperand* UseRegisterOrConstantAtStart(HValue* value);
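+
+  // A typical builder method combines these (illustrative sketch):
+  //   LOperand* left = UseRegisterAtStart(instr->left());
+  //   LOperand* right = UseOrConstantAtStart(instr->right());
+  //   return DefineSameAsFirst(new LAddI(left, right));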
+
+  // Methods for defining the result of an instruction.
+  // They return the same instruction that they are passed.
+  LInstruction* Define(LInstruction* instr, LUnallocated* result);
+  LInstruction* Define(LInstruction* instr);
+  LInstruction* DefineAsRegister(LInstruction* instr);
+  LInstruction* DefineAsSpilled(LInstruction* instr, int index);
+  LInstruction* DefineSameAsAny(LInstruction* instr);
+  LInstruction* DefineSameAsFirst(LInstruction* instr);
+  LInstruction* DefineFixed(LInstruction* instr, Register reg);
+  LInstruction* DefineFixedDouble(LInstruction* instr, XMMRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly, and we do not attach an environment to
+  // such instructions.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
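+
+  // Typical use in a builder method (sketch, where 'result' is the new
+  // LInstruction and 'instr' is the hydrogen node):
+  //   return MarkAsCall(DefineFixed(result, eax), instr);
+  // This attaches a pointer map and, when needed, a lazy deoptimization
+  // environment.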
+
+  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
+      LInstruction* instr, int ast_id);
+  void ClearInstructionPendingDeoptimizationEnvironment();
+
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+
+  // Temporary operand that may be a memory location.
+  LOperand* Temp();
+  // Temporary operand that must be in a register.
+  LUnallocated* TempRegister();
+  LOperand* FixedTemp(Register reg);
+  LOperand* FixedTemp(XMMRegister reg);
+
+  void VisitInstruction(HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+
+  LChunk* chunk_;
+  HGraph* const graph_;
+  Status status_;
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  int argument_count_;
+  LAllocator* allocator_;
+  int position_;
+  LInstruction* instructions_pending_deoptimization_environment_;
+  int pending_deoptimization_ast_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_INSTRUCTION
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} }  // namespace v8::internal
+
+#endif  // V8_IA32_LITHIUM_IA32_H_
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index cbf93dd..7c33906 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -74,30 +74,6 @@
 }
 
 
-void MacroAssembler::InNewSpace(Register object,
-                                Register scratch,
-                                Condition cc,
-                                Label* branch) {
-  ASSERT(cc == equal || cc == not_equal);
-  if (Serializer::enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    mov(scratch, Operand(object));
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    and_(Operand(scratch), Immediate(ExternalReference::new_space_mask()));
-    cmp(Operand(scratch), Immediate(ExternalReference::new_space_start()));
-    j(cc, branch);
-  } else {
-    int32_t new_space_start = reinterpret_cast<int32_t>(
-        ExternalReference::new_space_start().address());
-    lea(scratch, Operand(object, -new_space_start));
-    and_(scratch, Heap::NewSpaceMask());
-    j(cc, branch);
-  }
-}
-
-
 void MacroAssembler::RecordWrite(Register object,
                                  int offset,
                                  Register value,
@@ -109,7 +85,7 @@
 
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
-  Label done;
+  NearLabel done;
 
   // Skip barrier if writing a smi.
   ASSERT_EQ(0, kSmiTag);
@@ -183,13 +159,6 @@
 }
 
 
-void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
-  cmp(esp,
-      Operand::StaticVariable(ExternalReference::address_of_stack_limit()));
-  j(below, on_stack_overflow);
-}
-
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
   Set(eax, Immediate(0));
@@ -364,9 +333,20 @@
 }
 
 
-void MacroAssembler::EnterExitFrameEpilogue(int argc) {
-  // Reserve space for arguments.
-  sub(Operand(esp), Immediate(argc * kPointerSize));
+void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
+  // Optionally save all XMM registers.
+  if (save_doubles) {
+    CpuFeatures::Scope scope(SSE2);
+    int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
+    sub(Operand(esp), Immediate(space));
+    int offset = -2 * kPointerSize;
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+    }
+  } else {
+    sub(Operand(esp), Immediate(argc * kPointerSize));
+  }
 
   // Get the required frame alignment for the OS.
   static const int kFrameAlignment = OS::ActivationFrameAlignment();
@@ -380,7 +360,7 @@
 }
 
 
-void MacroAssembler::EnterExitFrame() {
+void MacroAssembler::EnterExitFrame(bool save_doubles) {
   EnterExitFramePrologue();
 
   // Setup argc and argv in callee-saved registers.
@@ -388,17 +368,27 @@
   mov(edi, Operand(eax));
   lea(esi, Operand(ebp, eax, times_4, offset));
 
-  EnterExitFrameEpilogue(2);
+  EnterExitFrameEpilogue(2, save_doubles);
 }
 
 
 void MacroAssembler::EnterApiExitFrame(int argc) {
   EnterExitFramePrologue();
-  EnterExitFrameEpilogue(argc);
+  EnterExitFrameEpilogue(argc, false);
 }
 
 
-void MacroAssembler::LeaveExitFrame() {
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+  // Optionally restore all XMM registers.
+  if (save_doubles) {
+    CpuFeatures::Scope scope(SSE2);
+    int offset = -2 * kPointerSize;
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+    }
+  }
+
   // Get the return address from the stack and restore the frame pointer.
   mov(ecx, Operand(ebp, 1 * kPointerSize));
   mov(ebp, Operand(ebp, 0 * kPointerSize));
@@ -1098,6 +1088,16 @@
 }
 
 
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+  Runtime::Function* function = Runtime::FunctionForId(id);
+  Set(eax, Immediate(function->nargs));
+  mov(ebx, Immediate(ExternalReference(function)));
+  CEntryStub ces(1);
+  ces.SaveDoubles();
+  CallStub(&ces);
+}
+
+
 MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
                                             int num_arguments) {
   return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
@@ -1192,25 +1192,29 @@
 }
 
 
-// If true, a Handle<T> passed by value is passed and returned by
-// using the location_ field directly.  If false, it is passed and
-// returned as a pointer to a handle.
-#ifdef USING_BSD_ABI
-static const bool kPassHandlesDirectly = true;
+// If true, a Handle<T> returned by value from a function with cdecl
+// calling convention is returned directly as the value of the location_
+// field in register eax.
+// If false, it is returned as a pointer to a memory region preallocated
+// by the caller; a pointer to this region is passed to the function as an
+// implicit first argument.
+#if defined(USING_BSD_ABI) || defined(__MINGW32__)
+static const bool kReturnHandlesDirectly = true;
 #else
-static const bool kPassHandlesDirectly = false;
+static const bool kReturnHandlesDirectly = false;
 #endif
 
 
 Operand ApiParameterOperand(int index) {
-  return Operand(esp, (index + (kPassHandlesDirectly ? 0 : 1)) * kPointerSize);
+  return Operand(
+      esp, (index + (kReturnHandlesDirectly ? 0 : 1)) * kPointerSize);
 }
 
 
 void MacroAssembler::PrepareCallApiFunction(int argc, Register scratch) {
-  if (kPassHandlesDirectly) {
+  if (kReturnHandlesDirectly) {
     EnterApiExitFrame(argc);
-    // When handles as passed directly we don't have to allocate extra
+    // When handles are returned directly we don't have to allocate extra
     // space for and pass an out parameter.
   } else {
     // We allocate two additional slots: return value and pointer to it.
@@ -1255,7 +1259,7 @@
   // Call the api function!
   call(function->address(), RelocInfo::RUNTIME_ENTRY);
 
-  if (!kPassHandlesDirectly) {
+  if (!kReturnHandlesDirectly) {
     // The returned value is a pointer to the handle holding the result.
     // Dereference this to get to the location.
     mov(eax, Operand(eax, 0));
@@ -1336,7 +1340,8 @@
                                     Handle<Code> code_constant,
                                     const Operand& code_operand,
                                     Label* done,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   bool definitely_matches = false;
   Label invoke;
   if (expected.is_immediate()) {
@@ -1387,6 +1392,7 @@
 
     if (flag == CALL_FUNCTION) {
       call(adaptor, RelocInfo::CODE_TARGET);
+      if (post_call_generator != NULL) post_call_generator->Generate();
       jmp(done);
     } else {
       jmp(adaptor, RelocInfo::CODE_TARGET);
@@ -1399,11 +1405,14 @@
 void MacroAssembler::InvokeCode(const Operand& code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
-                                InvokeFlag flag) {
+                                InvokeFlag flag,
+                                PostCallGenerator* post_call_generator) {
   Label done;
-  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+  InvokePrologue(expected, actual, Handle<Code>::null(), code,
+                 &done, flag, post_call_generator);
   if (flag == CALL_FUNCTION) {
     call(code);
+    if (post_call_generator != NULL) post_call_generator->Generate();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
     jmp(code);
@@ -1416,12 +1425,15 @@
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
-                                InvokeFlag flag) {
+                                InvokeFlag flag,
+                                PostCallGenerator* post_call_generator) {
   Label done;
   Operand dummy(eax);
-  InvokePrologue(expected, actual, code, dummy, &done, flag);
+  InvokePrologue(expected, actual, code, dummy, &done,
+                 flag, post_call_generator);
   if (flag == CALL_FUNCTION) {
     call(code, rmode);
+    if (post_call_generator != NULL) post_call_generator->Generate();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
     jmp(code, rmode);
@@ -1432,7 +1444,8 @@
 
 void MacroAssembler::InvokeFunction(Register fun,
                                     const ParameterCount& actual,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   ASSERT(fun.is(edi));
   mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -1441,25 +1454,37 @@
 
   ParameterCount expected(ebx);
   InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-             expected, actual, flag);
+             expected, actual, flag, post_call_generator);
 }
 
 
 void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
-                                    InvokeFlag flag) {
+                                    InvokeFlag flag,
+                                    PostCallGenerator* post_call_generator) {
   ASSERT(function->is_compiled());
   // Get the function and setup the context.
   mov(edi, Immediate(Handle<JSFunction>(function)));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-  // Invoke the cached code.
-  Handle<Code> code(function->code());
+
   ParameterCount expected(function->shared()->formal_parameter_count());
-  InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+               expected, actual, flag, post_call_generator);
+  } else {
+    Handle<Code> code(function->code());
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET,
+               flag, post_call_generator);
+  }
 }
 
 
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeFlag flag,
+                                   PostCallGenerator* post_call_generator) {
   // Calls are not allowed in some stubs.
   ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
 
@@ -1469,7 +1494,7 @@
   ParameterCount expected(0);
   GetBuiltinFunction(edi, id);
   InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-           expected, expected, flag);
+             expected, expected, flag, post_call_generator);
 }
 
 void MacroAssembler::GetBuiltinFunction(Register target,
@@ -1534,6 +1559,15 @@
 }
 
 
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+  // The registers are pushed starting with the lowest encoding, so the
+  // lowest encodings end up furthest away from the stack pointer (e.g.
+  // the register with code 0 is pushed first and gets the highest index).
+  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
+  return kNumSafepointRegisters - reg_code - 1;
+}
+
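Note: a minimal sketch of the index mapping, assuming ia32 pushad semantics
(eight general-purpose registers, eax encoded as 0 and pushed first):

    #include <cassert>

    static const int kNumSafepointRegisters = 8;  // pushad pushes 8 registers

    static int SafepointRegisterStackIndex(int reg_code) {
      // eax (code 0) is pushed first and therefore ends up deepest in the
      // frame, i.e. at the highest index counted from the stack pointer.
      assert(reg_code >= 0 && reg_code < kNumSafepointRegisters);
      return kNumSafepointRegisters - reg_code - 1;
    }

    int main() {
      assert(SafepointRegisterStackIndex(0) == 7);  // eax: furthest from esp
      assert(SafepointRegisterStackIndex(7) == 0);  // edi: pushed last
      return 0;
    }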
+
 void MacroAssembler::Ret() {
   ret(0);
 }
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index d208dbe..6f5fa87 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -51,6 +51,7 @@
 
 // Forward declaration.
 class JumpTarget;
+class PostCallGenerator;
 
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
@@ -69,10 +70,11 @@
 
   // Check if object is in new space.
   // scratch can be object itself, but it will be clobbered.
+  template <typename LabelType>
   void InNewSpace(Register object,
                   Register scratch,
                   Condition cc,  // equal for new space, not_equal otherwise.
-                  Label* branch);
+                  LabelType* branch);
 
   // For page containing |object| mark region covering [object+offset]
   // dirty. |object| is the object being stored into, |value| is the
@@ -103,12 +105,6 @@
 #endif
 
   // ---------------------------------------------------------------------------
-  // Stack limit support
-
-  // Do simple test for stack overflow. This doesn't handle an overflow.
-  void StackLimitCheck(Label* on_stack_limit_hit);
-
-  // ---------------------------------------------------------------------------
   // Activation frames
 
   void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -117,18 +113,18 @@
   void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
 
-  // Enter specific kind of exit frame; either in normal or debug mode.
-  // Expects the number of arguments in register eax and
-  // sets up the number of arguments in register edi and the pointer
-  // to the first argument in register esi.
-  void EnterExitFrame();
+  // Enter specific kind of exit frame. Expects the number of
+  // arguments in register eax and sets up the number of arguments in
+  // register edi and the pointer to the first argument in register
+  // esi.
+  void EnterExitFrame(bool save_doubles);
 
   void EnterApiExitFrame(int argc);
 
   // Leave the current exit frame. Expects the return value in
   // register eax:edx (untouched) and the pointer to the first
   // argument in register esi.
-  void LeaveExitFrame();
+  void LeaveExitFrame(bool save_doubles);
 
   // Leave the current exit frame. Expects the return value in
   // register eax (untouched).
@@ -144,6 +140,11 @@
   // function and map can be the same.
   void LoadGlobalFunctionInitialMap(Register function, Register map);
 
+  // Push and pop the registers that can hold pointers.
+  void PushSafepointRegisters() { pushad(); }
+  void PopSafepointRegisters() { popad(); }
+  static int SafepointRegisterStackIndex(int reg_code);
+
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
@@ -151,27 +152,33 @@
   void InvokeCode(const Operand& code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
-                  InvokeFlag flag);
+                  InvokeFlag flag,
+                  PostCallGenerator* post_call_generator = NULL);
 
   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   RelocInfo::Mode rmode,
-                  InvokeFlag flag);
+                  InvokeFlag flag,
+                  PostCallGenerator* post_call_generator = NULL);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      PostCallGenerator* post_call_generator = NULL);
 
   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      PostCallGenerator* post_call_generator = NULL);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
-  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+  void InvokeBuiltin(Builtins::JavaScript id,
+                     InvokeFlag flag,
+                     PostCallGenerator* post_call_generator = NULL);
 
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
@@ -457,6 +464,7 @@
 
   // Call a runtime routine.
   void CallRuntime(Runtime::Function* f, int num_arguments);
+  void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
   // Call a runtime function, returning the CodeStub object called.
   // Try to generate the stub code if necessary.  Do not perform a GC
@@ -546,6 +554,12 @@
 
   void Call(Label* target) { call(target); }
 
+  // Emit call to the code we are currently generating.
+  void CallSelf() {
+    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
+    call(self, RelocInfo::CODE_TARGET);
+  }
+
   // Move if the registers are not identical.
   void Move(Register target, Register source);
 
@@ -618,14 +632,15 @@
                       Handle<Code> code_constant,
                       const Operand& code_operand,
                       Label* done,
-                      InvokeFlag flag);
+                      InvokeFlag flag,
+                      PostCallGenerator* post_call_generator = NULL);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
 
   void EnterExitFramePrologue();
-  void EnterExitFrameEpilogue(int argc);
+  void EnterExitFrameEpilogue(int argc, bool save_doubles);
 
   void LeaveExitFrameEpilogue();
 
@@ -644,6 +659,31 @@
 };
 
 
+template <typename LabelType>
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                LabelType* branch) {
+  ASSERT(cc == equal || cc == not_equal);
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    mov(scratch, Operand(object));
+    // The mask isn't really an address.  We load it as an external reference in
+    // case the size of the new space is different between the snapshot maker
+    // and the running system.
+    and_(Operand(scratch), Immediate(ExternalReference::new_space_mask()));
+    cmp(Operand(scratch), Immediate(ExternalReference::new_space_start()));
+    j(cc, branch);
+  } else {
+    int32_t new_space_start = reinterpret_cast<int32_t>(
+        ExternalReference::new_space_start().address());
+    lea(scratch, Operand(object, -new_space_start));
+    and_(scratch, Heap::NewSpaceMask());
+    j(cc, branch);
+  }
+}
+
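Note: a host-side sketch of the non-serializing branch's arithmetic,
assuming the new space is a power-of-two sized region and that
Heap::NewSpaceMask() masks away exactly the offset bits:

    #include <cassert>
    #include <cstdint>

    // (addr - start) has only offset bits set iff addr lies inside
    // [start, start + size), for a power-of-two size.
    static bool InNewSpace(uintptr_t addr, uintptr_t start, uintptr_t size) {
      uintptr_t mask = ~(size - 1);  // assumed shape of the new-space mask
      return ((addr - start) & mask) == 0;
    }

    int main() {
      const uintptr_t start = 0x40000000, size = 0x800000;  // illustrative
      assert(InNewSpace(start, start, size));
      assert(InNewSpace(start + size - 1, start, size));
      assert(!InNewSpace(start + size, start, size));
      return 0;
    }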
+
 // The code patcher is used to patch (typically) small parts of code e.g. for
 // debugging and other types of instrumentation. When using the code patcher
 // the exact number of bytes specified must be emitted. It is not legal to emit
@@ -664,6 +704,17 @@
 };
 
 
+// Helper class for generating code or data associated with the code
+// right after a call instruction. As an example this can be used to
+// generate safepoint data after calls for crankshaft.
+class PostCallGenerator {
+ public:
+  PostCallGenerator() { }
+  virtual ~PostCallGenerator() { }
+  virtual void Generate() = 0;
+};
+
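Note: a hypothetical subclass, sketching how a code generator could hook
safepoint recording onto call sites through this interface (CodeGenState
and RecordSafepoint are illustrative names, not V8's):

    struct CodeGenState {  // hypothetical owner of the safepoint table
      void RecordSafepoint() { /* append an entry at the current pc */ }
    };

    class SafepointRecordingGenerator : public PostCallGenerator {
     public:
      explicit SafepointRecordingGenerator(CodeGenState* state)
          : state_(state) { }
      virtual void Generate() {
        // Runs right after the call instruction is emitted, so the current
        // pc offset identifies the call's return address.
        state_->RecordSafepoint();
      }
     private:
      CodeGenState* state_;
    };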
+
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index adcb521..99888b0 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -855,9 +855,14 @@
   }
   JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
   ASSERT(cell->value()->IsTheHole());
-  __ mov(scratch, Immediate(Handle<Object>(cell)));
-  __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
-         Immediate(Factory::the_hole_value()));
+  if (Serializer::enabled()) {
+    __ mov(scratch, Immediate(Handle<Object>(cell)));
+    __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+           Immediate(Factory::the_hole_value()));
+  } else {
+    __ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
+           Immediate(Factory::the_hole_value()));
+  }
   __ j(not_equal, miss, not_taken);
   return cell;
 }
@@ -1326,8 +1331,12 @@
                                                     JSFunction* function,
                                                     Label* miss) {
   // Get the value from the cell.
-  __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-  __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
+  if (Serializer::enabled()) {
+    __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
+  } else {
+    __ mov(edi, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+  }
 
   // Check that the cell contains the same function.
   if (Heap::InNewSpace(function)) {
@@ -1710,7 +1719,7 @@
   char_code_at_generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_code_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1785,7 +1794,7 @@
   char_at_generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1858,7 +1867,7 @@
   char_from_code_generator.GenerateFast(masm());
   __ ret(2 * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_from_code_generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
@@ -2124,8 +2133,8 @@
   // -----------------------------------
 
   SharedFunctionInfo* function_info = function->shared();
-  if (function_info->HasCustomCallGenerator()) {
-    const int id = function_info->custom_call_generator_id();
+  if (function_info->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function_info->builtin_function_id();
     MaybeObject* maybe_result = CompileCustomCall(
         id, object, holder, NULL, function, name);
     Object* result;
@@ -2366,8 +2375,8 @@
   // -----------------------------------
 
   SharedFunctionInfo* function_info = function->shared();
-  if (function_info->HasCustomCallGenerator()) {
-    const int id = function_info->custom_call_generator_id();
+  if (function_info->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function_info->builtin_function_id();
     MaybeObject* maybe_result = CompileCustomCall(
         id, object, holder, cell, function, name);
     Object* result;
@@ -2399,10 +2408,18 @@
   // Jump to the cached code (tail call).
   __ IncrementCounter(&Counters::call_global_inline, 1);
   ASSERT(function->is_compiled());
-  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  __ InvokeCode(code, expected, arguments(),
-                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+                  expected, arguments(), JUMP_FUNCTION);
+  } else {
+    Handle<Code> code(function->code());
+    __ InvokeCode(code, expected, arguments(),
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  }
 
   // Handle call cache miss.
   __ bind(&miss);
@@ -2565,8 +2582,12 @@
   __ j(not_equal, &miss, not_taken);
 
   // Store the value in the cell.
-  __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-  __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
+  if (Serializer::enabled()) {
+    __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
+  } else {
+    __ mov(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)), eax);
+  }
 
   // Return the value (register eax).
   __ IncrementCounter(&Counters::named_store_global_inline, 1);
@@ -2620,6 +2641,63 @@
 }
 
 
+MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
+    JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the map matches.
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(receiver->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Check that the key is a smi.
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(not_zero, &miss, not_taken);
+
+  // Get the elements array and make sure it is a fast element array,
+  // not a copy-on-write ('cow') array.
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+         Immediate(Factory::fixed_array_map()));
+  __ j(not_equal, &miss, not_taken);
+
+  // Check that the key is within bounds.
+  if (receiver->IsJSArray()) {
+    __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
+    __ j(above_equal, &miss, not_taken);
+  } else {
+    __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // Compare smis.
+    __ j(above_equal, &miss, not_taken);
+  }
+
+  // Do the store and update the write barrier. Make sure to preserve
+  // the value in register eax.
+  __ mov(edx, Operand(eax));
+  __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
+  __ RecordWrite(edi, 0, edx, ecx);
+
+  // Done.
+  __ ret(0);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
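Note: the two bounds checks above compare tagged smis directly.  This is
sound because the ia32 smi tag is a zero bit in the least-significant
position, so tagging preserves order.  A quick sketch of that invariant:

    #include <cassert>
    #include <cstdint>

    // ia32 smis (assumed layout): the value shifted left by one, tag bit 0.
    static uint32_t ToSmi(uint32_t value) { return value << 1; }

    int main() {
      assert((ToSmi(3) < ToSmi(10)) == (3u < 10u));    // order preserved
      assert(!(ToSmi(10) < ToSmi(10)));                // equality preserved
      assert((ToSmi(11) < ToSmi(10)) == (11u < 10u));  // reversals too
      return 0;
    }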
+
 MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
                                                       JSObject* object,
                                                       JSObject* last) {
@@ -2793,8 +2871,12 @@
   CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
 
   // Get the value from the cell.
-  __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-  __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+  if (Serializer::enabled()) {
+    __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+  } else {
+    __ mov(ebx, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
+  }
 
   // Check for deleted property if property can actually be deleted.
   if (!is_dont_delete) {
@@ -3019,6 +3101,51 @@
 }
 
 
+MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- eax    : key
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &miss, not_taken);
+
+  // Check that the map matches.
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(receiver->map())));
+  __ j(not_equal, &miss, not_taken);
+
+  // Check that the key is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &miss, not_taken);
+
+  // Get the elements array.
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ AssertFastElements(ecx);
+
+  // Check that the key is within bounds.
+  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
+  __ j(above_equal, &miss, not_taken);
+
+  // Load the result and make sure it's not the hole.
+  __ mov(ebx, Operand(ecx, eax, times_2,
+                      FixedArray::kHeaderSize - kHeapObjectTag));
+  __ cmp(ebx, Factory::the_hole_value());
+  __ j(equal, &miss, not_taken);
+  __ mov(eax, ebx);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
 // Specialized stub for constructing objects from functions which have only
 // simple assignments of the form this.x = ...; in their body.
 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
diff --git a/src/ic-inl.h b/src/ic-inl.h
index 94dbd5f..8fbc184 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -75,7 +75,7 @@
 
 
 void IC::SetTargetAtAddress(Address address, Code* target) {
-  ASSERT(target->is_inline_cache_stub());
+  ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
   Assembler::set_target_address_at(address, target->instruction_start());
 }
 
diff --git a/src/ic.cc b/src/ic.cc
index 58acebc..645c6fd 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -30,6 +30,7 @@
 #include "accessors.h"
 #include "api.h"
 #include "arguments.h"
+#include "codegen.h"
 #include "execution.h"
 #include "ic-inl.h"
 #include "runtime.h"
@@ -156,7 +157,7 @@
 IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
   IC::State state = target->ic_state();
 
-  if (state != MONOMORPHIC) return state;
+  if (state != MONOMORPHIC || !name->IsString()) return state;
   if (receiver->IsUndefined() || receiver->IsNull()) return state;
 
   InlineCacheHolderFlag cache_holder =
@@ -259,8 +260,12 @@
     case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
     case Code::CALL_IC: return CallIC::Clear(address, target);
     case Code::KEYED_CALL_IC:  return KeyedCallIC::Clear(address, target);
-    case Code::BINARY_OP_IC: return;  // Clearing these is tricky and does not
-                                      // make any performance difference.
+    case Code::BINARY_OP_IC:
+    case Code::TYPE_RECORDING_BINARY_OP_IC:
+    case Code::COMPARE_IC:
+      // Clearing these is tricky and does not
+      // make any performance difference.
+      return;
     default: UNREACHABLE();
   }
 }
@@ -1134,9 +1139,20 @@
         stub = external_array_stub(receiver->GetElementsKind());
       } else if (receiver->HasIndexedInterceptor()) {
         stub = indexed_interceptor_stub();
+      } else if (state == UNINITIALIZED &&
+                 key->IsSmi() &&
+                 receiver->map()->has_fast_elements()) {
+        MaybeObject* probe = StubCache::ComputeKeyedLoadSpecialized(*receiver);
+        stub =
+            probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
       }
     }
-    set_target(stub);
+    if (stub != NULL) set_target(stub);
+
+#ifdef DEBUG
+    TraceIC("KeyedLoadIC", key, state, target());
+#endif  // DEBUG
+
     // For JSObjects with fast elements that are not value wrappers
     // and that do not have indexed interceptors, we initialize the
     // inlined fast case (if present) by patching the inlined map
@@ -1360,6 +1376,17 @@
     }
   }
 
+  if (receiver->IsJSGlobalProxy()) {
+    // Generate a generic stub that goes to the runtime when we see a global
+    // proxy as receiver.
+    if (target() != global_proxy_stub()) {
+      set_target(global_proxy_stub());
+#ifdef DEBUG
+      TraceIC("StoreIC", name, state, target());
+#endif
+    }
+  }
+
   // Set the property.
   return receiver->SetProperty(*name, *value, NONE);
 }
@@ -1503,9 +1530,15 @@
       Handle<JSObject> receiver = Handle<JSObject>::cast(object);
       if (receiver->HasExternalArrayElements()) {
         stub = external_array_stub(receiver->GetElementsKind());
+      } else if (state == UNINITIALIZED &&
+                 key->IsSmi() &&
+                 receiver->map()->has_fast_elements()) {
+        MaybeObject* probe = StubCache::ComputeKeyedStoreSpecialized(*receiver);
+        stub =
+            probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
       }
     }
-    set_target(stub);
+    if (stub != NULL) set_target(stub);
   }
 
   // Set the property.
@@ -1750,6 +1783,7 @@
 
 const char* BinaryOpIC::GetName(TypeInfo type_info) {
   switch (type_info) {
+    case UNINIT_OR_SMI: return "UninitOrSmi";
     case DEFAULT: return "Default";
     case GENERIC: return "Generic";
     case HEAP_NUMBERS: return "HeapNumbers";
@@ -1761,23 +1795,26 @@
 
 BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
   switch (type_info) {
-    // DEFAULT is mapped to UNINITIALIZED so that calls to DEFAULT stubs
-    // are not cleared at GC.
-    case DEFAULT: return UNINITIALIZED;
-
-    // Could have mapped GENERIC to MONOMORPHIC just as well but MEGAMORPHIC is
-    // conceptually closer.
-    case GENERIC: return MEGAMORPHIC;
-
-    default: return MONOMORPHIC;
+    case UNINIT_OR_SMI:
+      return UNINITIALIZED;
+    case DEFAULT:
+    case HEAP_NUMBERS:
+    case STRINGS:
+      return MONOMORPHIC;
+    case GENERIC:
+      return MEGAMORPHIC;
   }
+  UNREACHABLE();
+  return UNINITIALIZED;
 }
 
 
 BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left,
                                              Object* right) {
   if (left->IsSmi() && right->IsSmi()) {
-    return GENERIC;
+    // If we have two smi inputs we can reach here because
+    // of an overflow. Enter default state.
+    return DEFAULT;
   }
 
   if (left->IsNumber() && right->IsNumber()) {
@@ -1794,43 +1831,36 @@
 }
 
 
-// defined in codegen-<arch>.cc
+// defined in code-stubs-<arch>.cc
 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
 
 
 MUST_USE_RESULT MaybeObject* BinaryOp_Patch(Arguments args) {
   ASSERT(args.length() == 5);
 
+  HandleScope scope;
   Handle<Object> left = args.at<Object>(0);
   Handle<Object> right = args.at<Object>(1);
   int key = Smi::cast(args[2])->value();
   Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
-#ifdef DEBUG
-  BinaryOpIC::TypeInfo prev_type_info =
+  BinaryOpIC::TypeInfo previous_type =
       static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
-#endif  // DEBUG
-  { HandleScope scope;
-    BinaryOpIC::TypeInfo type_info = BinaryOpIC::GetTypeInfo(*left, *right);
-    Handle<Code> code = GetBinaryOpStub(key, type_info);
-    if (!code.is_null()) {
-      BinaryOpIC ic;
-      ic.patch(*code);
-#ifdef DEBUG
-      if (FLAG_trace_ic) {
-        PrintF("[BinaryOpIC (%s->%s)#%s]\n",
-            BinaryOpIC::GetName(prev_type_info),
-            BinaryOpIC::GetName(type_info),
-            Token::Name(op));
-      }
-#endif  // DEBUG
+
+  BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(*left, *right);
+  Handle<Code> code = GetBinaryOpStub(key, type);
+  if (!code.is_null()) {
+    BinaryOpIC ic;
+    ic.patch(*code);
+    if (FLAG_trace_ic) {
+      PrintF("[BinaryOpIC (%s->%s)#%s]\n",
+             BinaryOpIC::GetName(previous_type),
+             BinaryOpIC::GetName(type),
+             Token::Name(op));
     }
   }
 
-  HandleScope scope;
   Handle<JSBuiltinsObject> builtins = Top::builtins();
-
   Object* builtin = NULL;  // Initialization calms down the compiler.
-
   switch (op) {
     case Token::ADD:
       builtin = builtins->javascript_builtin(Builtins::ADD);
@@ -1885,6 +1915,248 @@
 }
 
 
+void TRBinaryOpIC::patch(Code* code) {
+  set_target(code);
+}
+
+
+const char* TRBinaryOpIC::GetName(TypeInfo type_info) {
+  switch (type_info) {
+    case UNINITIALIZED: return "Uninitialized";
+    case SMI: return "SMI";
+    case INT32: return "Int32s";
+    case HEAP_NUMBER: return "HeapNumbers";
+    case STRING: return "Strings";
+    case GENERIC: return "Generic";
+    default: return "Invalid";
+  }
+}
+
+
+TRBinaryOpIC::State TRBinaryOpIC::ToState(TypeInfo type_info) {
+  switch (type_info) {
+    case UNINITIALIZED:
+      return ::v8::internal::UNINITIALIZED;
+    case SMI:
+    case INT32:
+    case HEAP_NUMBER:
+    case STRING:
+      return MONOMORPHIC;
+    case GENERIC:
+      return MEGAMORPHIC;
+  }
+  UNREACHABLE();
+  return ::v8::internal::UNINITIALIZED;
+}
+
+
+TRBinaryOpIC::TypeInfo TRBinaryOpIC::JoinTypes(TRBinaryOpIC::TypeInfo x,
+                                               TRBinaryOpIC::TypeInfo y) {
+  if (x == UNINITIALIZED) return y;
+  if (y == UNINITIALIZED) return x;
+  if (x == STRING && y == STRING) return STRING;
+  if (x == STRING || y == STRING) return GENERIC;
+  if (x >= y) return x;
+  return y;
+}
+
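Note: JoinTypes is a join on a small lattice.  Apart from the STRING
special case it is simply the maximum of the two values, assuming the
declaration order UNINITIALIZED < SMI < INT32 < HEAP_NUMBER < STRING <
GENERIC from ic.h.  A standalone restatement with spot checks:

    #include <cassert>

    enum TypeInfo { UNINITIALIZED, SMI, INT32, HEAP_NUMBER, STRING, GENERIC };

    static TypeInfo Join(TypeInfo x, TypeInfo y) {
      if (x == UNINITIALIZED) return y;
      if (y == UNINITIALIZED) return x;
      if (x == STRING && y == STRING) return STRING;
      if (x == STRING || y == STRING) return GENERIC;  // mixing generalizes
      return x >= y ? x : y;                           // otherwise: maximum
    }

    int main() {
      assert(Join(SMI, INT32) == INT32);             // widen within numbers
      assert(Join(STRING, HEAP_NUMBER) == GENERIC);  // string/number mix
      assert(Join(UNINITIALIZED, SMI) == SMI);       // first observation wins
      return 0;
    }
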
+TRBinaryOpIC::TypeInfo TRBinaryOpIC::GetTypeInfo(Handle<Object> left,
+                                                 Handle<Object> right) {
+  ::v8::internal::TypeInfo left_type =
+      ::v8::internal::TypeInfo::TypeFromValue(left);
+  ::v8::internal::TypeInfo right_type =
+      ::v8::internal::TypeInfo::TypeFromValue(right);
+
+  if (left_type.IsSmi() && right_type.IsSmi()) {
+    return SMI;
+  }
+
+  if (left_type.IsInteger32() && right_type.IsInteger32()) {
+    return INT32;
+  }
+
+  if (left_type.IsNumber() && right_type.IsNumber()) {
+    return HEAP_NUMBER;
+  }
+
+  if (left_type.IsString() || right_type.IsString()) {
+    // Patching for fast string ADD makes sense even if only one of the
+    // arguments is a string.
+    return STRING;
+  }
+
+  return GENERIC;
+}
+
+
+// defined in code-stubs-<arch>.cc
+// Only needed to remove dependency of ic.cc on code-stubs-<arch>.h.
+Handle<Code> GetTypeRecordingBinaryOpStub(int key,
+                                          TRBinaryOpIC::TypeInfo type_info,
+                                          TRBinaryOpIC::TypeInfo result_type);
+
+
+MaybeObject* TypeRecordingBinaryOp_Patch(Arguments args) {
+  ASSERT(args.length() == 5);
+
+  HandleScope scope;
+  Handle<Object> left = args.at<Object>(0);
+  Handle<Object> right = args.at<Object>(1);
+  int key = Smi::cast(args[2])->value();
+  Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
+  TRBinaryOpIC::TypeInfo previous_type =
+      static_cast<TRBinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
+
+  TRBinaryOpIC::TypeInfo type = TRBinaryOpIC::GetTypeInfo(left, right);
+  type = TRBinaryOpIC::JoinTypes(type, previous_type);
+  TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED;
+  if (type == TRBinaryOpIC::STRING && op != Token::ADD) {
+    type = TRBinaryOpIC::GENERIC;
+  }
+  if (type == TRBinaryOpIC::SMI &&
+      previous_type == TRBinaryOpIC::SMI) {
+    if (op == Token::DIV || op == Token::MUL) {
+      // Arithmetic on two Smi inputs has yielded a heap number.
+      // That is the only way to get here from the Smi stub.
+      result_type = TRBinaryOpIC::HEAP_NUMBER;
+    } else {
+      // Other operations on SMIs that overflow yield int32s.
+      result_type = TRBinaryOpIC::INT32;
+    }
+  }
+  if (type == TRBinaryOpIC::INT32 &&
+      previous_type == TRBinaryOpIC::INT32) {
+    // We must be here because an operation on two INT32 types overflowed.
+    result_type = TRBinaryOpIC::HEAP_NUMBER;
+  }
+
+  Handle<Code> code = GetTypeRecordingBinaryOpStub(key, type, result_type);
+  if (!code.is_null()) {
+    TRBinaryOpIC ic;
+    ic.patch(*code);
+    if (FLAG_trace_ic) {
+      PrintF("[TypeRecordingBinaryOpIC (%s->(%s->%s))#%s]\n",
+             TRBinaryOpIC::GetName(previous_type),
+             TRBinaryOpIC::GetName(type),
+             TRBinaryOpIC::GetName(result_type),
+             Token::Name(op));
+    }
+
+    // Activate inlined smi code.
+    if (previous_type == TRBinaryOpIC::UNINITIALIZED) {
+      PatchInlinedSmiCode(ic.address());
+    }
+  }
+
+  Handle<JSBuiltinsObject> builtins = Top::builtins();
+  Object* builtin = NULL;  // Initialization calms down the compiler.
+  switch (op) {
+    case Token::ADD:
+      builtin = builtins->javascript_builtin(Builtins::ADD);
+      break;
+    case Token::SUB:
+      builtin = builtins->javascript_builtin(Builtins::SUB);
+      break;
+    case Token::MUL:
+      builtin = builtins->javascript_builtin(Builtins::MUL);
+      break;
+    case Token::DIV:
+      builtin = builtins->javascript_builtin(Builtins::DIV);
+      break;
+    case Token::MOD:
+      builtin = builtins->javascript_builtin(Builtins::MOD);
+      break;
+    case Token::BIT_AND:
+      builtin = builtins->javascript_builtin(Builtins::BIT_AND);
+      break;
+    case Token::BIT_OR:
+      builtin = builtins->javascript_builtin(Builtins::BIT_OR);
+      break;
+    case Token::BIT_XOR:
+      builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
+      break;
+    case Token::SHR:
+      builtin = builtins->javascript_builtin(Builtins::SHR);
+      break;
+    case Token::SAR:
+      builtin = builtins->javascript_builtin(Builtins::SAR);
+      break;
+    case Token::SHL:
+      builtin = builtins->javascript_builtin(Builtins::SHL);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  Handle<JSFunction> builtin_function(JSFunction::cast(builtin));
+
+  bool caught_exception;
+  Object** builtin_args[] = { right.location() };
+  Handle<Object> result = Execution::Call(builtin_function,
+                                          left,
+                                          ARRAY_SIZE(builtin_args),
+                                          builtin_args,
+                                          &caught_exception);
+  if (caught_exception) {
+    return Failure::Exception();
+  }
+  return *result;
+}
+
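Note: a standalone restatement of the result-type widening above.  Two smi
inputs can only reach the miss handler by leaving the smi range, and only
MUL/DIV can produce a non-integer from two integers (e.g. 3 / 2 == 1.5,
while 2^30 + 2^30 overflows a 31-bit smi yet still fits an int32):

    enum Type { UNINITIALIZED, SMI, INT32, HEAP_NUMBER, STRING, GENERIC };
    enum Op { ADD, SUB, MUL, DIV, MOD };

    static Type ResultTypeOnMiss(Type type, Type previous_type, Op op) {
      if (type == SMI && previous_type == SMI) {
        return (op == DIV || op == MUL) ? HEAP_NUMBER : INT32;
      }
      if (type == INT32 && previous_type == INT32) return HEAP_NUMBER;
      return UNINITIALIZED;  // not constrained by this miss
    }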
+
+Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
+  ICCompareStub stub(op, UNINITIALIZED);
+  return stub.GetCode();
+}
+
+
+CompareIC::State CompareIC::ComputeState(Code* target) {
+  int key = target->major_key();
+  if (key == CodeStub::Compare) return GENERIC;
+  ASSERT(key == CodeStub::CompareIC);
+  return static_cast<State>(target->compare_state());
+}
+
+
+const char* CompareIC::GetStateName(State state) {
+  switch (state) {
+    case UNINITIALIZED: return "UNINITIALIZED";
+    case SMIS: return "SMIS";
+    case HEAP_NUMBERS: return "HEAP_NUMBERS";
+    case OBJECTS: return "OBJECTS";
+    case GENERIC: return "GENERIC";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+CompareIC::State CompareIC::TargetState(State state,
+                                        bool has_inlined_smi_code,
+                                        Handle<Object> x,
+                                        Handle<Object> y) {
+  if (!has_inlined_smi_code && state != UNINITIALIZED) return GENERIC;
+  if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
+  if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
+      x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
+  if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
+  if (state == UNINITIALIZED &&
+      x->IsJSObject() && y->IsJSObject()) return OBJECTS;
+  return GENERIC;
+}
+
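Note: the transition function above compresses to a small table.  A
standalone restatement with spot checks (operand kinds reduced to flags,
purely illustrative):

    #include <cassert>

    enum State { UNINITIALIZED, SMIS, HEAP_NUMBERS, OBJECTS, GENERIC };

    static State Target(State state, bool inlined_smi, bool both_smis,
                        bool both_numbers, bool both_objects, bool equality) {
      if (!inlined_smi && state != UNINITIALIZED) return GENERIC;
      if (state == UNINITIALIZED && both_smis) return SMIS;
      if ((state == UNINITIALIZED || (state == SMIS && inlined_smi)) &&
          both_numbers) {
        return HEAP_NUMBERS;
      }
      if (!equality) return GENERIC;
      if (state == UNINITIALIZED && both_objects) return OBJECTS;
      return GENERIC;
    }

    int main() {
      // 1 < 2 goes straight to the smi case.
      assert(Target(UNINITIALIZED, true, true, true, false, false) == SMIS);
      // The smi case misses on 1 < 2.5 while inlined smi code is active.
      assert(Target(SMIS, true, false, true, false, false) == HEAP_NUMBERS);
      // Without inlined smi code, any non-initial miss generalizes.
      assert(Target(SMIS, false, false, true, false, false) == GENERIC);
      return 0;
    }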
+
+// Used from ic_<arch>.cc.
+Code* CompareIC_Miss(Arguments args) {
+  NoHandleAllocation na;
+  ASSERT(args.length() == 3);
+  CompareIC ic(static_cast<Token::Value>(Smi::cast(args[2])->value()));
+  ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
+  return ic.target();
+}
+
+
 static Address IC_utilities[] = {
 #define ADDR(name) FUNCTION_ADDR(name),
     IC_UTIL_LIST(ADDR)
diff --git a/src/ic.h b/src/ic.h
index 7b8b1bf..8562bcc 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -28,7 +28,7 @@
 #ifndef V8_IC_H_
 #define V8_IC_H_
 
-#include "assembler.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -53,8 +53,9 @@
   ICU(LoadPropertyWithInterceptorForCall)             \
   ICU(KeyedLoadPropertyWithInterceptor)               \
   ICU(StoreInterceptorProperty)                       \
-  ICU(BinaryOp_Patch)
-
+  ICU(BinaryOp_Patch)                                 \
+  ICU(TypeRecordingBinaryOp_Patch)                    \
+  ICU(CompareIC_Miss)
 //
 // IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
 // and KeyedStoreIC.
@@ -403,6 +404,7 @@
   static void GenerateMegamorphic(MacroAssembler* masm);
   static void GenerateArrayLength(MacroAssembler* masm);
   static void GenerateNormal(MacroAssembler* masm);
+  static void GenerateGlobalProxy(MacroAssembler* masm);
 
   // Clear the use of an inlined version.
   static void ClearInlinedVersion(Address address);
@@ -426,6 +428,9 @@
   static Code* initialize_stub() {
     return Builtins::builtin(Builtins::StoreIC_Initialize);
   }
+  static Code* global_proxy_stub() {
+    return Builtins::builtin(Builtins::StoreIC_GlobalProxy);
+  }
 
   static void Clear(Address address, Code* target);
 
@@ -503,6 +508,7 @@
  public:
 
   enum TypeInfo {
+    UNINIT_OR_SMI,
     DEFAULT,  // Initial state. When first executed, patches to one
               // of the following states depending on the operands types.
     HEAP_NUMBERS,  // Both arguments are HeapNumbers.
@@ -514,8 +520,6 @@
 
   void patch(Code* code);
 
-  static void Clear(Address address, Code* target);
-
   static const char* GetName(TypeInfo type_info);
 
   static State ToState(TypeInfo type_info);
@@ -523,6 +527,74 @@
   static TypeInfo GetTypeInfo(Object* left, Object* right);
 };
 
+
+// Type-recording BinaryOpIC; it records the types of its inputs and outputs.
+class TRBinaryOpIC: public IC {
+ public:
+  enum TypeInfo {
+    UNINITIALIZED,
+    SMI,
+    INT32,
+    HEAP_NUMBER,
+    STRING,  // Only used for addition operation.  At least one string operand.
+    GENERIC
+  };
+
+  TRBinaryOpIC() : IC(NO_EXTRA_FRAME) { }
+
+  void patch(Code* code);
+
+  static const char* GetName(TypeInfo type_info);
+
+  static State ToState(TypeInfo type_info);
+
+  static TypeInfo GetTypeInfo(Handle<Object> left, Handle<Object> right);
+
+  static TypeInfo JoinTypes(TypeInfo x, TypeInfo y);
+};
+
+
+class CompareIC: public IC {
+ public:
+  enum State {
+    UNINITIALIZED,
+    SMIS,
+    HEAP_NUMBERS,
+    OBJECTS,
+    GENERIC
+  };
+
+  explicit CompareIC(Token::Value op) : IC(EXTRA_CALL_FRAME), op_(op) { }
+
+  // Update the inline cache for the given operands.
+  void UpdateCaches(Handle<Object> x, Handle<Object> y);
+
+  // Factory method for getting an uninitialized compare stub.
+  static Handle<Code> GetUninitialized(Token::Value op);
+
+  // Helper function for computing the condition for a compare operation.
+  static Condition ComputeCondition(Token::Value op);
+
+  // Helper function for determining the state of a compare IC.
+  static State ComputeState(Code* target);
+
+  static const char* GetStateName(State state);
+
+ private:
+  State TargetState(State state, bool has_inlined_smi_code,
+                    Handle<Object> x, Handle<Object> y);
+
+  bool strict() const { return op_ == Token::EQ_STRICT; }
+  Condition GetCondition() const { return ComputeCondition(op_); }
+  State GetState() { return ComputeState(target()); }
+
+  Token::Value op_;
+};
+
+// Helper for TRBinaryOpIC and CompareIC.
+void PatchInlinedSmiCode(Address address);
+
 } }  // namespace v8::internal
 
 #endif  // V8_IC_H_
diff --git a/src/json.js b/src/json.js
index 5993100..89009a9 100644
--- a/src/json.js
+++ b/src/json.js
@@ -66,51 +66,10 @@
   }
 }
 
-var characterQuoteCache = {
-  '\b': '\\b',  // ASCII 8, Backspace
-  '\t': '\\t',  // ASCII 9, Tab
-  '\n': '\\n',  // ASCII 10, Newline
-  '\f': '\\f',  // ASCII 12, Formfeed
-  '\r': '\\r',  // ASCII 13, Carriage Return
-  '\"': '\\"',
-  '\\': '\\\\'
-};
-
-function QuoteSingleJSONCharacter(c) {
-  if (c in characterQuoteCache) {
-    return characterQuoteCache[c];
-  }
-  var charCode = c.charCodeAt(0);
-  var result;
-  if (charCode < 16) result = '\\u000';
-  else if (charCode < 256) result = '\\u00';
-  else if (charCode < 4096) result = '\\u0';
-  else result = '\\u';
-  result += charCode.toString(16);
-  characterQuoteCache[c] = result;
-  return result;
-}
-
-function QuoteJSONString(str) {
-  var quotable = /[\\\"\x00-\x1f]/g;
-  return '"' + str.replace(quotable, QuoteSingleJSONCharacter) + '"';
-}
-
-function StackContains(stack, val) {
-  var length = stack.length;
-  for (var i = 0; i < length; i++) {
-    if (stack[i] === val) {
-      return true;
-    }
-  }
-  return false;
-}
-
 function SerializeArray(value, replacer, stack, indent, gap) {
-  if (StackContains(stack, value)) {
+  if (!%PushIfAbsent(stack, value)) {
     throw MakeTypeError('circular_structure', []);
   }
-  stack.push(value);
   var stepback = indent;
   indent += gap;
   var partial = [];
@@ -138,10 +97,9 @@
 }
 
 function SerializeObject(value, replacer, stack, indent, gap) {
-  if (StackContains(stack, value)) {
+  if (!%PushIfAbsent(stack, value)) {
     throw MakeTypeError('circular_structure', []);
   }
-  stack.push(value);
   var stepback = indent;
   indent += gap;
   var partial = [];
@@ -152,7 +110,7 @@
         var p = replacer[i];
         var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
         if (!IS_UNDEFINED(strP)) {
-          var member = QuoteJSONString(p) + ":";
+          var member = %QuoteJSONString(p) + ":";
           if (gap != "") member += " ";
           member += strP;
           partial.push(member);
@@ -164,7 +122,7 @@
       if (ObjectHasOwnProperty.call(value, p)) {
         var strP = JSONSerialize(p, value, replacer, stack, indent, gap);
         if (!IS_UNDEFINED(strP)) {
-          var member = QuoteJSONString(p) + ":";
+          var member = %QuoteJSONString(p) + ":";
           if (gap != "") member += " ";
           member += strP;
           partial.push(member);
@@ -188,71 +146,159 @@
 
 function JSONSerialize(key, holder, replacer, stack, indent, gap) {
   var value = holder[key];
-  if (IS_OBJECT(value) && value) {
+  if (IS_SPEC_OBJECT(value)) {
     var toJSON = value.toJSON;
     if (IS_FUNCTION(toJSON)) {
-      value = toJSON.call(value, key);
+      value = %_CallFunction(value, key, toJSON);
     }
   }
   if (IS_FUNCTION(replacer)) {
-    value = replacer.call(holder, key, value);
+    value = %_CallFunction(holder, key, value, replacer);
   }
-  // Unwrap value if necessary
-  if (IS_OBJECT(value)) {
-    if (IS_NUMBER_WRAPPER(value)) {
-      value = $Number(value);
+  if (IS_STRING(value)) {
+    return %QuoteJSONString(value);
+  } else if (IS_NUMBER(value)) {
+    return $isFinite(value) ? $String(value) : "null";
+  } else if (IS_BOOLEAN(value)) {
+    return value ? "true" : "false";
+  } else if (IS_NULL(value)) {
+    return "null";
+  } else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) {
+    // Non-callable object. If it's a primitive wrapper, it must be unwrapped.
+    if (IS_ARRAY(value)) {
+      return SerializeArray(value, replacer, stack, indent, gap);
+    } else if (IS_NUMBER_WRAPPER(value)) {
+      value = ToNumber(value);
+      return $isFinite(value) ? ToString(value) : "null";
     } else if (IS_STRING_WRAPPER(value)) {
-      value = $String(value);
+      return %QuoteJSONString(ToString(value));
     } else if (IS_BOOLEAN_WRAPPER(value)) {
-      value =  %_ValueOf(value);
+      return %_ValueOf(value) ? "true" : "false";
+    } else {
+      return SerializeObject(value, replacer, stack, indent, gap);
     }
   }
-  switch (typeof value) {
-    case "string":
-      return QuoteJSONString(value);
-    case "object":
-      if (!value) {
-        return "null";
-      } else if (IS_ARRAY(value)) {
-        return SerializeArray(value, replacer, stack, indent, gap);
+  // Undefined or a callable object.
+  return void 0;
+}
+
+
+function BasicSerializeArray(value, stack, builder) {
+  if (!%PushIfAbsent(stack, value)) {
+    throw MakeTypeError('circular_structure', []);
+  }
+  builder.push("[");
+  var len = value.length;
+  for (var i = 0; i < len; i++) {
+    var before = builder.length;
+    BasicJSONSerialize(i, value, stack, builder);
+    if (before == builder.length) builder.push("null");
+    builder.push(",");
+  }
+  stack.pop();
+  if (builder.pop() != ",") {
+    builder.push("[]");  // Zero length array. Push "[" back on.
+  } else {
+    builder.push("]");
+  }
+}
+
+
+function BasicSerializeObject(value, stack, builder) {
+  if (!%PushIfAbsent(stack, value)) {
+    throw MakeTypeError('circular_structure', []);
+  }
+  builder.push("{");
+  for (var p in value) {
+    if (%HasLocalProperty(value, p)) {
+      builder.push(%QuoteJSONString(p));
+      builder.push(":");
+      var before = builder.length;
+      BasicJSONSerialize(p, value, stack, builder);
+      if (before == builder.length) {
+        builder.pop();
+        builder.pop();
       } else {
-        return SerializeObject(value, replacer, stack, indent, gap);
+        builder.push(",");
       }
-    case "number":
-      return $isFinite(value) ? $String(value) : "null";
-    case "boolean":
-      return value ? "true" : "false";
+    }
+  }
+  stack.pop();
+  if (builder.pop() != ",") {
+    builder.push("{}");  // Object has no own properties. Push "{" back on.
+  } else {
+    builder.push("}");
   }
 }
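
Note: both Basic* serializers build the result as an array of fragments and
join once at the end (via %_FastAsciiArrayJoin / %StringBuilderConcat in
JSONStringify below), avoiding quadratic repeated concatenation.  A
host-language sketch of that builder pattern:

    #include <cstddef>
    #include <string>
    #include <vector>

    // Join a fragment builder in O(total length): size once, copy once.
    static std::string Join(const std::vector<std::string>& builder) {
      std::size_t total = 0;
      for (std::size_t i = 0; i < builder.size(); i++)
        total += builder[i].size();
      std::string result;
      result.reserve(total);  // single allocation
      for (std::size_t i = 0; i < builder.size(); i++) result += builder[i];
      return result;
    }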
 
+
+function BasicJSONSerialize(key, holder, stack, builder) {
+  var value = holder[key];
+  if (IS_SPEC_OBJECT(value)) {
+    var toJSON = value.toJSON;
+    if (IS_FUNCTION(toJSON)) {
+      value = %_CallFunction(value, ToString(key), toJSON);
+    }
+  }
+  if (IS_STRING(value)) {
+    builder.push(%QuoteJSONString(value));
+  } else if (IS_NUMBER(value)) {
+    builder.push(($isFinite(value) ? %_NumberToString(value) : "null"));
+  } else if (IS_BOOLEAN(value)) {
+    builder.push(value ? "true" : "false");
+  } else if (IS_NULL(value)) {
+    builder.push("null");
+  } else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) {
+    // Value is a non-callable object.
+    // Unwrap value if necessary
+    if (IS_NUMBER_WRAPPER(value)) {
+      value = ToNumber(value);
+      builder.push(($isFinite(value) ? %_NumberToString(value) : "null"));
+    } else if (IS_STRING_WRAPPER(value)) {
+      builder.push(%QuoteJSONString(ToString(value)));
+    } else if (IS_BOOLEAN_WRAPPER(value)) {
+      builder.push(%_ValueOf(value) ? "true" : "false");
+    } else if (IS_ARRAY(value)) {
+      BasicSerializeArray(value, stack, builder);
+    } else {
+      BasicSerializeObject(value, stack, builder);
+    }
+  }
+}
+
+
 function JSONStringify(value, replacer, space) {
-  var stack = [];
-  var indent = "";
+  if (%_ArgumentsLength() == 1) {
+    var builder = [];
+    BasicJSONSerialize('', {'': value}, [], builder);
+    if (builder.length == 0) return;
+    var result = %_FastAsciiArrayJoin(builder, "");
+    if (!IS_UNDEFINED(result)) return result;
+    return %StringBuilderConcat(builder, builder.length, "");
+  }
   if (IS_OBJECT(space)) {
     // Unwrap 'space' if it is wrapped
     if (IS_NUMBER_WRAPPER(space)) {
-      space = $Number(space);
+      space = ToNumber(space);
     } else if (IS_STRING_WRAPPER(space)) {
-      space = $String(space);
+      space = ToString(space);
     }
   }
   var gap;
   if (IS_NUMBER(space)) {
-    space = $Math.min(ToInteger(space), 10);
-    gap = "";
-    for (var i = 0; i < space; i++) {
-      gap += " ";
-    }
+    space = MathMax(0, MathMin(ToInteger(space), 10));
+    gap = SubString("          ", 0, space);
   } else if (IS_STRING(space)) {
     if (space.length > 10) {
-      gap = space.substring(0, 10);
+      gap = SubString(space, 0, 10);
     } else {
       gap = space;
     }
   } else {
     gap = "";
   }
-  return JSONSerialize('', {'': value}, replacer, stack, indent, gap);
+  return JSONSerialize('', {'': value}, replacer, [], "", gap);
 }
 
 function SetupJSON() {
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 8cd13bc..e0f2e62 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -33,6 +33,7 @@
 #include "factory.h"
 #include "jsregexp.h"
 #include "platform.h"
+#include "string-search.h"
 #include "runtime.h"
 #include "top.h"
 #include "compilation-cache.h"
@@ -120,7 +121,7 @@
     re->set_data(*cached);
     return re;
   }
-  FlattenString(pattern);
+  pattern = FlattenGetString(pattern);
   CompilationZoneScope zone_scope(DELETE_ON_EXIT);
   PostponeInterruptsScope postpone;
   RegExpCompileData parse_result;
@@ -205,23 +206,61 @@
   RegExpImpl::SetCapture(array, 1, to);
 }
 
+  /* template <typename SubjectChar, typename PatternChar>
+static int ReStringMatch(Vector<const SubjectChar> sub_vector,
+                         Vector<const PatternChar> pat_vector,
+                         int start_index) {
 
+  int pattern_length = pat_vector.length();
+  if (pattern_length == 0) return start_index;
+
+  int subject_length = sub_vector.length();
+  if (start_index + pattern_length > subject_length) return -1;
+  return SearchString(sub_vector, pat_vector, start_index);
+}
+  */
 Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
                                     Handle<String> subject,
                                     int index,
                                     Handle<JSArray> last_match_info) {
-  Handle<String> needle(String::cast(re->DataAt(JSRegExp::kAtomPatternIndex)));
+  ASSERT(0 <= index);
+  ASSERT(index <= subject->length());
 
-  uint32_t start_index = index;
+  if (!subject->IsFlat()) FlattenString(subject);
+  AssertNoAllocation no_heap_allocation;  // Ensure the vectors stay valid.
+  // Extract flattened substrings of cons strings before determining asciiness.
+  String* seq_sub = *subject;
+  if (seq_sub->IsConsString()) seq_sub = ConsString::cast(seq_sub)->first();
 
-  int value = Runtime::StringMatch(subject, needle, start_index);
-  if (value == -1) return Factory::null_value();
+  String* needle = String::cast(re->DataAt(JSRegExp::kAtomPatternIndex));
+  int needle_len = needle->length();
+
+  if (needle_len != 0) {
+    if (index + needle_len > subject->length()) return Factory::null_value();
+    // Dispatch on the types of the subject and pattern strings.
+    index = (needle->IsAsciiRepresentation()
+             ? (seq_sub->IsAsciiRepresentation()
+                ? SearchString(seq_sub->ToAsciiVector(),
+                               needle->ToAsciiVector(),
+                               index)
+                : SearchString(seq_sub->ToUC16Vector(),
+                               needle->ToAsciiVector(),
+                               index))
+             : (seq_sub->IsAsciiRepresentation()
+                ? SearchString(seq_sub->ToAsciiVector(),
+                               needle->ToUC16Vector(),
+                               index)
+                : SearchString(seq_sub->ToUC16Vector(),
+                               needle->ToUC16Vector(),
+                               index)));
+    if (index == -1) return Factory::null_value();
+  }
   ASSERT(last_match_info->HasFastElements());
 
   {
     NoHandleAllocation no_handles;
     FixedArray* array = FixedArray::cast(last_match_info->elements());
-    SetAtomLastCapture(array, *subject, value, value + needle->length());
+    SetAtomLastCapture(array, *subject, index, index + needle_len);
   }
   return last_match_info;
 }
@@ -364,7 +403,7 @@
     AssertNoAllocation no_gc;
     String* sequential_string = *subject;
     if (subject->IsConsString()) {
-      sequential_string =  ConsString::cast(*subject)->first();
+      sequential_string = ConsString::cast(*subject)->first();
     }
     is_ascii = sequential_string->IsAsciiRepresentation();
   }
@@ -1611,41 +1650,64 @@
 }
 
 
-int ActionNode::EatsAtLeast(int still_to_find, int recursion_depth) {
+int ActionNode::EatsAtLeast(int still_to_find,
+                            int recursion_depth,
+                            bool not_at_start) {
   if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
   if (type_ == POSITIVE_SUBMATCH_SUCCESS) return 0;  // Rewinds input!
-  return on_success()->EatsAtLeast(still_to_find, recursion_depth + 1);
+  return on_success()->EatsAtLeast(still_to_find,
+                                   recursion_depth + 1,
+                                   not_at_start);
 }
 
 
-int AssertionNode::EatsAtLeast(int still_to_find, int recursion_depth) {
+int AssertionNode::EatsAtLeast(int still_to_find,
+                               int recursion_depth,
+                               bool not_at_start) {
   if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
-  return on_success()->EatsAtLeast(still_to_find, recursion_depth + 1);
+  // If we know we are not at the start and we are asked "how many characters
+  // will you match if you succeed?" then we can answer anything since false
+  // implies false.  So let's just return the max answer (still_to_find) since
+  // that won't prevent us from preloading a lot of characters for the other
+  // branches in the node graph.
+  if (type() == AT_START && not_at_start) return still_to_find;
+  return on_success()->EatsAtLeast(still_to_find,
+                                   recursion_depth + 1,
+                                   not_at_start);
 }
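
Note: a concrete instance of the "false implies false" argument.  For
/^abc|defg/ the anchored branch eats 3 characters and the other branch 4;
once we know we are not at the start, the anchored branch cannot succeed,
so letting it answer still_to_find keeps it from dragging the minimum down
(a sketch, with the branch lengths hard-coded):

    #include <algorithm>
    #include <cassert>

    static int ChoiceEatsAtLeast(int still_to_find, bool not_at_start) {
      // "^abc" can never match once we are past the start of the input.
      int anchored = not_at_start ? still_to_find : 3;
      int unanchored = 4;  // "defg"
      return std::min(anchored, unanchored);
    }

    int main() {
      assert(ChoiceEatsAtLeast(4, false) == 3);  // conservative at start
      assert(ChoiceEatsAtLeast(4, true) == 4);   // can preload more
      return 0;
    }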
 
 
-int BackReferenceNode::EatsAtLeast(int still_to_find, int recursion_depth) {
+int BackReferenceNode::EatsAtLeast(int still_to_find,
+                                   int recursion_depth,
+                                   bool not_at_start) {
   if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
-  return on_success()->EatsAtLeast(still_to_find, recursion_depth + 1);
+  return on_success()->EatsAtLeast(still_to_find,
+                                   recursion_depth + 1,
+                                   not_at_start);
 }
 
 
-int TextNode::EatsAtLeast(int still_to_find, int recursion_depth) {
+int TextNode::EatsAtLeast(int still_to_find,
+                          int recursion_depth,
+                          bool not_at_start) {
   int answer = Length();
   if (answer >= still_to_find) return answer;
   if (recursion_depth > RegExpCompiler::kMaxRecursion) return answer;
+  // We are not at the start after this node, so the last argument is 'true'.
   return answer + on_success()->EatsAtLeast(still_to_find - answer,
-                                            recursion_depth + 1);
+                                            recursion_depth + 1,
+                                            true);
 }
 
 
 int NegativeLookaheadChoiceNode::EatsAtLeast(int still_to_find,
-                                             int recursion_depth) {
+                                             int recursion_depth,
+                                             bool not_at_start) {
   if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
   // Alternative 0 is the negative lookahead, alternative 1 is what comes
   // afterwards.
   RegExpNode* node = alternatives_->at(1).node();
-  return node->EatsAtLeast(still_to_find, recursion_depth + 1);
+  return node->EatsAtLeast(still_to_find, recursion_depth + 1, not_at_start);
 }
 
 
@@ -1663,7 +1725,8 @@
 
 int ChoiceNode::EatsAtLeastHelper(int still_to_find,
                                   int recursion_depth,
-                                  RegExpNode* ignore_this_node) {
+                                  RegExpNode* ignore_this_node,
+                                  bool not_at_start) {
   if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
   int min = 100;
   int choice_count = alternatives_->length();
@@ -1671,20 +1734,31 @@
     RegExpNode* node = alternatives_->at(i).node();
     if (node == ignore_this_node) continue;
     int node_eats_at_least = node->EatsAtLeast(still_to_find,
-                                               recursion_depth + 1);
+                                               recursion_depth + 1,
+                                               not_at_start);
     if (node_eats_at_least < min) min = node_eats_at_least;
   }
   return min;
 }
 
 
-int LoopChoiceNode::EatsAtLeast(int still_to_find, int recursion_depth) {
-  return EatsAtLeastHelper(still_to_find, recursion_depth, loop_node_);
+int LoopChoiceNode::EatsAtLeast(int still_to_find,
+                                int recursion_depth,
+                                bool not_at_start) {
+  return EatsAtLeastHelper(still_to_find,
+                           recursion_depth,
+                           loop_node_,
+                           not_at_start);
 }
 
 
-int ChoiceNode::EatsAtLeast(int still_to_find, int recursion_depth) {
-  return EatsAtLeastHelper(still_to_find, recursion_depth, NULL);
+int ChoiceNode::EatsAtLeast(int still_to_find,
+                            int recursion_depth,
+                            bool not_at_start) {
+  return EatsAtLeastHelper(still_to_find,
+                           recursion_depth,
+                           NULL,
+                           not_at_start);
 }
 
 
@@ -2591,8 +2665,9 @@
 }
 
 
-int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler) {
-  int preload_characters = EatsAtLeast(4, 0);
+int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler,
+                                           bool not_at_start) {
+  int preload_characters = EatsAtLeast(4, 0, not_at_start);
   if (compiler->macro_assembler()->CanReadUnaligned()) {
     bool ascii = compiler->ascii();
     if (ascii) {
@@ -2800,7 +2875,9 @@
 
   int first_normal_choice = greedy_loop ? 1 : 0;
 
-  int preload_characters = CalculatePreloadCharacters(compiler);
+  int preload_characters =
+      CalculatePreloadCharacters(compiler,
+                                 current_trace->at_start() == Trace::FALSE);
   bool preload_is_current =
       (current_trace->characters_preloaded() == preload_characters);
   bool preload_has_checked_bounds = preload_is_current;
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 87adf55..6f04be3 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -596,8 +596,13 @@
   // How many characters must this node consume at a minimum in order to
   // succeed.  If we have found at least 'still_to_find' characters that
   // must be consumed there is no need to ask any following nodes whether
-  // they are sure to eat any more characters.
-  virtual int EatsAtLeast(int still_to_find, int recursion_depth) = 0;
+  // they are sure to eat any more characters.  The not_at_start argument is
+  // used to indicate that we know we are not at the start of the input.  In
+  // this case anchored branches will always fail and can be ignored when
+  // determining how many characters are consumed on success.
+  virtual int EatsAtLeast(int still_to_find,
+                          int recursion_depth,
+                          bool not_at_start) = 0;
   // Emits some quick code that checks whether the preloaded characters match.
   // Falls through on certain failure, jumps to the label on possible success.
   // If the node cannot make a quick check it does nothing and returns false.
@@ -765,7 +770,9 @@
                                      RegExpNode* on_success);
   virtual void Accept(NodeVisitor* visitor);
   virtual void Emit(RegExpCompiler* compiler, Trace* trace);
-  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  virtual int EatsAtLeast(int still_to_find,
+                          int recursion_depth,
+                          bool not_at_start);
   virtual void GetQuickCheckDetails(QuickCheckDetails* details,
                                     RegExpCompiler* compiler,
                                     int filled_in,
@@ -829,7 +836,9 @@
   }
   virtual void Accept(NodeVisitor* visitor);
   virtual void Emit(RegExpCompiler* compiler, Trace* trace);
-  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  virtual int EatsAtLeast(int still_to_find,
+                          int recursion_depth,
+                          bool not_at_start);
   virtual void GetQuickCheckDetails(QuickCheckDetails* details,
                                     RegExpCompiler* compiler,
                                     int characters_filled_in,
@@ -897,7 +906,9 @@
   }
   virtual void Accept(NodeVisitor* visitor);
   virtual void Emit(RegExpCompiler* compiler, Trace* trace);
-  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  virtual int EatsAtLeast(int still_to_find,
+                          int recursion_depth,
+                          bool not_at_start);
   virtual void GetQuickCheckDetails(QuickCheckDetails* details,
                                     RegExpCompiler* compiler,
                                     int filled_in,
@@ -925,7 +936,9 @@
   int start_register() { return start_reg_; }
   int end_register() { return end_reg_; }
   virtual void Emit(RegExpCompiler* compiler, Trace* trace);
-  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  virtual int EatsAtLeast(int still_to_find,
+                          int recursion_depth,
+                          bool not_at_start);
   virtual void GetQuickCheckDetails(QuickCheckDetails* details,
                                     RegExpCompiler* compiler,
                                     int characters_filled_in,
@@ -946,7 +959,9 @@
   explicit EndNode(Action action) : action_(action) { }
   virtual void Accept(NodeVisitor* visitor);
   virtual void Emit(RegExpCompiler* compiler, Trace* trace);
-  virtual int EatsAtLeast(int still_to_find, int recursion_depth) { return 0; }
+  virtual int EatsAtLeast(int still_to_find,
+                          int recursion_depth,
+                          bool not_at_start) { return 0; }
   virtual void GetQuickCheckDetails(QuickCheckDetails* details,
                                     RegExpCompiler* compiler,
                                     int characters_filled_in,
@@ -1028,10 +1043,13 @@
   ZoneList<GuardedAlternative>* alternatives() { return alternatives_; }
   DispatchTable* GetTable(bool ignore_case);
   virtual void Emit(RegExpCompiler* compiler, Trace* trace);
-  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  virtual int EatsAtLeast(int still_to_find,
+                          int recursion_depth,
+                          bool not_at_start);
   int EatsAtLeastHelper(int still_to_find,
                         int recursion_depth,
-                        RegExpNode* ignore_this_node);
+                        RegExpNode* ignore_this_node,
+                        bool not_at_start);
   virtual void GetQuickCheckDetails(QuickCheckDetails* details,
                                     RegExpCompiler* compiler,
                                     int characters_filled_in,
@@ -1054,7 +1072,7 @@
   void GenerateGuard(RegExpMacroAssembler* macro_assembler,
                      Guard* guard,
                      Trace* trace);
-  int CalculatePreloadCharacters(RegExpCompiler* compiler);
+  int CalculatePreloadCharacters(RegExpCompiler* compiler, bool not_at_start);
   void EmitOutOfLineContinuation(RegExpCompiler* compiler,
                                  Trace* trace,
                                  GuardedAlternative alternative,
@@ -1077,7 +1095,9 @@
     AddAlternative(this_must_fail);
     AddAlternative(then_do_this);
   }
-  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  virtual int EatsAtLeast(int still_to_find,
+                          int recursion_depth,
+                          bool not_at_start);
   virtual void GetQuickCheckDetails(QuickCheckDetails* details,
                                     RegExpCompiler* compiler,
                                     int characters_filled_in,
@@ -1102,7 +1122,9 @@
   void AddLoopAlternative(GuardedAlternative alt);
   void AddContinueAlternative(GuardedAlternative alt);
   virtual void Emit(RegExpCompiler* compiler, Trace* trace);
-  virtual int EatsAtLeast(int still_to_find, int recursion_depth);
+  virtual int EatsAtLeast(int still_to_find,
+                          int recursion_depth,
+                          bool not_at_start);
   virtual void GetQuickCheckDetails(QuickCheckDetails* details,
                                     RegExpCompiler* compiler,
                                     int characters_filled_in,
diff --git a/src/jump-target-light.h b/src/jump-target-light.h
index 5ca4d60..0d65306 100644
--- a/src/jump-target-light.h
+++ b/src/jump-target-light.h
@@ -152,6 +152,7 @@
  public:
   // Construct a break target.
   inline BreakTarget();
+
   inline BreakTarget(JumpTarget::Directionality direction);
 
   virtual ~BreakTarget() {}
diff --git a/src/list-inl.h b/src/list-inl.h
index e277bc8..eeaea65 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -96,6 +96,17 @@
 
 
 template<typename T, class P>
+void List<T, P>::InsertAt(int index, const T& elm) {
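+  // Grow the list by one, shift the elements at or after 'index' one slot
+  // towards the end, then write the new element into the freed slot.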
+  ASSERT(index >= 0 && index <= length_);
+  Add(elm);
+  for (int i = length_ - 1; i > index; --i) {
+    data_[i] = data_[i - 1];
+  }
+  data_[index] = elm;
+}
+
+
+template<typename T, class P>
 T List<T, P>::Remove(int i) {
   T element = at(i);
   length_--;
@@ -108,6 +119,18 @@
 
 
 template<typename T, class P>
+bool List<T, P>::RemoveElement(const T& elm) {
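+  // Linear scan; only the first occurrence of 'elm' is removed.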
+  for (int i = 0; i < length_; i++) {
+    if (data_[i] == elm) {
+      Remove(i);
+      return true;
+    }
+  }
+  return false;
+}
+
+
+template<typename T, class P>
 void List<T, P>::Clear() {
   DeleteData(data_);
   Initialize(0);
@@ -134,7 +157,7 @@
 
 
 template<typename T, class P>
-bool List<T, P>::Contains(const T& elm) {
+bool List<T, P>::Contains(const T& elm) const {
   for (int i = 0; i < length_; i++) {
     if (data_[i] == elm)
       return true;
@@ -144,6 +167,16 @@
 
 
 template<typename T, class P>
+int List<T, P>::CountOccurrences(const T& elm, int start, int end) const {
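+  // Counts occurrences in the inclusive range [start, end].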
+  int result = 0;
+  for (int i = start; i <= end; i++) {
+    if (data_[i] == elm) ++result;
+  }
+  return result;
+}
+
+
+template<typename T, class P>
 void List<T, P>::Sort(int (*cmp)(const T* x, const T* y)) {
   ToVector().Sort(cmp);
 #ifdef DEBUG
diff --git a/src/list.h b/src/list.h
index 24f3494..9a2e698 100644
--- a/src/list.h
+++ b/src/list.h
@@ -91,6 +91,9 @@
   // Add all the elements from the argument list to this list.
   void AddAll(const List<T, P>& other);
 
+  // Inserts the element at the specified index.
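+  // Elements at or after 'index' are shifted one position towards the end.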
+  void InsertAt(int index, const T& element);
+
   // Adds 'count' elements with the value 'value' and returns a
   // vector that allows access to the elements.  The vector is valid
   // until the next change is made to this list.
@@ -102,6 +105,10 @@
   // size of the list.
   T Remove(int i);
 
+  // Removes the first occurrence of the given element from the list.
+  // Returns whether the element was present.
+  bool RemoveElement(const T& elm);
+
   // Removes the last element without deleting it even if T is a
   // pointer type. Returns the removed element.
   INLINE(T RemoveLast()) { return Remove(length_ - 1); }
@@ -113,7 +120,11 @@
   // Drops all but the first 'pos' elements from the list.
   INLINE(void Rewind(int pos));
 
-  bool Contains(const T& elm);
+  // Drops the last 'count' elements from the list.
+  INLINE(void RewindBy(int count)) { Rewind(length_ - count); }
+
+  bool Contains(const T& elm) const;
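+  // Returns how many times 'elm' occurs in the inclusive range
+  // [start, end].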
+  int CountOccurrences(const T& elm, int start, int end) const;
 
   // Iterate through all list entries, starting at index 0.
   void Iterate(void (*callback)(T* x));
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
new file mode 100644
index 0000000..ac61c17
--- /dev/null
+++ b/src/lithium-allocator.cc
@@ -0,0 +1,2116 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "lithium-allocator.h"
+
+#include "data-flow.h"
+#include "hydrogen.h"
+#include "string-stream.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/lithium-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/lithium-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/lithium-arm.h"
+#else
+#error "Unknown architecture."
+#endif
+
+namespace v8 {
+namespace internal {
+
+
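+// Each cached operand class keeps a static array of preallocated
+// instances.  SetupCache() initializes each entry in place with the
+// operand kind and its index.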
+#define DEFINE_OPERAND_CACHE(name, type)            \
+  name name::cache[name::kNumCachedOperands];       \
+  void name::SetupCache() {                         \
+    for (int i = 0; i < kNumCachedOperands; i++) {  \
+      cache[i].ConvertTo(type, i);                  \
+    }                                               \
+  }
+
+DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
+DEFINE_OPERAND_CACHE(LStackSlot,       STACK_SLOT)
+DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
+DEFINE_OPERAND_CACHE(LRegister,        REGISTER)
+DEFINE_OPERAND_CACHE(LDoubleRegister,  DOUBLE_REGISTER)
+
+#undef DEFINE_OPERAND_CACHE
+
+
+static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
+  return a.Value() < b.Value() ? a : b;
+}
+
+
+static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
+  return a.Value() > b.Value() ? a : b;
+}
+
+
+void LOperand::PrintTo(StringStream* stream) {
+  LUnallocated* unalloc = NULL;
+  switch (kind()) {
+    case INVALID:
+      break;
+    case UNALLOCATED:
+      unalloc = LUnallocated::cast(this);
+      stream->Add("v%d", unalloc->virtual_register());
+      switch (unalloc->policy()) {
+        case LUnallocated::NONE:
+          break;
+        case LUnallocated::FIXED_REGISTER: {
+          const char* register_name =
+              Register::AllocationIndexToString(unalloc->fixed_index());
+          stream->Add("(=%s)", register_name);
+          break;
+        }
+        case LUnallocated::FIXED_DOUBLE_REGISTER: {
+          const char* double_register_name =
+              DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
+          stream->Add("(=%s)", double_register_name);
+          break;
+        }
+        case LUnallocated::FIXED_SLOT:
+          stream->Add("(=%dS)", unalloc->fixed_index());
+          break;
+        case LUnallocated::MUST_HAVE_REGISTER:
+          stream->Add("(R)");
+          break;
+        case LUnallocated::WRITABLE_REGISTER:
+          stream->Add("(WR)");
+          break;
+        case LUnallocated::SAME_AS_FIRST_INPUT:
+          stream->Add("(1)");
+          break;
+        case LUnallocated::SAME_AS_ANY_INPUT:
+          stream->Add("(A)");
+          break;
+        case LUnallocated::ANY:
+          stream->Add("(-)");
+          break;
+        case LUnallocated::IGNORE:
+          stream->Add("(0)");
+          break;
+      }
+      break;
+    case CONSTANT_OPERAND:
+      stream->Add("[constant:%d]", index());
+      break;
+    case STACK_SLOT:
+      stream->Add("[stack:%d]", index());
+      break;
+    case DOUBLE_STACK_SLOT:
+      stream->Add("[double_stack:%d]", index());
+      break;
+    case REGISTER:
+      stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
+      break;
+    case DOUBLE_REGISTER:
+      stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
+      break;
+    case ARGUMENT:
+      stream->Add("[arg:%d]", index());
+      break;
+  }
+}
+
+
+int LOperand::VirtualRegister() {
+  LUnallocated* unalloc = LUnallocated::cast(this);
+  return unalloc->virtual_register();
+}
+
+
+bool UsePosition::RequiresRegister() const {
+  return requires_reg_;
+}
+
+
+bool UsePosition::RegisterIsBeneficial() const {
+  return register_beneficial_;
+}
+
+
+void UseInterval::SplitAt(LifetimePosition pos) {
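+  // Split this interval into [start pos[ and a new interval [pos end[
+  // that is linked in directly after this one.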
+  ASSERT(Contains(pos) && pos.Value() != start().Value());
+  UseInterval* after = new UseInterval(pos, end_);
+  after->next_ = next_;
+  next_ = after;
+  end_ = pos;
+}
+
+
+#ifdef DEBUG
+
+
+void LiveRange::Verify() const {
+  UsePosition* cur = first_pos_;
+  while (cur != NULL) {
+    ASSERT(Start().Value() <= cur->pos().Value() &&
+           cur->pos().Value() <= End().Value());
+    cur = cur->next();
+  }
+}
+
+
+bool LiveRange::HasOverlap(UseInterval* target) const {
+  UseInterval* current_interval = first_interval_;
+  while (current_interval != NULL) {
+    // Intervals overlap if the start of one is contained in the other.
+    if (current_interval->Contains(target->start()) ||
+        target->Contains(current_interval->start())) {
+      return true;
+    }
+    current_interval = current_interval->next();
+  }
+  return false;
+}
+
+
+#endif
+
+
+UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
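+  // Resume from the last processed use position so that repeated queries
+  // with non-decreasing 'start' do not rescan the whole list.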
+  UsePosition* use_pos = last_processed_use_;
+  if (use_pos == NULL) use_pos = first_pos();
+  while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
+    use_pos = use_pos->next();
+  }
+  last_processed_use_ = use_pos;
+  return use_pos;
+}
+
+
+UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
+    LifetimePosition start) {
+  UsePosition* pos = NextUsePosition(start);
+  while (pos != NULL && !pos->RegisterIsBeneficial()) {
+    pos = pos->next();
+  }
+  return pos;
+}
+
+
+UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
+  UsePosition* pos = NextUsePosition(start);
+  while (pos != NULL && !pos->RequiresRegister()) {
+    pos = pos->next();
+  }
+  return pos;
+}
+
+
+bool LiveRange::CanBeSpilled(LifetimePosition pos) {
+  // TODO(kmillikin): Comment. Now.
+  if (pos.Value() <= Start().Value() && HasRegisterAssigned()) return false;
+
+  // We cannot spill a live range that has a use requiring a register
+  // at the current or the immediate next position.
+  UsePosition* use_pos = NextRegisterPosition(pos);
+  if (use_pos == NULL) return true;
+  return use_pos->pos().Value() > pos.NextInstruction().Value();
+}
+
+
+UsePosition* LiveRange::FirstPosWithHint() const {
+  UsePosition* pos = first_pos_;
+  while (pos != NULL && !pos->HasHint()) pos = pos->next();
+  return pos;
+}
+
+
+LOperand* LiveRange::CreateAssignedOperand() {
+  LOperand* op = NULL;
+  if (HasRegisterAssigned()) {
+    ASSERT(!IsSpilled());
+    if (IsDouble()) {
+      op = LDoubleRegister::Create(assigned_register());
+    } else {
+      op = LRegister::Create(assigned_register());
+    }
+  } else if (IsSpilled()) {
+    ASSERT(!HasRegisterAssigned());
+    op = TopLevel()->GetSpillOperand();
+    ASSERT(!op->IsUnallocated());
+  } else {
+    LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE);
+    unalloc->set_virtual_register(id_);
+    op = unalloc;
+  }
+  return op;
+}
+
+
+UseInterval* LiveRange::FirstSearchIntervalForPosition(
+    LifetimePosition position) const {
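+  // current_interval_ caches the result of a previous search; it is only
+  // usable if it does not start after 'position'.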
+  if (current_interval_ == NULL) return first_interval_;
+  if (current_interval_->start().Value() > position.Value()) {
+    current_interval_ = NULL;
+    return first_interval_;
+  }
+  return current_interval_;
+}
+
+
+void LiveRange::AdvanceLastProcessedMarker(
+    UseInterval* to_start_of, LifetimePosition but_not_past) const {
+  if (to_start_of == NULL) return;
+  if (to_start_of->start().Value() > but_not_past.Value()) return;
+  LifetimePosition start =
+      current_interval_ == NULL ? LifetimePosition::Invalid()
+                                : current_interval_->start();
+  if (to_start_of->start().Value() > start.Value()) {
+    current_interval_ = to_start_of;
+  }
+}
+
+
+void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
+  ASSERT(Start().Value() < position.Value());
+  ASSERT(result->IsEmpty());
+  // Find the last interval that ends before the position. If the
+  // position is contained in one of the intervals in the chain, we
+  // split that interval and use the first part.
+  UseInterval* current = FirstSearchIntervalForPosition(position);
+
+  // If the split position coincides with the beginning of a use interval
+  // we need to split use positions in a special way.
+  bool split_at_start = false;
+
+  while (current != NULL) {
+    if (current->Contains(position)) {
+      current->SplitAt(position);
+      break;
+    }
+    UseInterval* next = current->next();
+    if (next->start().Value() >= position.Value()) {
+      split_at_start = (next->start().Value() == position.Value());
+      break;
+    }
+    current = next;
+  }
+
+  // Partition the original use intervals between the two live ranges.
+  UseInterval* before = current;
+  UseInterval* after = before->next();
+  result->last_interval_ = (last_interval_ == before)
+      ? after            // Only interval in the range after split.
+      : last_interval_;  // Last interval of the original range.
+  result->first_interval_ = after;
+  last_interval_ = before;
+
+  // Find the last use position before the split and the first use
+  // position after it.
+  UsePosition* use_after = first_pos_;
+  UsePosition* use_before = NULL;
+  if (split_at_start) {
+    // The split position coincides with the beginning of a use interval (the
+    // end of a lifetime hole). A use at this position should be attributed
+    // to the split child because the split child owns the use interval
+    // covering it.
+    while (use_after != NULL && use_after->pos().Value() < position.Value()) {
+      use_before = use_after;
+      use_after = use_after->next();
+    }
+  } else {
+    while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
+      use_before = use_after;
+      use_after = use_after->next();
+    }
+  }
+
+  // Partition the original use positions between the two live ranges.
+  if (use_before != NULL) {
+    use_before->next_ = NULL;
+  } else {
+    first_pos_ = NULL;
+  }
+  result->first_pos_ = use_after;
+
+  // Link the new live range in the chain before any of the other
+  // ranges linked from the range before the split.
+  result->parent_ = (parent_ == NULL) ? this : parent_;
+  result->next_ = next_;
+  next_ = result;
+
+#ifdef DEBUG
+  Verify();
+  result->Verify();
+#endif
+}
+
+
+// This implements an ordering on live ranges so that they are ordered by their
+// start positions.  This is needed for the correctness of the register
+// allocation algorithm.  If two live ranges start at the same offset then there
+// is a tie breaker based on where the value is first used.  This part of the
+// ordering is merely a heuristic.
+bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
+  LifetimePosition start = Start();
+  LifetimePosition other_start = other->Start();
+  if (start.Value() == other_start.Value()) {
+    UsePosition* pos = FirstPosWithHint();
+    if (pos == NULL) return false;
+    UsePosition* other_pos = other->first_pos();
+    if (other_pos == NULL) return true;
+    return pos->pos().Value() < other_pos->pos().Value();
+  }
+  return start.Value() < other_start.Value();
+}
+
+
+void LiveRange::ShortenTo(LifetimePosition start) {
+  LAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
+  ASSERT(first_interval_ != NULL);
+  ASSERT(first_interval_->start().Value() <= start.Value());
+  ASSERT(start.Value() < first_interval_->end().Value());
+  first_interval_->set_start(start);
+}
+
+
+void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end) {
+  LAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
+                         id_,
+                         start.Value(),
+                         end.Value());
+  LifetimePosition new_end = end;
+  while (first_interval_ != NULL &&
+         first_interval_->start().Value() <= end.Value()) {
+    if (first_interval_->end().Value() > end.Value()) {
+      new_end = first_interval_->end();
+    }
+    first_interval_ = first_interval_->next();
+  }
+
+  UseInterval* new_interval = new UseInterval(start, new_end);
+  new_interval->next_ = first_interval_;
+  first_interval_ = new_interval;
+  if (new_interval->next() == NULL) {
+    last_interval_ = new_interval;
+  }
+}
+
+
+void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end) {
+  LAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n",
+                         id_,
+                         start.Value(),
+                         end.Value());
+  if (first_interval_ == NULL) {
+    UseInterval* interval = new UseInterval(start, end);
+    first_interval_ = interval;
+    last_interval_ = interval;
+  } else {
+    if (end.Value() == first_interval_->start().Value()) {
+      first_interval_->set_start(start);
+    } else if (end.Value() < first_interval_->start().Value()) {
+      UseInterval* interval = new UseInterval(start, end);
+      interval->set_next(first_interval_);
+      first_interval_ = interval;
+    } else {
+      // The order of instruction processing (see ProcessInstructions)
+      // guarantees that each new use interval either precedes or intersects
+      // with the last added interval.
+      ASSERT(start.Value() < first_interval_->end().Value());
+      first_interval_->start_ = Min(start, first_interval_->start_);
+      first_interval_->end_ = Max(end, first_interval_->end_);
+    }
+  }
+}
+
+
+UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
+                                       LOperand* operand) {
+  LAllocator::TraceAlloc("Add to live range %d use position %d\n",
+                         id_,
+                         pos.Value());
+  UsePosition* use_pos = new UsePosition(pos, operand);
+  UsePosition* prev = NULL;
+  UsePosition* current = first_pos_;
+  while (current != NULL && current->pos().Value() < pos.Value()) {
+    prev = current;
+    current = current->next();
+  }
+
+  if (prev == NULL) {
+    use_pos->set_next(first_pos_);
+    first_pos_ = use_pos;
+  } else {
+    use_pos->next_ = prev->next_;
+    prev->next_ = use_pos;
+  }
+
+  return use_pos;
+}
+
+
+void LiveRange::ConvertOperands() {
+  LOperand* op = CreateAssignedOperand();
+  UsePosition* use_pos = first_pos();
+  while (use_pos != NULL) {
+    ASSERT(Start().Value() <= use_pos->pos().Value() &&
+           use_pos->pos().Value() <= End().Value());
+
+    if (use_pos->HasOperand()) {
+      ASSERT(op->IsRegister() || op->IsDoubleRegister() ||
+             !use_pos->RequiresRegister());
+      use_pos->operand()->ConvertTo(op->kind(), op->index());
+    }
+    use_pos = use_pos->next();
+  }
+}
+
+
+UsePosition* LiveRange::AddUsePosition(LifetimePosition pos) {
+  return AddUsePosition(pos, CreateAssignedOperand());
+}
+
+
+bool LiveRange::CanCover(LifetimePosition position) const {
+  if (IsEmpty()) return false;
+  return Start().Value() <= position.Value() &&
+         position.Value() < End().Value();
+}
+
+
+bool LiveRange::Covers(LifetimePosition position) {
+  if (!CanCover(position)) return false;
+  UseInterval* start_search = FirstSearchIntervalForPosition(position);
+  for (UseInterval* interval = start_search;
+       interval != NULL;
+       interval = interval->next()) {
+    ASSERT(interval->next() == NULL ||
+           interval->next()->start().Value() >= interval->start().Value());
+    AdvanceLastProcessedMarker(interval, position);
+    if (interval->Contains(position)) return true;
+    if (interval->start().Value() > position.Value()) return false;
+  }
+  return false;
+}
+
+
+LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
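+  // Walk the two sorted interval chains in lock step, always advancing
+  // the chain whose current interval starts earlier, until two intervals
+  // intersect or one chain is exhausted.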
+  UseInterval* b = other->first_interval();
+  if (b == NULL) return LifetimePosition::Invalid();
+  LifetimePosition advance_last_processed_up_to = b->start();
+  UseInterval* a = FirstSearchIntervalForPosition(b->start());
+  while (a != NULL && b != NULL) {
+    if (a->start().Value() > other->End().Value()) break;
+    if (b->start().Value() > End().Value()) break;
+    LifetimePosition cur_intersection = a->Intersect(b);
+    if (cur_intersection.IsValid()) {
+      return cur_intersection;
+    }
+    if (a->start().Value() < b->start().Value()) {
+      a = a->next();
+      if (a == NULL || a->start().Value() > other->End().Value()) break;
+      AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
+    } else {
+      b = b->next();
+    }
+  }
+  return LifetimePosition::Invalid();
+}
+
+
+void LAllocator::InitializeLivenessAnalysis() {
+  // Initialize the live_in sets for each block to NULL.
+  int block_count = graph()->blocks()->length();
+  live_in_sets_.Initialize(block_count);
+  live_in_sets_.AddBlock(NULL, block_count);
+}
+
+
+BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
+  // Compute the live-out set for the given block, without the
+  // contributions of backward successor edges.
+  BitVector* live_out = new BitVector(next_virtual_register_);
+
+  // Process all successor blocks.
+  HBasicBlock* successor = block->end()->FirstSuccessor();
+  while (successor != NULL) {
+    // Add values live on entry to the successor.  Note that the successor's
+    // live_in will not have been computed yet for backward edges.
+    BitVector* live_in = live_in_sets_[successor->block_id()];
+    if (live_in != NULL) live_out->Union(*live_in);
+
+    // All phi input operands corresponding to this successor edge are live
+    // out from this block.
+    int index = successor->PredecessorIndexOf(block);
+    const ZoneList<HPhi*>* phis = successor->phis();
+    for (int i = 0; i < phis->length(); ++i) {
+      HPhi* phi = phis->at(i);
+      if (!phi->OperandAt(index)->IsConstant()) {
+        live_out->Add(phi->OperandAt(index)->id());
+      }
+    }
+
+    // Check if we are done with the second successor.
+    if (successor == block->end()->SecondSuccessor()) break;
+
+    successor = block->end()->SecondSuccessor();
+  }
+
+  return live_out;
+}
+
+
+void LAllocator::AddInitialIntervals(HBasicBlock* block,
+                                     BitVector* live_out) {
+  // Add an interval that includes the entire block to the live range for
+  // each live_out value.
+  LifetimePosition start = LifetimePosition::FromInstructionIndex(
+      block->first_instruction_index());
+  LifetimePosition end = LifetimePosition::FromInstructionIndex(
+      block->last_instruction_index()).NextInstruction();
+  BitVector::Iterator iterator(live_out);
+  while (!iterator.Done()) {
+    int operand_index = iterator.Current();
+    LiveRange* range = LiveRangeFor(operand_index);
+    range->AddUseInterval(start, end);
+    iterator.Advance();
+  }
+}
+
+
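+// Fixed double registers get negative live range IDs below the block of
+// IDs used for fixed general registers.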
+int LAllocator::FixedDoubleLiveRangeID(int index) {
+  return -index - 1 - Register::kNumAllocatableRegisters;
+}
+
+
+LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
+                                    int pos,
+                                    bool is_tagged) {
+  TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
+  ASSERT(operand->HasFixedPolicy());
+  if (operand->policy() == LUnallocated::FIXED_SLOT) {
+    operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_index());
+  } else if (operand->policy() == LUnallocated::FIXED_REGISTER) {
+    int reg_index = operand->fixed_index();
+    operand->ConvertTo(LOperand::REGISTER, reg_index);
+  } else if (operand->policy() == LUnallocated::FIXED_DOUBLE_REGISTER) {
+    int reg_index = operand->fixed_index();
+    operand->ConvertTo(LOperand::DOUBLE_REGISTER, reg_index);
+  } else {
+    UNREACHABLE();
+  }
+  if (is_tagged) {
+    TraceAlloc("Fixed reg is tagged at %d\n", pos);
+    LInstruction* instr = chunk_->instructions()->at(pos);
+    if (instr->HasPointerMap()) {
+      instr->pointer_map()->RecordPointer(operand);
+    }
+  }
+  return operand;
+}
+
+
+LiveRange* LAllocator::FixedLiveRangeFor(int index) {
+  if (index >= fixed_live_ranges_.length()) {
+    fixed_live_ranges_.AddBlock(NULL,
+                                index - fixed_live_ranges_.length() + 1);
+  }
+
+  LiveRange* result = fixed_live_ranges_[index];
+  if (result == NULL) {
+    result = new LiveRange(FixedLiveRangeID(index));
+    ASSERT(result->IsFixed());
+    result->set_assigned_register(index, GENERAL_REGISTERS);
+    fixed_live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
+  if (index >= fixed_double_live_ranges_.length()) {
+    fixed_double_live_ranges_.AddBlock(NULL,
+                                index - fixed_double_live_ranges_.length() + 1);
+  }
+
+  LiveRange* result = fixed_double_live_ranges_[index];
+  if (result == NULL) {
+    result = new LiveRange(FixedDoubleLiveRangeID(index));
+    ASSERT(result->IsFixed());
+    result->set_assigned_register(index, DOUBLE_REGISTERS);
+    fixed_double_live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LiveRange* LAllocator::LiveRangeFor(int index) {
+  if (index >= live_ranges_.length()) {
+    live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1);
+  }
+  LiveRange* result = live_ranges_[index];
+  if (result == NULL) {
+    result = new LiveRange(index);
+    live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LGap* LAllocator::GetLastGap(HBasicBlock* block) const {
+  int last_instruction = block->last_instruction_index();
+  int index = chunk_->NearestGapPos(last_instruction);
+  return chunk_->GetGapAt(index);
+}
+
+
+HPhi* LAllocator::LookupPhi(LOperand* operand) const {
+  if (!operand->IsUnallocated()) return NULL;
+  int index = operand->VirtualRegister();
+  HValue* instr = graph()->LookupValue(index);
+  if (instr != NULL && instr->IsPhi()) {
+    return HPhi::cast(instr);
+  }
+  return NULL;
+}
+
+
+LiveRange* LAllocator::LiveRangeFor(LOperand* operand) {
+  if (operand->IsUnallocated()) {
+    return LiveRangeFor(LUnallocated::cast(operand)->virtual_register());
+  } else if (operand->IsRegister()) {
+    return FixedLiveRangeFor(operand->index());
+  } else if (operand->IsDoubleRegister()) {
+    return FixedDoubleLiveRangeFor(operand->index());
+  } else {
+    return NULL;
+  }
+}
+
+
+void LAllocator::Define(LifetimePosition position,
+                        LOperand* operand,
+                        LOperand* hint) {
+  LiveRange* range = LiveRangeFor(operand);
+  if (range == NULL) return;
+
+  if (range->IsEmpty() || range->Start().Value() > position.Value()) {
+    // This can happen if there is a definition without a use.
+    range->AddUseInterval(position, position.NextInstruction());
+    range->AddUsePosition(position.NextInstruction(), NULL);
+  } else {
+    range->ShortenTo(position);
+  }
+
+  if (operand->IsUnallocated()) {
+    LUnallocated* unalloc_operand = LUnallocated::cast(operand);
+    range->AddUsePosition(position, unalloc_operand)->set_hint(hint);
+  }
+}
+
+
+void LAllocator::Use(LifetimePosition block_start,
+                     LifetimePosition position,
+                     LOperand* operand,
+                     LOperand* hint) {
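+  // Uses conservatively extend the live range back to the start of the
+  // block; processing is backwards, so the defining instruction, visited
+  // later, will shorten the range to its real start.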
+  LiveRange* range = LiveRangeFor(operand);
+  if (range == NULL) return;
+  if (operand->IsUnallocated()) {
+    LUnallocated* unalloc_operand = LUnallocated::cast(operand);
+    range->AddUsePosition(position, unalloc_operand)->set_hint(hint);
+  }
+  range->AddUseInterval(block_start, position);
+}
+
+
+void LAllocator::AddConstraintsGapMove(int index,
+                                       LOperand* from,
+                                       LOperand* to) {
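+  // If this gap already contains a move that defines 'from', forward
+  // directly from that move's source instead of chaining two moves.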
+  LGap* gap = chunk_->GetGapAt(index);
+  LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
+  if (from->IsUnallocated()) {
+    const ZoneList<LMoveOperands>* move_operands = move->move_operands();
+    for (int i = 0; i < move_operands->length(); ++i) {
+      LMoveOperands cur = move_operands->at(i);
+      LOperand* cur_to = cur.to();
+      if (cur_to->IsUnallocated()) {
+        if (cur_to->VirtualRegister() == from->VirtualRegister()) {
+          move->AddMove(cur.from(), to);
+          return;
+        }
+      }
+    }
+  }
+  move->AddMove(from, to);
+}
+
+
+void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
+  int start = block->first_instruction_index();
+  int end = block->last_instruction_index();
+  for (int i = start; i <= end; ++i) {
+    if (chunk_->IsGapAt(i)) {
+      InstructionSummary* summary = NULL;
+      InstructionSummary* prev_summary = NULL;
+      if (i < end) summary = GetSummary(i + 1);
+      if (i > start) prev_summary = GetSummary(i - 1);
+      MeetConstraintsBetween(prev_summary, summary, i);
+    }
+  }
+}
+
+
+void LAllocator::MeetConstraintsBetween(InstructionSummary* first,
+                                        InstructionSummary* second,
+                                        int gap_index) {
+  // Handle fixed temporaries.
+  if (first != NULL) {
+    for (int i = 0; i < first->TempCount(); ++i) {
+      LUnallocated* temp = LUnallocated::cast(first->TempAt(i));
+      if (temp->HasFixedPolicy()) {
+        AllocateFixed(temp, gap_index - 1, false);
+      }
+    }
+  }
+
+  // Handle fixed output operand.
+  if (first != NULL && first->Output() != NULL) {
+    LUnallocated* first_output = LUnallocated::cast(first->Output());
+    LiveRange* range = LiveRangeFor(first_output->VirtualRegister());
+    bool assigned = false;
+    if (first_output->HasFixedPolicy()) {
+      LUnallocated* output_copy = first_output->CopyUnconstrained();
+      bool is_tagged = HasTaggedValue(first_output->VirtualRegister());
+      AllocateFixed(first_output, gap_index, is_tagged);
+
+      // This value is produced on the stack; we never need to spill it.
+      if (first_output->IsStackSlot()) {
+        range->SetSpillOperand(first_output);
+        range->SetSpillStartIndex(gap_index - 1);
+        assigned = true;
+      }
+      chunk_->AddGapMove(gap_index, first_output, output_copy);
+    }
+
+    if (!assigned) {
+      range->SetSpillStartIndex(gap_index);
+
+      // This move to the spill operand is not a real use.  Liveness analysis
+      // and splitting of live ranges do not account for it.  Thus it should
+      // be inserted at a lifetime position corresponding to the instruction
+      // end.
+      LGap* gap = chunk_->GetGapAt(gap_index);
+      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE);
+      move->AddMove(first_output, range->GetSpillOperand());
+    }
+  }
+
+  // Handle fixed input operands of second instruction.
+  if (second != NULL) {
+    for (int i = 0; i < second->InputCount(); ++i) {
+      LUnallocated* cur_input = LUnallocated::cast(second->InputAt(i));
+      if (cur_input->HasFixedPolicy()) {
+        LUnallocated* input_copy = cur_input->CopyUnconstrained();
+        bool is_tagged = HasTaggedValue(cur_input->VirtualRegister());
+        AllocateFixed(cur_input, gap_index + 1, is_tagged);
+        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+      } else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
+        LUnallocated* input_copy = cur_input->CopyUnconstrained();
+        cur_input->set_virtual_register(next_virtual_register_++);
+        second->AddTemp(cur_input);
+        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+      }
+    }
+  }
+
+  // Handle "output same as input" for second instruction.
+  if (second != NULL && second->Output() != NULL) {
+    LUnallocated* second_output = LUnallocated::cast(second->Output());
+    if (second_output->HasSameAsInputPolicy()) {
+      LUnallocated* cur_input = LUnallocated::cast(second->InputAt(0));
+      int output_vreg = second_output->VirtualRegister();
+      int input_vreg = cur_input->VirtualRegister();
+
+      LUnallocated* input_copy = cur_input->CopyUnconstrained();
+      cur_input->set_virtual_register(second_output->virtual_register());
+      AddConstraintsGapMove(gap_index, input_copy, cur_input);
+
+      if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
+        int index = gap_index + 1;
+        LInstruction* instr = chunk_->instructions()->at(index);
+        if (instr->HasPointerMap()) {
+          instr->pointer_map()->RecordPointer(input_copy);
+        }
+      } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
+        // The input is assumed to immediately have a tagged representation,
+        // before the pointer map can be used.  That is, the pointer map at
+        // the
+        // instruction will include the output operand (whose value at the
+        // beginning of the instruction is equal to the input operand). If
+        // this is not desired, then the pointer map at this instruction needs
+        // to be adjusted manually.
+      }
+    }
+  }
+}
+
+
+void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
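+  // Walk the instructions of the block backwards so that every use is
+  // seen before the definition that precedes it in instruction order.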
+  int block_start = block->first_instruction_index();
+  int index = block->last_instruction_index();
+
+  LifetimePosition block_start_position =
+      LifetimePosition::FromInstructionIndex(block_start);
+
+  while (index >= block_start) {
+    LifetimePosition curr_position =
+        LifetimePosition::FromInstructionIndex(index);
+
+    if (chunk_->IsGapAt(index)) {
+      // We have a gap at this position.
+      LGap* gap = chunk_->GetGapAt(index);
+      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
+      const ZoneList<LMoveOperands>* move_operands = move->move_operands();
+      for (int i = 0; i < move_operands->length(); ++i) {
+        LMoveOperands* cur = &move_operands->at(i);
+        if (cur->IsIgnored()) continue;
+        LOperand* from = cur->from();
+        LOperand* to = cur->to();
+        HPhi* phi = LookupPhi(to);
+        LOperand* hint = to;
+        if (phi != NULL) {
+          // This is a phi resolving move.
+          if (!phi->block()->IsLoopHeader()) {
+            hint = LiveRangeFor(phi->id())->FirstHint();
+          }
+        } else {
+          if (to->IsUnallocated()) {
+            if (live->Contains(to->VirtualRegister())) {
+              Define(curr_position, to, from);
+              live->Remove(to->VirtualRegister());
+            } else {
+              cur->Eliminate();
+              continue;
+            }
+          } else {
+            Define(curr_position, to, from);
+          }
+        }
+        Use(block_start_position, curr_position, from, hint);
+        if (from->IsUnallocated()) {
+          live->Add(from->VirtualRegister());
+        }
+      }
+    } else {
+      ASSERT(!chunk_->IsGapAt(index));
+      InstructionSummary* summary = GetSummary(index);
+
+      if (summary != NULL) {
+        LOperand* output = summary->Output();
+        if (output != NULL) {
+          if (output->IsUnallocated()) live->Remove(output->VirtualRegister());
+          Define(curr_position, output, NULL);
+        }
+
+        if (summary->IsCall()) {
+          for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+            if (output == NULL || !output->IsRegister() ||
+                output->index() != i) {
+              LiveRange* range = FixedLiveRangeFor(i);
+              range->AddUseInterval(curr_position,
+                                    curr_position.InstructionEnd());
+            }
+          }
+          for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+            if (output == NULL || !output->IsDoubleRegister() ||
+                output->index() != i) {
+              LiveRange* range = FixedDoubleLiveRangeFor(i);
+              range->AddUseInterval(curr_position,
+                                    curr_position.InstructionEnd());
+            }
+          }
+        }
+
+        for (int i = 0; i < summary->InputCount(); ++i) {
+          LOperand* input = summary->InputAt(i);
+
+          LifetimePosition use_pos;
+          if (input->IsUnallocated() &&
+              LUnallocated::cast(input)->IsUsedAtStart()) {
+            use_pos = curr_position;
+          } else {
+            use_pos = curr_position.InstructionEnd();
+          }
+
+          Use(block_start_position, use_pos, input, NULL);
+          if (input->IsUnallocated()) live->Add(input->VirtualRegister());
+        }
+
+        for (int i = 0; i < summary->TempCount(); ++i) {
+          LOperand* temp = summary->TempAt(i);
+          if (summary->IsCall()) {
+            if (temp->IsRegister()) continue;
+            if (temp->IsUnallocated()) {
+              LUnallocated* temp_unalloc = LUnallocated::cast(temp);
+              if (temp_unalloc->HasFixedPolicy()) {
+                continue;
+              }
+            }
+          }
+          Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
+          Define(curr_position, temp, NULL);
+        }
+      }
+    }
+
+    index = index - 1;
+  }
+}
+
+
+void LAllocator::ResolvePhis(HBasicBlock* block) {
+  const ZoneList<HPhi*>* phis = block->phis();
+  for (int i = 0; i < phis->length(); ++i) {
+    HPhi* phi = phis->at(i);
+    LUnallocated* phi_operand = new LUnallocated(LUnallocated::NONE);
+    phi_operand->set_virtual_register(phi->id());
+    for (int j = 0; j < phi->OperandCount(); ++j) {
+      HValue* op = phi->OperandAt(j);
+      LOperand* operand = NULL;
+      if (op->IsConstant() && op->EmitAtUses()) {
+        HConstant* constant = HConstant::cast(op);
+        operand = chunk_->DefineConstantOperand(constant);
+      } else {
+        ASSERT(!op->EmitAtUses());
+        LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE);
+        unalloc->set_virtual_register(op->id());
+        operand = unalloc;
+      }
+      HBasicBlock* cur_block = block->predecessors()->at(j);
+      // The gap move must be added without the special processing done
+      // in AddConstraintsGapMove.
+      chunk_->AddGapMove(cur_block->last_instruction_index() - 1,
+                         operand,
+                         phi_operand);
+    }
+
+    LiveRange* live_range = LiveRangeFor(phi->id());
+    LLabel* label = chunk_->GetLabel(phi->block()->block_id());
+    label->GetOrCreateParallelMove(LGap::START)->
+        AddMove(phi_operand, live_range->GetSpillOperand());
+    live_range->SetSpillStartIndex(phi->block()->first_instruction_index());
+  }
+}
+
+
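+// Runs the full allocation pipeline on 'chunk': constraint and phi
+// resolution, live range construction, allocation of the general and
+// double register files, pointer map population, and the passes that
+// insert connecting and resolving moves.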
+void LAllocator::Allocate(LChunk* chunk) {
+  ASSERT(chunk_ == NULL);
+  chunk_ = chunk;
+  MeetRegisterConstraints();
+  ResolvePhis();
+  BuildLiveRanges();
+  AllocateGeneralRegisters();
+  AllocateDoubleRegisters();
+  PopulatePointerMaps();
+  if (has_osr_entry_) ProcessOsrEntry();
+  ConnectRanges();
+  ResolveControlFlow();
+}
+
+
+void LAllocator::MeetRegisterConstraints() {
+  HPhase phase("Register constraints", chunk());
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); ++i) {
+    HBasicBlock* block = blocks->at(i);
+    MeetRegisterConstraints(block);
+  }
+}
+
+
+void LAllocator::ResolvePhis() {
+  HPhase phase("Resolve phis", chunk());
+
+  // Process the blocks in reverse order.
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
+    HBasicBlock* block = blocks->at(block_id);
+    ResolvePhis(block);
+  }
+}
+
+
+void LAllocator::ResolveControlFlow(LiveRange* range,
+                                    HBasicBlock* block,
+                                    HBasicBlock* pred) {
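+  // Find the child ranges that cover the end of the predecessor and the
+  // start of this block; if the value lives in different locations on the
+  // two sides of the edge, insert a gap move connecting them.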
+  LifetimePosition pred_end =
+      LifetimePosition::FromInstructionIndex(pred->last_instruction_index()).
+      PrevInstruction();
+
+  LifetimePosition cur_start =
+      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
+  LiveRange* pred_cover = NULL;
+  LiveRange* cur_cover = NULL;
+  LiveRange* cur_range = range;
+  while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
+    if (cur_range->CanCover(cur_start)) {
+      ASSERT(cur_cover == NULL);
+      cur_cover = cur_range;
+    }
+    if (cur_range->CanCover(pred_end)) {
+      ASSERT(pred_cover == NULL);
+      pred_cover = cur_range;
+    }
+    cur_range = cur_range->next();
+  }
+
+  if (cur_cover->IsSpilled()) return;
+  ASSERT(pred_cover != NULL && cur_cover != NULL);
+  if (pred_cover != cur_cover) {
+    LOperand* pred_op = pred_cover->CreateAssignedOperand();
+    LOperand* cur_op = cur_cover->CreateAssignedOperand();
+    if (!pred_op->Equals(cur_op)) {
+      LGap* gap = NULL;
+      if (block->predecessors()->length() == 1) {
+        gap = chunk_->GetGapAt(block->first_instruction_index());
+      } else {
+        ASSERT(pred->end()->SecondSuccessor() == NULL);
+        gap = GetLastGap(pred);
+      }
+      gap->GetOrCreateParallelMove(LGap::START)->AddMove(pred_op, cur_op);
+    }
+  }
+}
+
+
+LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
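+  // If 'pos' is a gap, use its START or END move directly; otherwise use
+  // the AFTER move of the preceding gap for an instruction-start position
+  // or the BEFORE move of the following gap for an instruction-end one.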
+  int index = pos.InstructionIndex();
+  if (chunk_->IsGapAt(index)) {
+    LGap* gap = chunk_->GetGapAt(index);
+    return gap->GetOrCreateParallelMove(
+        pos.IsInstructionStart() ? LGap::START : LGap::END);
+  }
+  int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
+  return chunk_->GetGapAt(gap_pos)->GetOrCreateParallelMove(
+      (gap_pos < index) ? LGap::AFTER : LGap::BEFORE);
+}
+
+
+HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
+  LGap* gap = chunk_->GetGapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
+  return gap->block();
+}
+
+
+void LAllocator::ConnectRanges() {
+  HPhase phase("Connect ranges", this);
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* first_range = live_ranges()->at(i);
+    if (first_range == NULL || first_range->parent() != NULL) continue;
+
+    LiveRange* second_range = first_range->next();
+    while (second_range != NULL) {
+      LifetimePosition pos = second_range->Start();
+
+      if (!second_range->IsSpilled()) {
+        // Add gap move if the two live ranges touch and there is no block
+        // boundary.
+        if (first_range->End().Value() == pos.Value()) {
+          bool should_insert = true;
+          if (IsBlockBoundary(pos)) {
+            should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
+          }
+          if (should_insert) {
+            LParallelMove* move = GetConnectingParallelMove(pos);
+            LOperand* prev_operand = first_range->CreateAssignedOperand();
+            LOperand* cur_operand = second_range->CreateAssignedOperand();
+            move->AddMove(prev_operand, cur_operand);
+          }
+        }
+      }
+
+      first_range = second_range;
+      second_range = second_range->next();
+    }
+  }
+}
+
+
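+// Control flow on an edge can be resolved eagerly only when the block has
+// a single predecessor and that predecessor immediately precedes it in
+// the block order.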
+bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const {
+  if (block->predecessors()->length() != 1) return false;
+  return block->predecessors()->first()->block_id() == block->block_id() - 1;
+}
+
+
+void LAllocator::ResolveControlFlow() {
+  HPhase phase("Resolve control flow", this);
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int block_id = 1; block_id < blocks->length(); ++block_id) {
+    HBasicBlock* block = blocks->at(block_id);
+    if (CanEagerlyResolveControlFlow(block)) continue;
+    BitVector* live = live_in_sets_[block->block_id()];
+    BitVector::Iterator iterator(live);
+    while (!iterator.Done()) {
+      int operand_index = iterator.Current();
+      for (int i = 0; i < block->predecessors()->length(); ++i) {
+        HBasicBlock* cur = block->predecessors()->at(i);
+        LiveRange* cur_range = LiveRangeFor(operand_index);
+        ResolveControlFlow(cur_range, block, cur);
+      }
+      iterator.Advance();
+    }
+  }
+}
+
+
+void LAllocator::BuildLiveRanges() {
+  HPhase phase("Build live ranges", this);
+  InitializeLivenessAnalysis();
+  // Process the blocks in reverse order.
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
+    HBasicBlock* block = blocks->at(block_id);
+    BitVector* live = ComputeLiveOut(block);
+    // Initially consider all live_out values live for the entire block. We
+    // will shorten these intervals if necessary.
+    AddInitialIntervals(block, live);
+
+    // Process the instructions in reverse order, generating and killing
+    // live values.
+    ProcessInstructions(block, live);
+    // All phi output operands are killed by this block.
+    const ZoneList<HPhi*>* phis = block->phis();
+    for (int i = 0; i < phis->length(); ++i) {
+      // The live range interval already ends at the first instruction of the
+      // block.
+      HPhi* phi = phis->at(i);
+      live->Remove(phi->id());
+
+      LOperand* hint = NULL;
+      LOperand* phi_operand = NULL;
+      LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
+      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
+      for (int j = 0; j < move->move_operands()->length(); ++j) {
+        LOperand* to = move->move_operands()->at(j).to();
+        if (to->IsUnallocated() && to->VirtualRegister() == phi->id()) {
+          hint = move->move_operands()->at(j).from();
+          phi_operand = to;
+          break;
+        }
+      }
+      ASSERT(hint != NULL);
+
+      LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
+              block->first_instruction_index());
+      Define(block_start, phi_operand, hint);
+    }
+
+    // Now 'live' is the live_in set for this block, except that it does not
+    // include values live out on backward successor edges.
+    live_in_sets_[block_id] = live;
+
+    // If this block is a loop header, go back and patch up the necessary
+    // predecessor blocks.
+    if (block->IsLoopHeader()) {
+      // TODO(kmillikin): Need to be able to get the last block of the loop
+      // in the loop information. Add a live range stretching from the first
+      // loop instruction to the last for each value live on entry to the
+      // header.
+      HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
+      BitVector::Iterator iterator(live);
+      LifetimePosition start = LifetimePosition::FromInstructionIndex(
+          block->first_instruction_index());
+      LifetimePosition end = LifetimePosition::FromInstructionIndex(
+          back_edge->last_instruction_index());
+      while (!iterator.Done()) {
+        int operand_index = iterator.Current();
+        LiveRange* range = LiveRangeFor(operand_index);
+        range->EnsureInterval(start, end);
+        iterator.Advance();
+      }
+
+      for (int i = block->block_id() + 1; i <= back_edge->block_id(); ++i) {
+        live_in_sets_[i]->Union(*live);
+      }
+    }
+
+#ifdef DEBUG
+    if (block_id == 0) {
+      BitVector::Iterator iterator(live);
+      bool found = false;
+      while (!iterator.Done()) {
+        found = true;
+        int operand_index = iterator.Current();
+        PrintF("Function: %s\n",
+               *graph()->info()->function()->debug_name()->ToCString());
+        PrintF("Value %d used before first definition!\n", operand_index);
+        LiveRange* range = LiveRangeFor(operand_index);
+        PrintF("First use is at %d\n", range->first_pos()->pos().Value());
+        iterator.Advance();
+      }
+      ASSERT(!found);
+    }
+#endif
+  }
+}
+
+
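+// Verifies that the recorded pointer maps are sorted by lithium position;
+// PopulatePointerMaps relies on this ordering.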
+bool LAllocator::SafePointsAreInOrder() const {
+  const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
+  int safe_point = 0;
+  for (int i = 0; i < pointer_maps->length(); ++i) {
+    LPointerMap* map = pointer_maps->at(i);
+    if (safe_point > map->lithium_position()) return false;
+    safe_point = map->lithium_position();
+  }
+  return true;
+}
+
+
+void LAllocator::PopulatePointerMaps() {
+  HPhase phase("Populate pointer maps", this);
+  const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
+
+  ASSERT(SafePointsAreInOrder());
+
+  // Iterate over all safe point positions and record a pointer
+  // for all spilled live ranges at this point.
+  int first_safe_point_index = 0;
+  int last_range_start = 0;
+  for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
+    LiveRange* range = live_ranges()->at(range_idx);
+    if (range == NULL) continue;
+    // Iterate over the first parts of multi-part live ranges.
+    if (range->parent() != NULL) continue;
+    // Skip non-pointer values.
+    if (!HasTaggedValue(range->id())) continue;
+    // Skip empty live ranges.
+    if (range->IsEmpty()) continue;
+
+    // Find the extent of the range and its children.
+    int start = range->Start().InstructionIndex();
+    int end = 0;
+    for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
+      LifetimePosition this_end = cur->End();
+      if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
+      ASSERT(cur->Start().InstructionIndex() >= start);
+    }
+
+    // Most of the ranges are in order, but not all.  Keep an eye on when
+    // they step backwards and reset the first_safe_point_index so we don't
+    // miss any safe points.
+    if (start < last_range_start) {
+      first_safe_point_index = 0;
+    }
+    last_range_start = start;
+
+    // Step across all the safe points that are before the start of this range,
+    // recording how far we step in order to save doing this for the next range.
+    while (first_safe_point_index < pointer_maps->length()) {
+      LPointerMap* map = pointer_maps->at(first_safe_point_index);
+      int safe_point = map->lithium_position();
+      if (safe_point >= start) break;
+      first_safe_point_index++;
+    }
+
+    // Step through the safe points to see whether they are in the range.
+    for (int safe_point_index = first_safe_point_index;
+         safe_point_index < pointer_maps->length();
+         ++safe_point_index) {
+      LPointerMap* map = pointer_maps->at(safe_point_index);
+      int safe_point = map->lithium_position();
+
+      // The safe points are sorted so we can stop searching here.
+      if (safe_point - 1 > end) break;
+
+      // Advance to the next active range that covers the current
+      // safe point position.
+      LifetimePosition safe_point_pos =
+          LifetimePosition::FromInstructionIndex(safe_point);
+      LiveRange* cur = range;
+      while (cur != NULL && !cur->Covers(safe_point_pos.PrevInstruction())) {
+        cur = cur->next();
+      }
+      if (cur == NULL) continue;
+
+      // Check if the live range is spilled and the safe point is after
+      // the spill position.
+      if (range->HasAllocatedSpillOperand() &&
+          safe_point >= range->spill_start_index()) {
+        TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
+                   range->id(), range->spill_start_index(), safe_point);
+        map->RecordPointer(range->GetSpillOperand());
+      }
+
+      if (!cur->IsSpilled()) {
+        TraceAlloc("Pointer in register for range %d (start at %d) "
+                   "at safe point %d\n",
+                   cur->id(), cur->Start().Value(), safe_point);
+        LOperand* operand = cur->CreateAssignedOperand();
+        ASSERT(!operand->IsStackSlot());
+        map->RecordPointer(operand);
+      }
+    }
+  }
+}
+
+
+void LAllocator::ProcessOsrEntry() {
+  const ZoneList<LInstruction*>* instrs = chunk_->instructions();
+
+  // Linear search for the OSR entry instruction in the chunk.
+  int index = -1;
+  while (++index < instrs->length() &&
+         !instrs->at(index)->IsOsrEntry()) {
+  }
+  ASSERT(index < instrs->length());
+  LOsrEntry* instruction = LOsrEntry::cast(instrs->at(index));
+
+  LifetimePosition position = LifetimePosition::FromInstructionIndex(index);
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* range = live_ranges()->at(i);
+    if (range != NULL) {
+      if (range->Covers(position) &&
+          range->HasRegisterAssigned() &&
+          range->TopLevel()->HasAllocatedSpillOperand()) {
+        int reg_index = range->assigned_register();
+        LOperand* spill_operand = range->TopLevel()->GetSpillOperand();
+        if (range->IsDouble()) {
+          instruction->MarkSpilledDoubleRegister(reg_index, spill_operand);
+        } else {
+          instruction->MarkSpilledRegister(reg_index, spill_operand);
+        }
+      }
+    }
+  }
+}
+
+
+void LAllocator::AllocateGeneralRegisters() {
+  HPhase phase("Allocate general registers", this);
+  num_registers_ = Register::kNumAllocatableRegisters;
+  mode_ = GENERAL_REGISTERS;
+  AllocateRegisters();
+}
+
+
+void LAllocator::AllocateDoubleRegisters() {
+  HPhase phase("Allocate double registers", this);
+  num_registers_ = DoubleRegister::kNumAllocatableRegisters;
+  mode_ = DOUBLE_REGISTERS;
+  AllocateRegisters();
+}
+
+
+void LAllocator::AllocateRegisters() {
+  ASSERT(mode_ != NONE);
+  reusable_slots_.Clear();
+
+  for (int i = 0; i < live_ranges_.length(); ++i) {
+    if (live_ranges_[i] != NULL) {
+      if (RequiredRegisterKind(live_ranges_[i]->id()) == mode_) {
+        AddToUnhandledUnsorted(live_ranges_[i]);
+      }
+    }
+  }
+  SortUnhandled();
+  ASSERT(UnhandledIsSorted());
+
+  ASSERT(active_live_ranges_.is_empty());
+  ASSERT(inactive_live_ranges_.is_empty());
+
+  if (mode_ == DOUBLE_REGISTERS) {
+    for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
+      LiveRange* current = fixed_double_live_ranges_.at(i);
+      if (current != NULL) {
+        AddToInactive(current);
+      }
+    }
+  } else {
+    for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
+      LiveRange* current = fixed_live_ranges_.at(i);
+      if (current != NULL) {
+        AddToInactive(current);
+      }
+    }
+  }
+
+  while (!unhandled_live_ranges_.is_empty()) {
+    ASSERT(UnhandledIsSorted());
+    LiveRange* current = unhandled_live_ranges_.RemoveLast();
+    ASSERT(UnhandledIsSorted());
+    LifetimePosition position = current->Start();
+    TraceAlloc("Processing interval %d start=%d\n",
+               current->id(),
+               position.Value());
+
+    if (current->HasAllocatedSpillOperand()) {
+      TraceAlloc("Live range %d already has a spill operand\n", current->id());
+      LifetimePosition next_pos = position;
+      if (chunk_->IsGapAt(next_pos.InstructionIndex())) {
+        next_pos = next_pos.NextInstruction();
+      }
+      UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
+      // If the range already has a spill operand and it doesn't need a
+      // register immediately, split it and spill the first part of the range.
+      if (pos == NULL) {
+        Spill(current);
+        continue;
+      } else if (pos->pos().Value() >
+                 current->Start().NextInstruction().Value()) {
+        // Do not spill live range eagerly if use position that can benefit from
+        // the register is too close to the start of live range.
+        SpillBetween(current, current->Start(), pos->pos());
+        ASSERT(UnhandledIsSorted());
+        continue;
+      }
+    }
+
+    for (int i = 0; i < active_live_ranges_.length(); ++i) {
+      LiveRange* cur_active = active_live_ranges_.at(i);
+      if (cur_active->End().Value() <= position.Value()) {
+        ActiveToHandled(cur_active);
+        --i;  // The live range was removed from the list of active live ranges.
+      } else if (!cur_active->Covers(position)) {
+        ActiveToInactive(cur_active);
+        --i;  // The live range was removed from the list of active live ranges.
+      }
+    }
+
+    for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+      LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+      if (cur_inactive->End().Value() <= position.Value()) {
+        InactiveToHandled(cur_inactive);
+        --i;  // Live range was removed from the list of inactive live ranges.
+      } else if (cur_inactive->Covers(position)) {
+        InactiveToActive(cur_inactive);
+        --i;  // Live range was removed from the list of inactive live ranges.
+      }
+    }
+
+    ASSERT(!current->HasRegisterAssigned() && !current->IsSpilled());
+
+    bool result = TryAllocateFreeReg(current);
+    if (!result) {
+      AllocateBlockedReg(current);
+    }
+
+    if (current->HasRegisterAssigned()) {
+      AddToActive(current);
+    }
+  }
+
+  active_live_ranges_.Clear();
+  inactive_live_ranges_.Clear();
+}
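+
+
+// A compact picture of the loop above (illustrative only): unhandled ranges
+// are popped in order of increasing start position; every active range
+// covers the current position and every inactive range has a lifetime hole
+// there.  For example, a range built from the intervals [0, 4[ and [14, 20[
+// is active at position 0, becomes inactive at 4 when its hole begins, and
+// becomes active again at 14; a range [0, 10[ moves straight to handled
+// once the current position reaches 10.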
+
+
+void LAllocator::Setup() {
+  LConstantOperand::SetupCache();
+  LStackSlot::SetupCache();
+  LDoubleStackSlot::SetupCache();
+  LRegister::SetupCache();
+  LDoubleRegister::SetupCache();
+}
+
+
+const char* LAllocator::RegisterName(int allocation_index) {
+  ASSERT(mode_ != NONE);
+  if (mode_ == GENERAL_REGISTERS) {
+    return Register::AllocationIndexToString(allocation_index);
+  } else {
+    return DoubleRegister::AllocationIndexToString(allocation_index);
+  }
+}
+
+
+void LAllocator::TraceAlloc(const char* msg, ...) {
+  if (FLAG_trace_alloc) {
+    va_list arguments;
+    va_start(arguments, msg);
+    OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
+void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
+  operand->set_virtual_register(value->id());
+  current_summary()->AddInput(operand);
+}
+
+
+bool LAllocator::HasTaggedValue(int virtual_register) const {
+  HValue* value = graph()->LookupValue(virtual_register);
+  if (value == NULL) return false;
+  return value->representation().IsTagged();
+}
+
+
+RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
+  HValue* value = graph()->LookupValue(virtual_register);
+  if (value != NULL && value->representation().IsDouble()) {
+    return DOUBLE_REGISTERS;
+  }
+  return GENERAL_REGISTERS;
+}
+
+
+void LAllocator::MarkAsCall() {
+  current_summary()->MarkAsCall();
+}
+
+
+void LAllocator::RecordDefinition(HInstruction* instr, LUnallocated* operand) {
+  operand->set_virtual_register(instr->id());
+  current_summary()->SetOutput(operand);
+}
+
+
+void LAllocator::RecordTemporary(LUnallocated* operand) {
+  ASSERT(next_virtual_register_ < LUnallocated::kMaxVirtualRegisters);
+  if (!operand->HasFixedPolicy()) {
+    operand->set_virtual_register(next_virtual_register_++);
+  }
+  current_summary()->AddTemp(operand);
+}
+
+
+int LAllocator::max_initial_value_ids() {
+  return LUnallocated::kMaxVirtualRegisters / 32;
+}
+
+
+void LAllocator::BeginInstruction() {
+  if (next_summary_ == NULL) {
+    next_summary_ = new InstructionSummary();
+  }
+  summary_stack_.Add(next_summary_);
+  next_summary_ = NULL;
+}
+
+
+void LAllocator::SummarizeInstruction(int index) {
+  InstructionSummary* sum = summary_stack_.RemoveLast();
+  if (summaries_.length() <= index) {
+    summaries_.AddBlock(NULL, index + 1 - summaries_.length());
+  }
+  ASSERT(summaries_[index] == NULL);
+  if (sum->Output() != NULL || sum->InputCount() > 0 || sum->TempCount() > 0) {
+    summaries_[index] = sum;
+  } else {
+    next_summary_ = sum;
+  }
+}
+
+
+void LAllocator::OmitInstruction() {
+  summary_stack_.RemoveLast();
+}
+
+
+void LAllocator::AddToActive(LiveRange* range) {
+  TraceAlloc("Add live range %d to active\n", range->id());
+  active_live_ranges_.Add(range);
+}
+
+
+void LAllocator::AddToInactive(LiveRange* range) {
+  TraceAlloc("Add live range %d to inactive\n", range->id());
+  inactive_live_ranges_.Add(range);
+}
+
+
+void LAllocator::AddToUnhandledSorted(LiveRange* range) {
+  if (range == NULL || range->IsEmpty()) return;
+  ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
+  for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
+    LiveRange* cur_range = unhandled_live_ranges_.at(i);
+    if (range->ShouldBeAllocatedBefore(cur_range)) {
+      TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
+      unhandled_live_ranges_.InsertAt(i + 1, range);
+      ASSERT(UnhandledIsSorted());
+      return;
+    }
+  }
+  TraceAlloc("Add live range %d to unhandled at start\n", range->id());
+  unhandled_live_ranges_.InsertAt(0, range);
+  ASSERT(UnhandledIsSorted());
+}
+
+
+void LAllocator::AddToUnhandledUnsorted(LiveRange* range) {
+  if (range == NULL || range->IsEmpty()) return;
+  ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
+  TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
+  unhandled_live_ranges_.Add(range);
+}
+
+
+static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
+  ASSERT(!(*a)->ShouldBeAllocatedBefore(*b) ||
+         !(*b)->ShouldBeAllocatedBefore(*a));
+  if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
+  if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
+  return (*a)->id() - (*b)->id();
+}
+
+
+// Sort the unhandled live ranges so that the ranges to be processed first are
+// at the end of the array list.  This is convenient for the register allocation
+// algorithm because it is efficient to remove elements from the end.
+void LAllocator::SortUnhandled() {
+  TraceAlloc("Sort unhandled\n");
+  unhandled_live_ranges_.Sort(&UnhandledSortHelper);
+}
+
+
+bool LAllocator::UnhandledIsSorted() {
+  int len = unhandled_live_ranges_.length();
+  for (int i = 1; i < len; i++) {
+    LiveRange* a = unhandled_live_ranges_.at(i - 1);
+    LiveRange* b = unhandled_live_ranges_.at(i);
+    if (a->Start().Value() < b->Start().Value()) return false;
+  }
+  return true;
+}
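+
+
+// Illustrative example: the comparator above places the range that should
+// be allocated first at the *end* of the list, so ranges with start
+// positions {10, 4, 7} sort to [10, 7, 4] and RemoveLast() in
+// AllocateRegisters pops the range starting at 4.  Allocation thus proceeds
+// by increasing start position while removal stays O(1).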
+
+
+void LAllocator::FreeSpillSlot(LiveRange* range) {
+  // Check that we are the last range.
+  if (range->next() != NULL) return;
+
+  if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
+
+  int index = range->TopLevel()->GetSpillOperand()->index();
+  if (index >= 0) {
+    reusable_slots_.Add(range);
+  }
+}
+
+
+LOperand* LAllocator::TryReuseSpillSlot(LiveRange* range) {
+  if (reusable_slots_.is_empty()) return NULL;
+  if (reusable_slots_.first()->End().Value() >
+      range->TopLevel()->Start().Value()) {
+    return NULL;
+  }
+  LOperand* result = reusable_slots_.first()->TopLevel()->GetSpillOperand();
+  reusable_slots_.Remove(0);
+  return result;
+}
+
+
+void LAllocator::ActiveToHandled(LiveRange* range) {
+  ASSERT(active_live_ranges_.Contains(range));
+  active_live_ranges_.RemoveElement(range);
+  TraceAlloc("Moving live range %d from active to handled\n", range->id());
+  FreeSpillSlot(range);
+}
+
+
+void LAllocator::ActiveToInactive(LiveRange* range) {
+  ASSERT(active_live_ranges_.Contains(range));
+  active_live_ranges_.RemoveElement(range);
+  inactive_live_ranges_.Add(range);
+  TraceAlloc("Moving live range %d from active to inactive\n", range->id());
+}
+
+
+void LAllocator::InactiveToHandled(LiveRange* range) {
+  ASSERT(inactive_live_ranges_.Contains(range));
+  inactive_live_ranges_.RemoveElement(range);
+  TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
+  FreeSpillSlot(range);
+}
+
+
+void LAllocator::InactiveToActive(LiveRange* range) {
+  ASSERT(inactive_live_ranges_.Contains(range));
+  inactive_live_ranges_.RemoveElement(range);
+  active_live_ranges_.Add(range);
+  TraceAlloc("Moving live range %d from inactive to active\n", range->id());
+}
+
+
+// TryAllocateFreeReg and AllocateBlockedReg assume this
+// when allocating local arrays.
+STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
+              Register::kNumAllocatableRegisters);
+
+
+bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
+  LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
+
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+    free_until_pos[i] = LifetimePosition::MaxPosition();
+  }
+
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* cur_active = active_live_ranges_.at(i);
+    free_until_pos[cur_active->assigned_register()] =
+        LifetimePosition::FromInstructionIndex(0);
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+    ASSERT(cur_inactive->End().Value() > current->Start().Value());
+    LifetimePosition next_intersection =
+        cur_inactive->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+    int cur_reg = cur_inactive->assigned_register();
+    free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+  }
+
+  UsePosition* hinted_use = current->FirstPosWithHint();
+  if (hinted_use != NULL) {
+    LOperand* hint = hinted_use->hint();
+    if (hint->IsRegister() || hint->IsDoubleRegister()) {
+      int register_index = hint->index();
+      TraceAlloc(
+          "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
+          RegisterName(register_index),
+          free_until_pos[register_index].Value(),
+          current->id(),
+          current->End().Value());
+
+      // The desired register is free until the end of the current live range.
+      if (free_until_pos[register_index].Value() >= current->End().Value()) {
+        TraceAlloc("Assigning preferred reg %s to live range %d\n",
+                   RegisterName(register_index),
+                   current->id());
+        current->set_assigned_register(register_index, mode_);
+        return true;
+      }
+    }
+  }
+
+  // Find the register which stays free for the longest time.
+  int reg = 0;
+  for (int i = 1; i < RegisterCount(); ++i) {
+    if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
+      reg = i;
+    }
+  }
+
+  LifetimePosition pos = free_until_pos[reg];
+
+  if (pos.Value() <= current->Start().Value()) {
+    // All registers are blocked.
+    return false;
+  }
+
+  if (pos.Value() < current->End().Value()) {
+    // Register reg is available at the range start but becomes blocked before
+    // the range end. Split current at position where it becomes blocked.
+    LiveRange* tail = SplitAt(current, pos);
+    AddToUnhandledSorted(tail);
+  }
+
+  // Register reg is available at the range start and is free until
+  // the range end.
+  ASSERT(pos.Value() >= current->End().Value());
+  TraceAlloc("Assigning free reg %s to live range %d\n",
+             RegisterName(reg),
+             current->id());
+  current->set_assigned_register(reg, mode_);
+
+  return true;
+}
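+
+
+// Worked example (illustrative only): with three registers whose
+// free_until_pos values are {4, 30, 12}, register 1 is picked for a current
+// range [0, 20[.  Because 30 >= 20, the whole range gets the register; with
+// values {4, 16, 12} the range would instead be split at 16, the tail
+// re-queued as unhandled, and the head assigned register 1.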
+
+
+void LAllocator::AllocateBlockedReg(LiveRange* current) {
+  UsePosition* register_use = current->NextRegisterPosition(current->Start());
+  if (register_use == NULL) {
+    // There is no use in the current live range that requires a register.
+    // We can just spill it.
+    Spill(current);
+    return;
+  }
+
+  LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
+  LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
+
+  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+    use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
+  }
+
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* range = active_live_ranges_[i];
+    int cur_reg = range->assigned_register();
+    if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
+      block_pos[cur_reg] = use_pos[cur_reg] =
+          LifetimePosition::FromInstructionIndex(0);
+    } else {
+      UsePosition* next_use = range->NextUsePositionRegisterIsBeneficial(
+          current->Start());
+      if (next_use == NULL) {
+        use_pos[cur_reg] = range->End();
+      } else {
+        use_pos[cur_reg] = next_use->pos();
+      }
+    }
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* range = inactive_live_ranges_.at(i);
+    ASSERT(range->End().Value() > current->Start().Value());
+    LifetimePosition next_intersection = range->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+    int cur_reg = range->assigned_register();
+    if (range->IsFixed()) {
+      block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+      use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+    } else {
+      use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+    }
+  }
+
+  int reg = 0;
+  for (int i = 1; i < RegisterCount(); ++i) {
+    if (use_pos[i].Value() > use_pos[reg].Value()) {
+      reg = i;
+    }
+  }
+
+  LifetimePosition pos = use_pos[reg];
+
+  if (pos.Value() < register_use->pos().Value()) {
+    // All registers are blocked before the first use that requires a register.
+    // Spill starting part of live range up to that use.
+    //
+    // Corner case: the first use position is equal to the start of the range.
+    // In this case we have nothing to spill and SpillBetween would just
+    // return this range to the list of unhandled ones, leading to an
+    // infinite loop.
+    ASSERT(current->Start().Value() < register_use->pos().Value());
+    SpillBetween(current, current->Start(), register_use->pos());
+    return;
+  }
+
+  if (block_pos[reg].Value() < current->End().Value()) {
+    // Register becomes blocked before the current range end. Split before that
+    // position.
+    LiveRange* tail = SplitBetween(current,
+                                   current->Start(),
+                                   block_pos[reg].InstructionStart());
+    AddToUnhandledSorted(tail);
+  }
+
+  // Register reg is not blocked for the whole range.
+  ASSERT(block_pos[reg].Value() >= current->End().Value());
+  TraceAlloc("Assigning blocked reg %s to live range %d\n",
+             RegisterName(reg),
+             current->id());
+  current->set_assigned_register(reg, mode_);
+
+  // This register was not free. Thus we need to find and spill
+  // parts of active and inactive live ranges that use the same register
+  // at the same lifetime positions as current.
+  SplitAndSpillIntersecting(current);
+}
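+
+
+// Decision sketch for the code above (illustrative only): use_pos[r] is the
+// first position at which evicting register r would hurt (roughly, the next
+// beneficial use of the range holding it), while block_pos[r] is the first
+// position at which r is simply unavailable (a fixed range).  Picking the
+// register with the furthest use_pos and spilling the intersecting holders
+// frees a register for the longest stretch at the lowest cost.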
+
+
+void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
+  ASSERT(current->HasRegisterAssigned());
+  int reg = current->assigned_register();
+  LifetimePosition split_pos = current->Start();
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* range = active_live_ranges_[i];
+    if (range->assigned_register() == reg) {
+      UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+      if (next_pos == NULL) {
+        SpillAfter(range, split_pos);
+      } else {
+        SpillBetween(range, split_pos, next_pos->pos());
+      }
+      ActiveToHandled(range);
+      --i;
+    }
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* range = inactive_live_ranges_[i];
+    ASSERT(range->End().Value() > current->Start().Value());
+    if (range->assigned_register() == reg && !range->IsFixed()) {
+      LifetimePosition next_intersection = range->FirstIntersection(current);
+      if (next_intersection.IsValid()) {
+        UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+        if (next_pos == NULL) {
+          SpillAfter(range, split_pos);
+        } else {
+          next_intersection = Min(next_intersection, next_pos->pos());
+          SpillBetween(range, split_pos, next_intersection);
+        }
+        InactiveToHandled(range);
+        --i;
+      }
+    }
+  }
+}
+
+
+bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
+  return pos.IsInstructionStart() &&
+      chunk_->instructions()->at(pos.InstructionIndex())->IsLabel();
+}
+
+
+void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) {
+  UsePosition* prev_pos = prev->AddUsePosition(
+      LifetimePosition::FromInstructionIndex(pos));
+  UsePosition* next_pos = next->AddUsePosition(
+      LifetimePosition::FromInstructionIndex(pos));
+  LOperand* prev_operand = prev_pos->operand();
+  LOperand* next_operand = next_pos->operand();
+  LGap* gap = chunk_->GetGapAt(pos);
+  gap->GetOrCreateParallelMove(LGap::START)->
+      AddMove(prev_operand, next_operand);
+  next_pos->set_hint(prev_operand);
+}
+
+
+LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) {
+  ASSERT(!range->IsFixed());
+  TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
+
+  if (pos.Value() <= range->Start().Value()) return range;
+
+  LiveRange* result = LiveRangeFor(next_virtual_register_++);
+  range->SplitAt(pos, result);
+  return result;
+}
+
+
+LiveRange* LAllocator::SplitBetween(LiveRange* range,
+                                    LifetimePosition start,
+                                    LifetimePosition end) {
+  ASSERT(!range->IsFixed());
+  TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
+             range->id(),
+             start.Value(),
+             end.Value());
+
+  LifetimePosition split_pos = FindOptimalSplitPos(start, end);
+  ASSERT(split_pos.Value() >= start.Value());
+  return SplitAt(range, split_pos);
+}
+
+
+LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
+                                                 LifetimePosition end) {
+  int start_instr = start.InstructionIndex();
+  int end_instr = end.InstructionIndex();
+  ASSERT(start_instr <= end_instr);
+
+  // We have no choice: the interval is confined to a single instruction.
+  if (start_instr == end_instr) return end;
+
+  HBasicBlock* start_block = GetBlock(start);
+  HBasicBlock* end_block = GetBlock(end);
+
+  if (end_block == start_block) {
+    // The interval is split in the same basic block. Split at latest possible
+    // position.
+    return end;
+  }
+
+  HBasicBlock* block = end_block;
+  // Find header of outermost loop.
+  while (block->parent_loop_header() != NULL &&
+      block->parent_loop_header()->block_id() > start_block->block_id()) {
+    block = block->parent_loop_header();
+  }
+
+  if (block == end_block) return end;
+
+  return LifetimePosition::FromInstructionIndex(
+      block->first_instruction_index());
+}
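+
+
+// Example (illustrative only): when a range that starts before a loop is
+// split at a position inside that loop, the walk above lands on the loop
+// header, so the split -- and the spill/reload moves it introduces -- is
+// hoisted to the header's first instruction instead of sitting on the hot
+// path inside the loop body.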
+
+
+void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
+  LiveRange* second_part = SplitAt(range, pos);
+  Spill(second_part);
+}
+
+
+void LAllocator::SpillBetween(LiveRange* range,
+                              LifetimePosition start,
+                              LifetimePosition end) {
+  ASSERT(start.Value() < end.Value());
+  LiveRange* second_part = SplitAt(range, start);
+
+  if (second_part->Start().Value() < end.Value()) {
+    // The split result intersects with [start, end[.
+    // Split it at a position between ]start+1, end[, spill the middle part,
+    // and add the rest to the unhandled list.
+    LiveRange* third_part = SplitBetween(
+        second_part,
+        second_part->Start().InstructionEnd(),
+        end.PrevInstruction().InstructionEnd());
+
+    ASSERT(third_part != second_part);
+
+    Spill(second_part);
+    AddToUnhandledSorted(third_part);
+  } else {
+    // The split result does not intersect with [start, end[.
+    // Nothing to spill; just add it to the unhandled list as a whole.
+    AddToUnhandledSorted(second_part);
+  }
+}
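+
+
+// Shape of the intersecting case above (illustrative only):
+//
+//   original:  [...............................[
+//                   ^start              ^end
+//   after:     [head[ second (spilled)  [ third (re-queued)  [
+//
+// The middle piece is spilled and the tail is re-queued, so only the part
+// of the range that overlaps [start, end[ loses its register.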
+
+
+void LAllocator::Spill(LiveRange* range) {
+  ASSERT(!range->IsSpilled());
+  TraceAlloc("Spilling live range %d\n", range->id());
+  LiveRange* first = range->TopLevel();
+
+  if (!first->HasAllocatedSpillOperand()) {
+    LOperand* op = TryReuseSpillSlot(range);
+    if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
+    first->SetSpillOperand(op);
+  }
+  range->MakeSpilled();
+}
+
+
+int LAllocator::RegisterCount() const {
+  return num_registers_;
+}
+
+
+#ifdef DEBUG
+
+
+void LAllocator::Verify() const {
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* current = live_ranges()->at(i);
+    if (current != NULL) current->Verify();
+  }
+}
+
+
+#endif
+
+
+} }  // namespace v8::internal
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
new file mode 100644
index 0000000..3ec984e
--- /dev/null
+++ b/src/lithium-allocator.h
@@ -0,0 +1,989 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LITHIUM_ALLOCATOR_H_
+#define V8_LITHIUM_ALLOCATOR_H_
+
+#include "v8.h"
+
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class HBasicBlock;
+class HGraph;
+class HInstruction;
+class HPhi;
+class HTracer;
+class HValue;
+class BitVector;
+class StringStream;
+
+class LArgument;
+class LChunk;
+class LConstantOperand;
+class LGap;
+class LInstruction;
+class LParallelMove;
+class LPointerMap;
+class LStackSlot;
+class LRegister;
+
+
+// This class represents a single point of a LOperand's lifetime.
+// For each lithium instruction there are exactly two lifetime positions:
+// the beginning and the end of the instruction. Lifetime positions for
+// different lithium instructions are disjoint.
+class LifetimePosition {
+ public:
+  // Return the lifetime position that corresponds to the beginning of
+  // the instruction with the given index.
+  static LifetimePosition FromInstructionIndex(int index) {
+    return LifetimePosition(index * kStep);
+  }
+
+  // Returns a numeric representation of this lifetime position.
+  int Value() const {
+    return value_;
+  }
+
+  // Returns the index of the instruction to which this lifetime position
+  // corresponds.
+  int InstructionIndex() const {
+    ASSERT(IsValid());
+    return value_ / kStep;
+  }
+
+  // Returns true if this lifetime position corresponds to the instruction
+  // start.
+  bool IsInstructionStart() const {
+    return (value_ & (kStep - 1)) == 0;
+  }
+
+  // Returns the lifetime position for the start of the instruction which
+  // corresponds to this lifetime position.
+  LifetimePosition InstructionStart() const {
+    ASSERT(IsValid());
+    return LifetimePosition(value_ & ~(kStep - 1));
+  }
+
+  // Returns the lifetime position for the end of the instruction which
+  // corresponds to this lifetime position.
+  LifetimePosition InstructionEnd() const {
+    ASSERT(IsValid());
+    return LifetimePosition(InstructionStart().Value() + kStep/2);
+  }
+
+  // Returns the lifetime position for the beginning of the next instruction.
+  LifetimePosition NextInstruction() const {
+    ASSERT(IsValid());
+    return LifetimePosition(InstructionStart().Value() + kStep);
+  }
+
+  // Returns the lifetime position for the beginning of the previous
+  // instruction.
+  LifetimePosition PrevInstruction() const {
+    ASSERT(IsValid());
+    ASSERT(value_ > 1);
+    return LifetimePosition(InstructionStart().Value() - kStep);
+  }
+
+  // Constructs the lifetime position which does not correspond to any
+  // instruction.
+  LifetimePosition() : value_(-1) {}
+
+  // Returns true if this lifetime position corresponds to some
+  // instruction.
+  bool IsValid() const { return value_ != -1; }
+
+  static inline LifetimePosition Invalid() { return LifetimePosition(); }
+
+  static inline LifetimePosition MaxPosition() {
+    // We have to use this kind of getter instead of a static member due to
+    // a crash bug in GDB.
+    return LifetimePosition(kMaxInt);
+  }
+
+ private:
+  static const int kStep = 2;
+
+  // Code relies on kStep being a power of two.
+  STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
+
+  explicit LifetimePosition(int value) : value_(value) { }
+
+  int value_;
+};
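+
+
+// Worked example (illustrative only): with kStep == 2, instruction index i
+// owns the two positions 2*i (start) and 2*i + 1 (end), so:
+//
+//   LifetimePosition p = LifetimePosition::FromInstructionIndex(3);
+//   p.Value();                    // 6 -- start of instruction 3
+//   p.InstructionEnd().Value();   // 7 -- end of instruction 3
+//   p.NextInstruction().Value();  // 8 -- start of instruction 4
+//
+// Conversely, a position with value 7 has InstructionIndex() == 3 and
+// IsInstructionStart() == false.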
+
+
+enum RegisterKind {
+  NONE,
+  GENERAL_REGISTERS,
+  DOUBLE_REGISTERS
+};
+
+
+class LOperand: public ZoneObject {
+ public:
+  enum Kind {
+    INVALID,
+    UNALLOCATED,
+    CONSTANT_OPERAND,
+    STACK_SLOT,
+    DOUBLE_STACK_SLOT,
+    REGISTER,
+    DOUBLE_REGISTER,
+    ARGUMENT
+  };
+
+  LOperand() : value_(KindField::encode(INVALID)) { }
+
+  Kind kind() const { return KindField::decode(value_); }
+  int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
+  bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; }
+  bool IsStackSlot() const { return kind() == STACK_SLOT; }
+  bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; }
+  bool IsRegister() const { return kind() == REGISTER; }
+  bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
+  bool IsArgument() const { return kind() == ARGUMENT; }
+  bool IsUnallocated() const { return kind() == UNALLOCATED; }
+  bool Equals(LOperand* other) const { return value_ == other->value_; }
+  int VirtualRegister();
+
+  void PrintTo(StringStream* stream);
+  void ConvertTo(Kind kind, int index) {
+    value_ = KindField::encode(kind);
+    value_ |= index << kKindFieldWidth;
+    ASSERT(this->index() == index);
+  }
+
+ protected:
+  static const int kKindFieldWidth = 3;
+  class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
+
+  LOperand(Kind kind, int index) { ConvertTo(kind, index); }
+
+  unsigned value_;
+};
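+
+
+// Encoding example (illustrative only): the kind lives in the low
+// kKindFieldWidth == 3 bits and the index in the bits above them, so after
+//
+//   LOperand op;
+//   op.ConvertTo(LOperand::STACK_SLOT, 5);
+//
+// value_ is (5 << 3) | STACK_SLOT == 43, op.kind() == STACK_SLOT and
+// op.index() == 5.  Negative indices round-trip as well because index()
+// shifts a signed int.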
+
+
+class LUnallocated: public LOperand {
+ public:
+  enum Policy {
+    NONE,
+    ANY,
+    FIXED_REGISTER,
+    FIXED_DOUBLE_REGISTER,
+    FIXED_SLOT,
+    MUST_HAVE_REGISTER,
+    WRITABLE_REGISTER,
+    SAME_AS_FIRST_INPUT,
+    SAME_AS_ANY_INPUT,
+    IGNORE
+  };
+
+  // Lifetime of operand inside the instruction.
+  enum Lifetime {
+    // USED_AT_START operand is guaranteed to be live only at
+    // instruction start. Register allocator is free to assign the same register
+    // to some other operand used inside instruction (i.e. temporary or
+    // output).
+    USED_AT_START,
+
+    // USED_AT_END operand is treated as live until the end of the
+    // instruction. This means that the register allocator will not reuse its
+    // register for any other operand inside the instruction.
+    USED_AT_END
+  };
+
+  explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
+    Initialize(policy, 0, USED_AT_END);
+  }
+
+  LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
+    Initialize(policy, fixed_index, USED_AT_END);
+  }
+
+  LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
+    Initialize(policy, 0, lifetime);
+  }
+
+  // The superclass has a KindField.  Some policies have a signed fixed
+  // index in the upper bits.
+  static const int kPolicyWidth = 4;
+  static const int kLifetimeWidth = 1;
+  static const int kVirtualRegisterWidth = 17;
+
+  static const int kPolicyShift = kKindFieldWidth;
+  static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
+  static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
+  static const int kFixedIndexShift =
+      kVirtualRegisterShift + kVirtualRegisterWidth;
+
+  class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
+
+  class LifetimeField
+      : public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
+  };
+
+  class VirtualRegisterField
+      : public BitField<unsigned,
+                        kVirtualRegisterShift,
+                        kVirtualRegisterWidth> {
+  };
+
+  static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
+  static const int kMaxFixedIndices = 128;
+
+  bool HasIgnorePolicy() const { return policy() == IGNORE; }
+  bool HasNoPolicy() const { return policy() == NONE; }
+  bool HasAnyPolicy() const {
+    return policy() == ANY;
+  }
+  bool HasFixedPolicy() const {
+    return policy() == FIXED_REGISTER ||
+        policy() == FIXED_DOUBLE_REGISTER ||
+        policy() == FIXED_SLOT;
+  }
+  bool HasRegisterPolicy() const {
+    return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
+  }
+  bool HasSameAsInputPolicy() const {
+    return policy() == SAME_AS_FIRST_INPUT || policy() == SAME_AS_ANY_INPUT;
+  }
+  Policy policy() const { return PolicyField::decode(value_); }
+  void set_policy(Policy policy) {
+    value_ &= ~PolicyField::mask();
+    value_ |= PolicyField::encode(policy);
+  }
+  int fixed_index() const {
+    return static_cast<int>(value_) >> kFixedIndexShift;
+  }
+
+  unsigned virtual_register() const {
+    return VirtualRegisterField::decode(value_);
+  }
+
+  void set_virtual_register(unsigned id) {
+    value_ &= ~VirtualRegisterField::mask();
+    value_ |= VirtualRegisterField::encode(id);
+  }
+
+  LUnallocated* CopyUnconstrained() {
+    LUnallocated* result = new LUnallocated(ANY);
+    result->set_virtual_register(virtual_register());
+    return result;
+  }
+
+  static LUnallocated* cast(LOperand* op) {
+    ASSERT(op->IsUnallocated());
+    return reinterpret_cast<LUnallocated*>(op);
+  }
+
+  bool IsUsedAtStart() {
+    return LifetimeField::decode(value_) == USED_AT_START;
+  }
+
+ private:
+  void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
+    value_ |= PolicyField::encode(policy);
+    value_ |= LifetimeField::encode(lifetime);
+    value_ |= fixed_index << kFixedIndexShift;
+    ASSERT(this->fixed_index() == fixed_index);
+  }
+};
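+
+
+// Bit layout sketch (illustrative only, derived from the constants above),
+// from least to most significant bit of value_:
+//
+//   [0..2]    kind             (KindField, always UNALLOCATED here)
+//   [3..6]    policy           (PolicyField)
+//   [7]       lifetime         (LifetimeField)
+//   [8..24]   virtual register (VirtualRegisterField)
+//   [25..31]  fixed index      (signed, recovered with an arithmetic shift)
+//
+// For instance, LUnallocated(FIXED_REGISTER, 2) encodes policy
+// FIXED_REGISTER with fixed_index() == 2 and virtual_register() == 0.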
+
+
+class LMoveOperands BASE_EMBEDDED {
+ public:
+  LMoveOperands(LOperand* from, LOperand* to) : from_(from), to_(to) { }
+
+  LOperand* from() const { return from_; }
+  LOperand* to() const { return to_; }
+  bool IsRedundant() const {
+    return IsEliminated() || from_->Equals(to_) || IsIgnored();
+  }
+  bool IsEliminated() const { return from_ == NULL; }
+  bool IsIgnored() const {
+    if (to_ != NULL && to_->IsUnallocated() &&
+      LUnallocated::cast(to_)->HasIgnorePolicy()) {
+      return true;
+    }
+    return false;
+  }
+
+  void Eliminate() { from_ = to_ = NULL; }
+
+ private:
+  LOperand* from_;
+  LOperand* to_;
+};
+
+
+class LConstantOperand: public LOperand {
+ public:
+  static LConstantOperand* Create(int index) {
+    ASSERT(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new LConstantOperand(index);
+  }
+
+  static LConstantOperand* cast(LOperand* op) {
+    ASSERT(op->IsConstantOperand());
+    return reinterpret_cast<LConstantOperand*>(op);
+  }
+
+  static void SetupCache();
+
+ private:
+  static const int kNumCachedOperands = 128;
+  static LConstantOperand cache[];
+
+  LConstantOperand() : LOperand() { }
+  explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
+};
+
+
+class LArgument: public LOperand {
+ public:
+  explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
+
+  static LArgument* cast(LOperand* op) {
+    ASSERT(op->IsArgument());
+    return reinterpret_cast<LArgument*>(op);
+  }
+};
+
+
+class LStackSlot: public LOperand {
+ public:
+  static LStackSlot* Create(int index) {
+    ASSERT(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new LStackSlot(index);
+  }
+
+  static LStackSlot* cast(LOperand* op) {
+    ASSERT(op->IsStackSlot());
+    return reinterpret_cast<LStackSlot*>(op);
+  }
+
+  static void SetupCache();
+
+ private:
+  static const int kNumCachedOperands = 128;
+  static LStackSlot cache[];
+
+  LStackSlot() : LOperand() { }
+  explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
+};
+
+
+class LDoubleStackSlot: public LOperand {
+ public:
+  static LDoubleStackSlot* Create(int index) {
+    ASSERT(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new LDoubleStackSlot(index);
+  }
+
+  static LDoubleStackSlot* cast(LOperand* op) {
+    ASSERT(op->IsDoubleStackSlot());
+    return reinterpret_cast<LDoubleStackSlot*>(op);
+  }
+
+  static void SetupCache();
+
+ private:
+  static const int kNumCachedOperands = 128;
+  static LDoubleStackSlot cache[];
+
+  LDoubleStackSlot() : LOperand() { }
+  explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
+};
+
+
+class LRegister: public LOperand {
+ public:
+  static LRegister* Create(int index) {
+    ASSERT(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new LRegister(index);
+  }
+
+  static LRegister* cast(LOperand* op) {
+    ASSERT(op->IsRegister());
+    return reinterpret_cast<LRegister*>(op);
+  }
+
+  static void SetupCache();
+
+ private:
+  static const int kNumCachedOperands = 16;
+  static LRegister cache[];
+
+  LRegister() : LOperand() { }
+  explicit LRegister(int index) : LOperand(REGISTER, index) { }
+};
+
+
+class LDoubleRegister: public LOperand {
+ public:
+  static LDoubleRegister* Create(int index) {
+    ASSERT(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new LDoubleRegister(index);
+  }
+
+  static LDoubleRegister* cast(LOperand* op) {
+    ASSERT(op->IsDoubleRegister());
+    return reinterpret_cast<LDoubleRegister*>(op);
+  }
+
+  static void SetupCache();
+
+ private:
+  static const int kNumCachedOperands = 16;
+  static LDoubleRegister cache[];
+
+  LDoubleRegister() : LOperand() { }
+  explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
+};
+
+
+// A register-allocator view of a Lithium instruction.  It contains the
+// output operand and the lists of input and temporary operands.
+class InstructionSummary: public ZoneObject {
+ public:
+  InstructionSummary()
+      : output_operand_(NULL), input_count_(0), operands_(4), is_call_(false) {}
+
+  // Output operands.
+  LOperand* Output() const { return output_operand_; }
+  void SetOutput(LOperand* output) {
+    ASSERT(output_operand_ == NULL);
+    output_operand_ = output;
+  }
+
+  // Input operands.
+  int InputCount() const { return input_count_; }
+  LOperand* InputAt(int i) const {
+    ASSERT(i < input_count_);
+    return operands_[i];
+  }
+  void AddInput(LOperand* input) {
+    operands_.InsertAt(input_count_, input);
+    input_count_++;
+  }
+
+  // Temporary operands.
+  int TempCount() const { return operands_.length() - input_count_; }
+  LOperand* TempAt(int i) const { return operands_[i + input_count_]; }
+  void AddTemp(LOperand* temp) { operands_.Add(temp); }
+
+  void MarkAsCall() { is_call_ = true; }
+  bool IsCall() const { return is_call_; }
+
+ private:
+  LOperand* output_operand_;
+  int input_count_;
+  ZoneList<LOperand*> operands_;
+  bool is_call_;
+};
+
+// Representation of the non-empty interval [start,end[.
+class UseInterval: public ZoneObject {
+ public:
+  UseInterval(LifetimePosition start, LifetimePosition end)
+      : start_(start), end_(end), next_(NULL) {
+    ASSERT(start.Value() < end.Value());
+  }
+
+  LifetimePosition start() const { return start_; }
+  LifetimePosition end() const { return end_; }
+  UseInterval* next() const { return next_; }
+
+  // Split this interval at the given position without affecting the
+  // live range that owns it. The interval must contain the position.
+  void SplitAt(LifetimePosition pos);
+
+  // If this interval intersects with the other, returns the smallest
+  // position that belongs to both of them.
+  LifetimePosition Intersect(const UseInterval* other) const {
+    if (other->start().Value() < start_.Value()) return other->Intersect(this);
+    if (other->start().Value() < end_.Value()) return other->start();
+    return LifetimePosition::Invalid();
+  }
+
+  bool Contains(LifetimePosition point) const {
+    return start_.Value() <= point.Value() && point.Value() < end_.Value();
+  }
+
+ private:
+  void set_start(LifetimePosition start) { start_ = start; }
+  void set_next(UseInterval* next) { next_ = next; }
+
+  LifetimePosition start_;
+  LifetimePosition end_;
+  UseInterval* next_;
+
+  friend class LiveRange;  // Assigns to start_.
+};
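+
+// Intersection example (illustrative only): for intervals a == [2, 8[ and
+// b == [6, 12[, a.Intersect(b) returns the later start, 6, because it lies
+// inside [2, 8[; for disjoint intervals such as [2, 6[ and [8, 12[ it
+// returns LifetimePosition::Invalid().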
+
+// Representation of a use position.
+class UsePosition: public ZoneObject {
+ public:
+  UsePosition(LifetimePosition pos, LOperand* operand)
+      : operand_(operand),
+        hint_(NULL),
+        pos_(pos),
+        next_(NULL),
+        requires_reg_(false),
+        register_beneficial_(true) {
+    if (operand_ != NULL && operand_->IsUnallocated()) {
+      LUnallocated* unalloc = LUnallocated::cast(operand_);
+      requires_reg_ = unalloc->HasRegisterPolicy();
+      register_beneficial_ = !unalloc->HasAnyPolicy();
+    }
+    ASSERT(pos_.IsValid());
+  }
+
+  LOperand* operand() const { return operand_; }
+  bool HasOperand() const { return operand_ != NULL; }
+
+  LOperand* hint() const { return hint_; }
+  void set_hint(LOperand* hint) { hint_ = hint; }
+  bool HasHint() const { return hint_ != NULL && !hint_->IsUnallocated(); }
+  bool RequiresRegister() const;
+  bool RegisterIsBeneficial() const;
+
+  LifetimePosition pos() const { return pos_; }
+  UsePosition* next() const { return next_; }
+
+ private:
+  void set_next(UsePosition* next) { next_ = next; }
+
+  LOperand* operand_;
+  LOperand* hint_;
+  LifetimePosition pos_;
+  UsePosition* next_;
+  bool requires_reg_;
+  bool register_beneficial_;
+
+  friend class LiveRange;
+};
+
+// Representation of SSA values' live ranges as a collection of (continuous)
+// intervals over the instruction ordering.
+class LiveRange: public ZoneObject {
+ public:
+  static const int kInvalidAssignment = 0x7fffffff;
+
+  explicit LiveRange(int id)
+      : id_(id),
+        spilled_(false),
+        assigned_register_(kInvalidAssignment),
+        assigned_register_kind_(NONE),
+        last_interval_(NULL),
+        first_interval_(NULL),
+        first_pos_(NULL),
+        parent_(NULL),
+        next_(NULL),
+        current_interval_(NULL),
+        last_processed_use_(NULL),
+        spill_start_index_(kMaxInt) {
+    spill_operand_ = new LUnallocated(LUnallocated::IGNORE);
+  }
+
+  UseInterval* first_interval() const { return first_interval_; }
+  UsePosition* first_pos() const { return first_pos_; }
+  LiveRange* parent() const { return parent_; }
+  LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
+  LiveRange* next() const { return next_; }
+  bool IsChild() const { return parent() != NULL; }
+  bool IsParent() const { return parent() == NULL; }
+  int id() const { return id_; }
+  bool IsFixed() const { return id_ < 0; }
+  bool IsEmpty() const { return first_interval() == NULL; }
+  LOperand* CreateAssignedOperand();
+  int assigned_register() const { return assigned_register_; }
+  int spill_start_index() const { return spill_start_index_; }
+  void set_assigned_register(int reg, RegisterKind register_kind) {
+    ASSERT(!HasRegisterAssigned() && !IsSpilled());
+    assigned_register_ = reg;
+    assigned_register_kind_ = register_kind;
+    ConvertOperands();
+  }
+  void MakeSpilled() {
+    ASSERT(!IsSpilled());
+    ASSERT(TopLevel()->HasAllocatedSpillOperand());
+    spilled_ = true;
+    assigned_register_ = kInvalidAssignment;
+    ConvertOperands();
+  }
+
+  // Returns use position in this live range that follows both start
+  // and last processed use position.
+  // Modifies internal state of live range!
+  UsePosition* NextUsePosition(LifetimePosition start);
+
+  // Returns use position for which a register is required in this live
+  // range and which follows both start and the last processed use position.
+  // Modifies internal state of live range!
+  UsePosition* NextRegisterPosition(LifetimePosition start);
+
+  // Returns use position for which a register is beneficial in this live
+  // range and which follows both start and the last processed use position.
+  // Modifies internal state of live range!
+  UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
+
+  // Returns true if this live range can be spilled at the given position.
+  bool CanBeSpilled(LifetimePosition pos);
+
+  // Split this live range at the given position which must follow the start of
+  // the range.
+  // All uses following the given position will be moved from this
+  // live range to the result live range.
+  void SplitAt(LifetimePosition position, LiveRange* result);
+
+  bool IsDouble() const { return assigned_register_kind_ == DOUBLE_REGISTERS; }
+  bool HasRegisterAssigned() const {
+    return assigned_register_ != kInvalidAssignment;
+  }
+  bool IsSpilled() const { return spilled_; }
+  UsePosition* FirstPosWithHint() const;
+
+  LOperand* FirstHint() const {
+    UsePosition* pos = FirstPosWithHint();
+    if (pos != NULL) return pos->hint();
+    return NULL;
+  }
+
+  LifetimePosition Start() const {
+    ASSERT(!IsEmpty());
+    return first_interval()->start();
+  }
+
+  LifetimePosition End() const {
+    ASSERT(!IsEmpty());
+    return last_interval_->end();
+  }
+
+  bool HasAllocatedSpillOperand() const {
+    return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
+  }
+  LOperand* GetSpillOperand() const { return spill_operand_; }
+  void SetSpillOperand(LOperand* operand) {
+    ASSERT(!operand->IsUnallocated());
+    ASSERT(spill_operand_ != NULL);
+    ASSERT(spill_operand_->IsUnallocated());
+    spill_operand_->ConvertTo(operand->kind(), operand->index());
+  }
+
+  void SetSpillStartIndex(int start) {
+    spill_start_index_ = Min(start, spill_start_index_);
+  }
+
+  bool ShouldBeAllocatedBefore(const LiveRange* other) const;
+  bool CanCover(LifetimePosition position) const;
+  bool Covers(LifetimePosition position);
+  LifetimePosition FirstIntersection(LiveRange* other);
+
+  // Add a new interval or a new use position to this live range.
+  void EnsureInterval(LifetimePosition start, LifetimePosition end);
+  void AddUseInterval(LifetimePosition start, LifetimePosition end);
+  UsePosition* AddUsePosition(LifetimePosition pos, LOperand* operand);
+  UsePosition* AddUsePosition(LifetimePosition pos);
+
+  // Shorten the most recently added interval by setting a new start.
+  void ShortenTo(LifetimePosition start);
+
+#ifdef DEBUG
+  // True if target overlaps an existing interval.
+  bool HasOverlap(UseInterval* target) const;
+  void Verify() const;
+#endif
+
+ private:
+  void ConvertOperands();
+  UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
+  void AdvanceLastProcessedMarker(UseInterval* to_start_of,
+                                  LifetimePosition but_not_past) const;
+
+  int id_;
+  bool spilled_;
+  int assigned_register_;
+  RegisterKind assigned_register_kind_;
+  UseInterval* last_interval_;
+  UseInterval* first_interval_;
+  UsePosition* first_pos_;
+  LiveRange* parent_;
+  LiveRange* next_;
+  // This is used as a cache; it doesn't affect correctness.
+  mutable UseInterval* current_interval_;
+  UsePosition* last_processed_use_;
+  LOperand* spill_operand_;
+  int spill_start_index_;
+};
+
+
+class LAllocator BASE_EMBEDDED {
+ public:
+  explicit LAllocator(int first_virtual_register, HGraph* graph)
+      : chunk_(NULL),
+        summaries_(0),
+        next_summary_(NULL),
+        summary_stack_(2),
+        live_in_sets_(0),
+        live_ranges_(16),
+        fixed_live_ranges_(8),
+        fixed_double_live_ranges_(8),
+        unhandled_live_ranges_(8),
+        active_live_ranges_(8),
+        inactive_live_ranges_(8),
+        reusable_slots_(8),
+        next_virtual_register_(first_virtual_register),
+        mode_(NONE),
+        num_registers_(-1),
+        graph_(graph),
+        has_osr_entry_(false) {}
+
+  static void Setup();
+  static void TraceAlloc(const char* msg, ...);
+
+  // Lithium translation support.
+  // Record a use of an input operand in the current instruction.
+  void RecordUse(HValue* value, LUnallocated* operand);
+  // Record the definition of the output operand.
+  void RecordDefinition(HInstruction* instr, LUnallocated* operand);
+  // Record a temporary operand.
+  void RecordTemporary(LUnallocated* operand);
+
+  // Marks the current instruction as a call.
+  void MarkAsCall();
+
+  // Checks whether the value of a given virtual register is tagged.
+  bool HasTaggedValue(int virtual_register) const;
+
+  // Returns the register kind required by the given virtual register.
+  RegisterKind RequiredRegisterKind(int virtual_register) const;
+
+  // Begin a new instruction.
+  void BeginInstruction();
+
+  // Summarize the current instruction.
+  void SummarizeInstruction(int index);
+
+  // Discard the current instruction's summary instead of recording it.
+  void OmitInstruction();
+
+  // Control max function size.
+  static int max_initial_value_ids();
+
+  void Allocate(LChunk* chunk);
+
+  const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
+  const ZoneList<LiveRange*>* fixed_live_ranges() const {
+    return &fixed_live_ranges_;
+  }
+  const ZoneList<LiveRange*>* fixed_double_live_ranges() const {
+    return &fixed_double_live_ranges_;
+  }
+
+  LChunk* chunk() const { return chunk_; }
+  HGraph* graph() const { return graph_; }
+
+  void MarkAsOsrEntry() {
+    // There can be only one.
+    ASSERT(!has_osr_entry_);
+    // Simply set a flag to find and process instruction later.
+    has_osr_entry_ = true;
+  }
+
+#ifdef DEBUG
+  void Verify() const;
+#endif
+
+ private:
+  void MeetRegisterConstraints();
+  void ResolvePhis();
+  void BuildLiveRanges();
+  void AllocateGeneralRegisters();
+  void AllocateDoubleRegisters();
+  void ConnectRanges();
+  void ResolveControlFlow();
+  void PopulatePointerMaps();
+  void ProcessOsrEntry();
+  void AllocateRegisters();
+  bool CanEagerlyResolveControlFlow(HBasicBlock* block) const;
+  inline bool SafePointsAreInOrder() const;
+
+  // Liveness analysis support.
+  void InitializeLivenessAnalysis();
+  BitVector* ComputeLiveOut(HBasicBlock* block);
+  void AddInitialIntervals(HBasicBlock* block, BitVector* live_out);
+  void ProcessInstructions(HBasicBlock* block, BitVector* live);
+  void MeetRegisterConstraints(HBasicBlock* block);
+  void MeetConstraintsBetween(InstructionSummary* first,
+                              InstructionSummary* second,
+                              int gap_index);
+  void ResolvePhis(HBasicBlock* block);
+
+  // Helper methods for building intervals.
+  LOperand* AllocateFixed(LUnallocated* operand, int pos, bool is_tagged);
+  LiveRange* LiveRangeFor(LOperand* operand);
+  void Define(LifetimePosition position, LOperand* operand, LOperand* hint);
+  void Use(LifetimePosition block_start,
+           LifetimePosition position,
+           LOperand* operand,
+           LOperand* hint);
+  void AddConstraintsGapMove(int index, LOperand* from, LOperand* to);
+
+  // Helper methods for updating the live range lists.
+  void AddToActive(LiveRange* range);
+  void AddToInactive(LiveRange* range);
+  void AddToUnhandledSorted(LiveRange* range);
+  void AddToUnhandledUnsorted(LiveRange* range);
+  void SortUnhandled();
+  bool UnhandledIsSorted();
+  void ActiveToHandled(LiveRange* range);
+  void ActiveToInactive(LiveRange* range);
+  void InactiveToHandled(LiveRange* range);
+  void InactiveToActive(LiveRange* range);
+  void FreeSpillSlot(LiveRange* range);
+  LOperand* TryReuseSpillSlot(LiveRange* range);
+
+  // Helper methods for allocating registers.
+  bool TryAllocateFreeReg(LiveRange* range);
+  void AllocateBlockedReg(LiveRange* range);
+
+  // Live range splitting helpers.
+
+  // Split the given range at the given position.
+  // If range starts at or after the given position then the
+  // original range is returned.
+  // Otherwise returns the live range that starts at pos and contains
+  // all uses from the original range that follow pos. Uses at pos will
+  // still be owned by the original range after splitting.
+  LiveRange* SplitAt(LiveRange* range, LifetimePosition pos);
+
+  // Split the given range at a position chosen from the interval
+  // [start, end].
+  LiveRange* SplitBetween(LiveRange* range,
+                          LifetimePosition start,
+                          LifetimePosition end);
+
+  // Find a lifetime position in the interval [start, end] which
+  // is optimal for splitting: either the header of the outermost
+  // loop covered by this interval or the latest possible position.
+  LifetimePosition FindOptimalSplitPos(LifetimePosition start,
+                                       LifetimePosition end);
+
+  // Spill the given live range after position pos.
+  void SpillAfter(LiveRange* range, LifetimePosition pos);
+
+  // Spill the given live range after position start and up to position end.
+  void SpillBetween(LiveRange* range,
+                    LifetimePosition start,
+                    LifetimePosition end);
+
+  void SplitAndSpillIntersecting(LiveRange* range);
+
+  void Spill(LiveRange* range);
+  bool IsBlockBoundary(LifetimePosition pos);
+  void AddGapMove(int pos, LiveRange* prev, LiveRange* next);
+
+  // Helper methods for resolving control flow.
+  void ResolveControlFlow(LiveRange* range,
+                          HBasicBlock* block,
+                          HBasicBlock* pred);
+
+  // Return parallel move that should be used to connect ranges split at the
+  // given position.
+  LParallelMove* GetConnectingParallelMove(LifetimePosition pos);
+
+  // Return the block which contains the given lifetime position.
+  HBasicBlock* GetBlock(LifetimePosition pos);
+
+  // Current active summary.
+  InstructionSummary* current_summary() const { return summary_stack_.last(); }
+
+  // Get summary for given instruction index.
+  InstructionSummary* GetSummary(int index) const { return summaries_[index]; }
+
+  // Helper methods for the fixed registers.
+  int RegisterCount() const;
+  static int FixedLiveRangeID(int index) { return -index - 1; }
+  static int FixedDoubleLiveRangeID(int index);
+  LiveRange* FixedLiveRangeFor(int index);
+  LiveRange* FixedDoubleLiveRangeFor(int index);
+  LiveRange* LiveRangeFor(int index);
+  HPhi* LookupPhi(LOperand* operand) const;
+  LGap* GetLastGap(HBasicBlock* block) const;
+
+  const char* RegisterName(int allocation_index);
+
+  LChunk* chunk_;
+  ZoneList<InstructionSummary*> summaries_;
+  InstructionSummary* next_summary_;
+
+  ZoneList<InstructionSummary*> summary_stack_;
+
+  // During liveness analysis keep a mapping from block id to live_in sets
+  // for blocks already analyzed.
+  ZoneList<BitVector*> live_in_sets_;
+
+  // Liveness analysis results.
+  ZoneList<LiveRange*> live_ranges_;
+
+  // Lists of live ranges.
+  ZoneList<LiveRange*> fixed_live_ranges_;
+  ZoneList<LiveRange*> fixed_double_live_ranges_;
+  ZoneList<LiveRange*> unhandled_live_ranges_;
+  ZoneList<LiveRange*> active_live_ranges_;
+  ZoneList<LiveRange*> inactive_live_ranges_;
+  ZoneList<LiveRange*> reusable_slots_;
+
+  // Next virtual register number to be assigned to temporaries.
+  int next_virtual_register_;
+
+  RegisterKind mode_;
+  int num_registers_;
+
+  HGraph* graph_;
+
+  bool has_osr_entry_;
+
+  DISALLOW_COPY_AND_ASSIGN(LAllocator);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_LITHIUM_ALLOCATOR_H_
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index 83b703f..0f7c12d 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -140,9 +140,13 @@
     // Collect shared infos for functions whose code need to be patched.
     var replaced_function_infos = new Array();
     for (var i = 0; i < replace_code_list.length; i++) {
-      var info_wrapper = replace_code_list[i].live_shared_info_wrapper;
-      if (info_wrapper) {
-        replaced_function_infos.push(info_wrapper);
+      var live_shared_function_infos =
+          replace_code_list[i].live_shared_function_infos;
+
+      if (live_shared_function_infos) {
+        for (var j = 0; j < live_shared_function_infos.length; j++) {
+          replaced_function_infos.push(live_shared_function_infos[j]);
+        }
       }
     }
 
@@ -204,6 +208,13 @@
       // unchanged and whether positions changed at all.
       PatchPositions(update_positions_list[i], diff_array,
           position_patch_report);
+
+      if (update_positions_list[i].live_shared_function_infos) {
+        update_positions_list[i].live_shared_function_infos.
+            forEach(function (info) {
+                %LiveEditFunctionSourceUpdated(info.raw_array);
+              });
+      }
     }
 
     break_points_restorer(pos_translator, old_script);
@@ -294,29 +305,34 @@
   // Replaces function's Code.
   function PatchFunctionCode(old_node, change_log) {
     var new_info = old_node.corresponding_node.info;
-    var shared_info_wrapper = old_node.live_shared_info_wrapper;
-    if (shared_info_wrapper) {
-      %LiveEditReplaceFunctionCode(new_info.raw_array,
-          shared_info_wrapper.raw_array);
+    if (old_node.live_shared_function_infos) {
+      old_node.live_shared_function_infos.forEach(function (old_info) {
+        %LiveEditReplaceFunctionCode(new_info.raw_array,
+                                     old_info.raw_array);
 
-      // The function got a new code. However, this new code brings all new
-      // instances of SharedFunctionInfo for nested functions. However,
-      // we want the original instances to be used wherever possible.
-      // (This is because old instances and new instances will be both
-      // linked to a script and breakpoints subsystem does not really
-      // expects this; neither does LiveEdit subsystem on next call).
-      for (var i = 0; i < old_node.children.length; i++) {
-        if (old_node.children[i].corresponding_node) {
-          var corresponding_child = old_node.children[i].corresponding_node;
-          var child_shared_info_wrapper =
-              old_node.children[i].live_shared_info_wrapper;
-          if (child_shared_info_wrapper) {
-            %LiveEditReplaceRefToNestedFunction(shared_info_wrapper.info,
-                corresponding_child.info.shared_function_info,
-                child_shared_info_wrapper.info);
+        // The function got new code. However, this new code brings all new
+        // instances of SharedFunctionInfo for nested functions, whereas
+        // we want the original instances to be used wherever possible.
+        // (Old and new instances would both be linked to a script, and
+        // neither the breakpoints subsystem nor the LiveEdit subsystem
+        // expects that on the next call.)
+        for (var i = 0; i < old_node.children.length; i++) {
+          if (old_node.children[i].corresponding_node) {
+            var corresponding_child_info =
+                old_node.children[i].corresponding_node.info.
+                    shared_function_info;
+
+            if (old_node.children[i].live_shared_function_infos) {
+              old_node.children[i].live_shared_function_infos.
+                  forEach(function (old_child_info) {
+                    %LiveEditReplaceRefToNestedFunction(old_info.info,
+                                                        corresponding_child_info,
+                                                        old_child_info.info);
+                  });
+            }
           }
         }
-      }
+      });
 
       change_log.push( {function_patched: new_info.function_name} );
     } else {
@@ -330,10 +346,13 @@
   // one representing its old version). This way the function still
   // may access its own text.
   function LinkToOldScript(old_info_node, old_script, report_array) {
-    var shared_info = old_info_node.live_shared_info_wrapper;
-    if (shared_info) {
-      %LiveEditFunctionSetScript(shared_info.info, old_script);
-      report_array.push( { name: shared_info.function_name } );
+    if (old_info_node.live_shared_function_infos) {
+      old_info_node.live_shared_function_infos.
+          forEach(function (info) {
+            %LiveEditFunctionSetScript(info.info, old_script);
+          });
+
+      report_array.push( { name: old_info_node.info.function_name } );
     } else {
       report_array.push(
           { name: old_info_node.info.function_name, not_found: true } );
@@ -525,7 +544,7 @@
     this.textual_corresponding_node = void 0;
     this.textually_unmatched_new_nodes = void 0;
 
-    this.live_shared_info_wrapper = void 0;
+    this.live_shared_function_infos = void 0;
   }
 
   // From array of function infos that is implicitly a tree creates
@@ -765,23 +784,27 @@
       shared_infos.push(new SharedInfoWrapper(shared_raw_list[i]));
     }
 
-    // Finds SharedFunctionInfo that corresponds compile info with index
+    // Finds all SharedFunctionInfos that correspond to the compile info
     // in old version of the script.
-    function FindFunctionInfo(compile_info) {
+    function FindFunctionInfos(compile_info) {
+      var wrappers = [];
+
       for (var i = 0; i < shared_infos.length; i++) {
         var wrapper = shared_infos[i];
         if (wrapper.start_position == compile_info.start_position &&
             wrapper.end_position == compile_info.end_position) {
-          return wrapper;
+          wrappers.push(wrapper);
         }
       }
+
+      if (wrappers.length > 0) {
+        return wrappers;
+      }
     }
 
     function TraverseTree(node) {
-      var info_wrapper = FindFunctionInfo(node.info);
-      if (info_wrapper) {
-        node.live_shared_info_wrapper = info_wrapper;
-      }
+      node.live_shared_function_infos = FindFunctionInfos(node.info);
+
       for (var i = 0; i < node.children.length; i++) {
         TraverseTree(node.children[i]);
       }
@@ -817,16 +840,18 @@
 
  // Changes positions (including all statements) in the function.
   function PatchPositions(old_info_node, diff_array, report_array) {
-    var shared_info_wrapper = old_info_node.live_shared_info_wrapper;
-    if (!shared_info_wrapper) {
+    if (old_info_node.live_shared_function_infos) {
+      old_info_node.live_shared_function_infos.forEach(function (info) {
+          %LiveEditPatchFunctionPositions(info.raw_array,
+                                          diff_array);
+      });
+
+      report_array.push( { name: old_info_node.info.function_name } );
+    } else {
       // TODO(LiveEdit): function is not compiled yet or is already collected.
       report_array.push(
           { name: old_info_node.info.function_name, info_not_found: true } );
-      return;
     }
-    %LiveEditPatchFunctionPositions(shared_info_wrapper.raw_array,
-        diff_array);
-    report_array.push( { name: old_info_node.info.function_name } );
   }
 
   // Adds a suffix to script name to mark that it is old version.
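
The liveedit-debugger.js changes above generalize the old one-to-one lookup
(FindFunctionInfo returning a single wrapper) into a one-to-many lookup,
since several live SharedFunctionInfos can match the same source range, for
example when the same source has been compiled more than once. A minimal
C++ sketch of the matching rule, with Wrapper and CompileInfo as
hypothetical stand-ins for the script-side types:

    #include <vector>

    struct CompileInfo { int start_position; int end_position; };
    struct Wrapper     { int start_position; int end_position; };

    // Collect every wrapper whose source range matches; unlike the old
    // FindFunctionInfo, never stop at the first hit.
    std::vector<Wrapper*> FindFunctionInfos(
        const CompileInfo& info, const std::vector<Wrapper*>& all) {
      std::vector<Wrapper*> result;
      for (std::size_t i = 0; i < all.size(); i++) {
        if (all[i]->start_position == info.start_position &&
            all[i]->end_position == info.end_position) {
          result.push_back(all[i]);
        }
      }
      return result;
    }
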
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 642b3e6..c4cb68e 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -31,7 +31,9 @@
 #include "liveedit.h"
 
 #include "compiler.h"
+#include "compilation-cache.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "global-handles.h"
 #include "memory.h"
 #include "oprofile-agent.h"
@@ -605,18 +607,18 @@
 
   void FunctionDone() {
     HandleScope scope;
-    Object* element =
-        result_->GetElementNoExceptionThrown(current_parent_index_);
-    FunctionInfoWrapper info = FunctionInfoWrapper::cast(element);
+    FunctionInfoWrapper info =
+        FunctionInfoWrapper::cast(
+            result_->GetElementNoExceptionThrown(current_parent_index_));
     current_parent_index_ = info.GetParentIndex();
   }
 
   // Saves only function code, because for a script function we
   // may never create a SharedFunctionInfo object.
   void FunctionCode(Handle<Code> function_code) {
-    Object* element =
-        result_->GetElementNoExceptionThrown(current_parent_index_);
-    FunctionInfoWrapper info = FunctionInfoWrapper::cast(element);
+    FunctionInfoWrapper info =
+        FunctionInfoWrapper::cast(
+            result_->GetElementNoExceptionThrown(current_parent_index_));
     info.SetFunctionCode(function_code, Handle<Object>(Heap::null_value()));
   }
 
@@ -626,9 +628,9 @@
     if (!shared->IsSharedFunctionInfo()) {
       return;
     }
-    Object* element =
-        result_->GetElementNoExceptionThrown(current_parent_index_);
-    FunctionInfoWrapper info = FunctionInfoWrapper::cast(element);
+    FunctionInfoWrapper info =
+        FunctionInfoWrapper::cast(
+            result_->GetElementNoExceptionThrown(current_parent_index_));
     info.SetFunctionCode(Handle<Code>(shared->code()),
         Handle<Object>(shared->scope_info()));
     info.SetSharedFunctionInfo(shared);
@@ -828,6 +830,61 @@
 }
 
 
+// Returns true if an instance of candidate was inlined into function's code.
+static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
+  AssertNoAllocation no_gc;
+
+  if (function->code()->kind() != Code::OPTIMIZED_FUNCTION) return false;
+
+  DeoptimizationInputData* data =
+      DeoptimizationInputData::cast(function->code()->deoptimization_data());
+
+  if (data == Heap::empty_fixed_array()) return false;
+
+  FixedArray* literals = data->LiteralArray();
+
+  int inlined_count = data->InlinedFunctionCount()->value();
+  for (int i = 0; i < inlined_count; ++i) {
+    JSFunction* inlined = JSFunction::cast(literals->get(i));
+    if (inlined->shared() == candidate) return true;
+  }
+
+  return false;
+}
+
+
+class DependentFunctionsDeoptimizingVisitor : public OptimizedFunctionVisitor {
+ public:
+  explicit DependentFunctionsDeoptimizingVisitor(
+      SharedFunctionInfo* function_info)
+      : function_info_(function_info) {}
+
+  virtual void EnterContext(Context* context) {
+  }
+
+  virtual void VisitFunction(JSFunction* function) {
+    if (function->shared() == function_info_ ||
+        IsInlined(function, function_info_)) {
+      Deoptimizer::DeoptimizeFunction(function);
+    }
+  }
+
+  virtual void LeaveContext(Context* context) {
+  }
+
+ private:
+  SharedFunctionInfo* function_info_;
+};
+
+
+static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) {
+  AssertNoAllocation no_allocation;
+
+  DependentFunctionsDeoptimizingVisitor visitor(function_info);
+  Deoptimizer::VisitAllOptimizedFunctions(&visitor);
+}
+
+
 MaybeObject* LiveEdit::ReplaceFunctionCode(
     Handle<JSArray> new_compile_info_array,
     Handle<JSArray> shared_info_array) {
@@ -864,17 +921,38 @@
   shared_info->set_construct_stub(
       Builtins::builtin(Builtins::JSConstructStubGeneric));
 
+  DeoptimizeDependentFunctions(*shared_info);
+  CompilationCache::Remove(shared_info);
+
   return Heap::undefined_value();
 }
 
 
-// TODO(635): Eval caches its scripts (same text -- same compiled info).
-// Make sure we clear such caches.
+MaybeObject* LiveEdit::FunctionSourceUpdated(
+    Handle<JSArray> shared_info_array) {
+  HandleScope scope;
+
+  if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
+    return Top::ThrowIllegalOperation();
+  }
+
+  SharedInfoWrapper shared_info_wrapper(shared_info_array);
+  Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
+
+  DeoptimizeDependentFunctions(*shared_info);
+  CompilationCache::Remove(shared_info);
+
+  return Heap::undefined_value();
+}
+
+
 void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
                                  Handle<Object> script_handle) {
   Handle<SharedFunctionInfo> shared_info =
       Handle<SharedFunctionInfo>::cast(UnwrapJSValue(function_wrapper));
   shared_info->set_script(*script_handle);
+
+  CompilationCache::Remove(shared_info);
 }
 
 
@@ -1135,11 +1213,14 @@
 // Check an activation against list of functions. If there is a function
 // that matches, its status in result array is changed to status argument value.
 static bool CheckActivation(Handle<JSArray> shared_info_array,
-                            Handle<JSArray> result, StackFrame* frame,
+                            Handle<JSArray> result,
+                            StackFrame* frame,
                             LiveEdit::FunctionPatchabilityStatus status) {
-  if (!frame->is_java_script()) {
-    return false;
-  }
+  if (!frame->is_java_script()) return false;
+
+  Handle<JSFunction> function(
+      JSFunction::cast(JavaScriptFrame::cast(frame)->function()));
+
   int len = Smi::cast(shared_info_array->length())->value();
   for (int i = 0; i < len; i++) {
     JSValue* wrapper =
@@ -1147,7 +1228,7 @@
     Handle<SharedFunctionInfo> shared(
         SharedFunctionInfo::cast(wrapper->value()));
 
-    if (frame->code() == shared->code()) {
+    if (function->shared() == *shared || IsInlined(*function, *shared)) {
       SetElement(result, i, Handle<Smi>(Smi::FromInt(status)));
       return true;
     }
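
The liveedit.cc hunks above make LiveEdit aware of optimized code: before a
function is patched, every optimized function that either is the edited
function or inlined it must be deoptimized, and its compilation cache entry
must be dropped so stale compiled code cannot be reused. A hypothetical
usage sketch (OnFunctionEdited is invented; the calls inside it are the
ones introduced by this patch):

    // Sketch, not part of the patch: what has to happen when a
    // function's source is edited.
    void OnFunctionEdited(Handle<SharedFunctionInfo> edited) {
      {
        AssertNoAllocation no_allocation;  // visitor walks raw pointers
        DependentFunctionsDeoptimizingVisitor visitor(*edited);
        Deoptimizer::VisitAllOptimizedFunctions(&visitor);
      }
      CompilationCache::Remove(edited);  // drop any cached compilation
    }
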
diff --git a/src/liveedit.h b/src/liveedit.h
index c9bf96d..3632180 100644
--- a/src/liveedit.h
+++ b/src/liveedit.h
@@ -87,6 +87,8 @@
       Handle<JSArray> new_compile_info_array,
       Handle<JSArray> shared_info_array);
 
+  static MaybeObject* FunctionSourceUpdated(Handle<JSArray> shared_info_array);
+
   // Updates script field in FunctionSharedInfo.
   static void SetFunctionScript(Handle<JSValue> function_wrapper,
                                 Handle<Object> script_handle);
diff --git a/src/log-utils.cc b/src/log-utils.cc
index d6d8754..c7b7567 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -273,29 +273,7 @@
 
 
 void LogMessageBuilder::AppendAddress(Address addr) {
-  static Address last_address_ = NULL;
-  AppendAddress(addr, last_address_);
-  last_address_ = addr;
-}
-
-
-void LogMessageBuilder::AppendAddress(Address addr, Address bias) {
-  if (!FLAG_compress_log) {
-    Append("0x%" V8PRIxPTR, addr);
-  } else if (bias == NULL) {
-    Append("%" V8PRIxPTR, addr);
-  } else {
-    uintptr_t delta;
-    char sign;
-    if (addr >= bias) {
-      delta = addr - bias;
-      sign = '+';
-    } else {
-      delta = bias - addr;
-      sign = '-';
-    }
-    Append("%c%" V8PRIxPTR, sign, delta);
-  }
+  Append("0x%" V8PRIxPTR, addr);
 }
 
 
@@ -343,24 +321,6 @@
 }
 
 
-bool LogMessageBuilder::StoreInCompressor(LogRecordCompressor* compressor) {
-  return compressor->Store(Vector<const char>(Log::message_buffer_, pos_));
-}
-
-
-bool LogMessageBuilder::RetrieveCompressedPrevious(
-    LogRecordCompressor* compressor, const char* prefix) {
-  pos_ = 0;
-  if (prefix[0] != '\0') Append(prefix);
-  Vector<char> prev_record(Log::message_buffer_ + pos_,
-                           Log::kMessageBufferSize - pos_);
-  const bool has_prev = compressor->RetrievePreviousCompressed(&prev_record);
-  if (!has_prev) return false;
-  pos_ += prev_record.length();
-  return true;
-}
-
-
 void LogMessageBuilder::WriteToLogFile() {
   ASSERT(pos_ <= Log::kMessageBufferSize);
   const int written = Log::Write(Log::message_buffer_, pos_);
@@ -369,145 +329,6 @@
   }
 }
 
-
-// Formatting string for back references to the whole line. E.g. "#2" means
-// "the second line above".
-const char* LogRecordCompressor::kLineBackwardReferenceFormat = "#%d";
-
-// Formatting string for back references. E.g. "#2:10" means
-// "the second line above, start from char 10 (0-based)".
-const char* LogRecordCompressor::kBackwardReferenceFormat = "#%d:%d";
-
-
-LogRecordCompressor::~LogRecordCompressor() {
-  for (int i = 0; i < buffer_.length(); ++i) {
-    buffer_[i].Dispose();
-  }
-}
-
-
-static int GetNumberLength(int number) {
-  ASSERT(number >= 0);
-  ASSERT(number < 10000);
-  if (number < 10) return 1;
-  if (number < 100) return 2;
-  if (number < 1000) return 3;
-  return 4;
-}
-
-
-int LogRecordCompressor::GetBackwardReferenceSize(int distance, int pos) {
-  // See kLineBackwardReferenceFormat and kBackwardReferenceFormat.
-  return pos == 0 ? GetNumberLength(distance) + 1
-      : GetNumberLength(distance) + GetNumberLength(pos) + 2;
-}
-
-
-void LogRecordCompressor::PrintBackwardReference(Vector<char> dest,
-                                                 int distance,
-                                                 int pos) {
-  if (pos == 0) {
-    OS::SNPrintF(dest, kLineBackwardReferenceFormat, distance);
-  } else {
-    OS::SNPrintF(dest, kBackwardReferenceFormat, distance, pos);
-  }
-}
-
-
-bool LogRecordCompressor::Store(const Vector<const char>& record) {
-  // Check if the record is the same as the last stored one.
-  if (curr_ != -1) {
-    Vector<const char>& curr = buffer_[curr_];
-    if (record.length() == curr.length()
-        && strncmp(record.start(), curr.start(), record.length()) == 0) {
-      return false;
-    }
-  }
-  // buffer_ is circular.
-  prev_ = curr_++;
-  curr_ %= buffer_.length();
-  Vector<char> record_copy = Vector<char>::New(record.length());
-  memcpy(record_copy.start(), record.start(), record.length());
-  buffer_[curr_].Dispose();
-  buffer_[curr_] =
-      Vector<const char>(record_copy.start(), record_copy.length());
-  return true;
-}
-
-
-bool LogRecordCompressor::RetrievePreviousCompressed(
-    Vector<char>* prev_record) {
-  if (prev_ == -1) return false;
-
-  int index = prev_;
-  // Distance from prev_.
-  int distance = 0;
-  // Best compression result among records in the buffer.
-  struct {
-    intptr_t truncated_len;
-    int distance;
-    int copy_from_pos;
-    int backref_size;
-  } best = {-1, 0, 0, 0};
-  Vector<const char>& prev = buffer_[prev_];
-  const char* const prev_start = prev.start();
-  const char* const prev_end = prev.start() + prev.length();
-  do {
-    // We're moving backwards until we reach the current record.
-    // Remember that buffer_ is circular.
-    if (--index == -1) index = buffer_.length() - 1;
-    ++distance;
-    if (index == curr_) break;
-
-    Vector<const char>& data = buffer_[index];
-    if (data.start() == NULL) break;
-    const char* const data_end = data.start() + data.length();
-    const char* prev_ptr = prev_end;
-    const char* data_ptr = data_end;
-    // Compare strings backwards, stop on the last matching character.
-    while (prev_ptr != prev_start && data_ptr != data.start()
-          && *(prev_ptr - 1) == *(data_ptr - 1)) {
-      --prev_ptr;
-      --data_ptr;
-    }
-    const intptr_t truncated_len = prev_end - prev_ptr;
-    const int copy_from_pos = static_cast<int>(data_ptr - data.start());
-    // Check if the length of compressed tail is enough.
-    if (truncated_len <= kMaxBackwardReferenceSize
-        && truncated_len <= GetBackwardReferenceSize(distance, copy_from_pos)) {
-      continue;
-    }
-
-    // Record compression results.
-    if (truncated_len > best.truncated_len) {
-      best.truncated_len = truncated_len;
-      best.distance = distance;
-      best.copy_from_pos = copy_from_pos;
-      best.backref_size = GetBackwardReferenceSize(distance, copy_from_pos);
-    }
-  } while (true);
-
-  if (best.distance == 0) {
-    // Can't compress the previous record. Return as is.
-    ASSERT(prev_record->length() >= prev.length());
-    memcpy(prev_record->start(), prev.start(), prev.length());
-    prev_record->Truncate(prev.length());
-  } else {
-    // Copy the uncompressible part unchanged.
-    const intptr_t unchanged_len = prev.length() - best.truncated_len;
-    // + 1 for '\0'.
-    ASSERT(prev_record->length() >= unchanged_len + best.backref_size + 1);
-    memcpy(prev_record->start(), prev.start(), unchanged_len);
-    // Append the backward reference.
-    Vector<char> backref(
-        prev_record->start() + unchanged_len, best.backref_size + 1);
-    PrintBackwardReference(backref, best.distance, best.copy_from_pos);
-    ASSERT(strlen(backref.start()) - best.backref_size == 0);
-    prev_record->Truncate(static_cast<int>(unchanged_len + best.backref_size));
-  }
-  return true;
-}
-
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
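
For context on what is being deleted here: the old compressed log format
wrote addresses as signed deltas from a previous address and could replace
a record's tail with a backward reference such as "#2:10" ("same as the
second line above, starting from character 10"). After this patch every
address is written in full "0x..." form and each record stands alone,
which costs bytes but makes the log seekable and trivially parseable. A
schematic before/after pair (invented values, not actual logger output):

    old:  t,#2:12                            <- tail copied from the
                                                2nd-previous line
    new:  tick,0xf7a3c2d0,0xffab1234,0x0,2   <- every field spelled out
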
diff --git a/src/log-utils.h b/src/log-utils.h
index ffea928..719d370 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -176,50 +176,6 @@
 
   friend class Logger;
   friend class LogMessageBuilder;
-  friend class LogRecordCompressor;
-};
-
-
-// An utility class for performing backward reference compression
-// of string ends. It operates using a window of previous strings.
-class LogRecordCompressor {
- public:
-  // 'window_size' is the size of backward lookup window.
-  explicit LogRecordCompressor(int window_size)
-      : buffer_(window_size + kNoCompressionWindowSize),
-        kMaxBackwardReferenceSize(
-            GetBackwardReferenceSize(window_size, Log::kMessageBufferSize)),
-        curr_(-1), prev_(-1) {
-  }
-
-  ~LogRecordCompressor();
-
-  // Fills vector with a compressed version of the previous record.
-  // Returns false if there is no previous record.
-  bool RetrievePreviousCompressed(Vector<char>* prev_record);
-
-  // Stores a record if it differs from a previous one (or there's no previous).
-  // Returns true, if the record has been stored.
-  bool Store(const Vector<const char>& record);
-
- private:
-  // The minimum size of a buffer: a place needed for the current and
-  // the previous record. Since there is no place for precedessors of a previous
-  // record, it can't be compressed at all.
-  static const int kNoCompressionWindowSize = 2;
-
-  // Formatting strings for back references.
-  static const char* kLineBackwardReferenceFormat;
-  static const char* kBackwardReferenceFormat;
-
-  static int GetBackwardReferenceSize(int distance, int pos);
-
-  static void PrintBackwardReference(Vector<char> dest, int distance, int pos);
-
-  ScopedVector< Vector<const char> > buffer_;
-  const int kMaxBackwardReferenceSize;
-  int curr_;
-  int prev_;
 };
 
 
@@ -244,32 +200,14 @@
   // Append a heap string.
   void Append(String* str);
 
-  // Appends an address, compressing it if needed by offsetting
-  // from Logger::last_address_.
+  // Appends an address.
   void AppendAddress(Address addr);
 
-  // Appends an address, compressing it if needed.
-  void AppendAddress(Address addr, Address bias);
-
   void AppendDetailed(String* str, bool show_impl_info);
 
   // Append a portion of a string.
   void AppendStringPart(const char* str, int len);
 
-  // Stores log message into compressor, returns true if the message
-  // was stored (i.e. doesn't repeat the previous one).
-  bool StoreInCompressor(LogRecordCompressor* compressor);
-
-  // Sets log message to a previous version of compressed message.
-  // Returns false, if there is no previous message.
-  bool RetrieveCompressedPrevious(LogRecordCompressor* compressor) {
-    return RetrieveCompressedPrevious(compressor, "");
-  }
-
-  // Does the same at the version without arguments, and sets a prefix.
-  bool RetrieveCompressedPrevious(LogRecordCompressor* compressor,
-                                  const char* prefix);
-
   // Write the log message to the log file currently opened.
   void WriteToLogFile();
 
diff --git a/src/log.cc b/src/log.cc
index 55f15de..db9ff7a 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -31,11 +31,14 @@
 
 #include "bootstrapper.h"
 #include "code-stubs.h"
+#include "deoptimizer.h"
 #include "global-handles.h"
 #include "log.h"
 #include "macro-assembler.h"
+#include "runtime-profiler.h"
 #include "serialize.h"
 #include "string-stream.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -147,6 +150,7 @@
   sample->function = NULL;
   sample->frames_count = 0;
 
+  // Avoid collecting traces while doing GC.
   if (sample->state == GC) return;
 
   const Address js_entry_sp = Top::js_entry_sp(Top::GetCurrentThread());
@@ -155,15 +159,18 @@
     return;
   }
 
-  const Address functionAddr =
+  const Address function_address =
       sample->fp + JavaScriptFrameConstants::kFunctionOffset;
   if (SafeStackFrameIterator::IsWithinBounds(sample->sp, js_entry_sp,
-                                             functionAddr)) {
-    sample->function = Memory::Address_at(functionAddr) - kHeapObjectTag;
+                                             function_address)) {
+    Object* object = Memory::Object_at(function_address);
+    if (object->IsHeapObject()) {
+      sample->function = HeapObject::cast(object)->address();
+    }
   }
 
   int i = 0;
-  const Address callback = VMState::external_callback();
+  const Address callback = Top::external_callback();
   // Surprisingly, PC can point _exactly_ to callback start, with good
   // probability, and this will result in reporting fake nested
   // callback call.
@@ -174,9 +181,10 @@
   SafeStackTraceFrameIterator it(sample->fp, sample->sp,
                                  sample->sp, js_entry_sp);
   while (!it.done() && i < TickSample::kMaxFramesCount) {
-    sample->stack[i++] =
-        reinterpret_cast<Address>(it.frame()->function_slot_object()) -
-            kHeapObjectTag;
+    Object* object = it.frame()->function_slot_object();
+    if (object->IsHeapObject()) {
+      sample->stack[i++] = HeapObject::cast(object)->address();
+    }
     it.Advance();
   }
   sample->frames_count = i;
@@ -189,8 +197,10 @@
 //
 class Ticker: public Sampler {
  public:
-  explicit Ticker(int interval):
-      Sampler(interval, FLAG_prof), window_(NULL), profiler_(NULL) {}
+  explicit Ticker(int interval) :
+      Sampler(interval),
+      window_(NULL),
+      profiler_(NULL) {}
 
   ~Ticker() { if (IsActive()) Stop(); }
 
@@ -206,22 +216,24 @@
 
   void ClearWindow() {
     window_ = NULL;
-    if (!profiler_ && IsActive()) Stop();
+    if (!profiler_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
   }
 
   void SetProfiler(Profiler* profiler) {
+    ASSERT(profiler_ == NULL);
     profiler_ = profiler;
+    IncreaseProfilingDepth();
     if (!FLAG_prof_lazy && !IsActive()) Start();
   }
 
   void ClearProfiler() {
+    DecreaseProfilingDepth();
     profiler_ = NULL;
-    if (!window_ && IsActive()) Stop();
+    if (!window_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
   }
 
  protected:
   virtual void DoSampleStack(TickSample* sample) {
-    ASSERT(IsSynchronous());
     StackTracer::Trace(sample);
   }
 
@@ -291,7 +303,6 @@
   Logger::ticker_->SetProfiler(this);
 
   Logger::ProfilerBeginEvent();
-  Logger::LogAliases();
 }
 
 
@@ -331,43 +342,21 @@
 Ticker* Logger::ticker_ = NULL;
 Profiler* Logger::profiler_ = NULL;
 SlidingStateWindow* Logger::sliding_state_window_ = NULL;
-const char** Logger::log_events_ = NULL;
-CompressionHelper* Logger::compression_helper_ = NULL;
 int Logger::logging_nesting_ = 0;
 int Logger::cpu_profiler_nesting_ = 0;
 int Logger::heap_profiler_nesting_ = 0;
 
-#define DECLARE_LONG_EVENT(ignore1, long_name, ignore2) long_name,
-const char* kLongLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
-  LOG_EVENTS_AND_TAGS_LIST(DECLARE_LONG_EVENT)
+#define DECLARE_EVENT(ignore1, name) name,
+const char* kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
+  LOG_EVENTS_AND_TAGS_LIST(DECLARE_EVENT)
 };
-#undef DECLARE_LONG_EVENT
-
-#define DECLARE_SHORT_EVENT(ignore1, ignore2, short_name) short_name,
-const char* kCompressedLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
-  LOG_EVENTS_AND_TAGS_LIST(DECLARE_SHORT_EVENT)
-};
-#undef DECLARE_SHORT_EVENT
+#undef DECLARE_EVENT
 
 
 void Logger::ProfilerBeginEvent() {
   if (!Log::IsEnabled()) return;
   LogMessageBuilder msg;
   msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
-  if (FLAG_compress_log) {
-    msg.Append("profiler,\"compression\",%d\n", kCompressionWindowSize);
-  }
-  msg.WriteToLogFile();
-}
-
-
-void Logger::LogAliases() {
-  if (!Log::IsEnabled() || !FLAG_compress_log) return;
-  LogMessageBuilder msg;
-  for (int i = 0; i < NUMBER_OF_LOG_EVENTS; ++i) {
-    msg.Append("alias,%s,%s\n",
-               kCompressedLogEventsNames[i], kLongLogEventsNames[i]);
-  }
   msg.WriteToLogFile();
 }
 
@@ -675,54 +664,15 @@
 
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
-
-// A class that contains all common code dealing with record compression.
-class CompressionHelper {
- public:
-  explicit CompressionHelper(int window_size)
-      : compressor_(window_size), repeat_count_(0) { }
-
-  // Handles storing message in compressor, retrieving the previous one and
-  // prefixing it with repeat count, if needed.
-  // Returns true if message needs to be written to log.
-  bool HandleMessage(LogMessageBuilder* msg) {
-    if (!msg->StoreInCompressor(&compressor_)) {
-      // Current message repeats the previous one, don't write it.
-      ++repeat_count_;
-      return false;
-    }
-    if (repeat_count_ == 0) {
-      return msg->RetrieveCompressedPrevious(&compressor_);
-    }
-    OS::SNPrintF(prefix_, "%s,%d,",
-                 Logger::log_events_[Logger::REPEAT_META_EVENT],
-                 repeat_count_ + 1);
-    repeat_count_ = 0;
-    return msg->RetrieveCompressedPrevious(&compressor_, prefix_.start());
-  }
-
- private:
-  LogRecordCompressor compressor_;
-  int repeat_count_;
-  EmbeddedVector<char, 20> prefix_;
-};
-
-#endif  // ENABLE_LOGGING_AND_PROFILING
-
-
-#ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::CallbackEventInternal(const char* prefix, const char* name,
                                    Address entry_point) {
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   msg.Append("%s,%s,",
-             log_events_[CODE_CREATION_EVENT], log_events_[CALLBACK_TAG]);
+             kLogEventsNames[CODE_CREATION_EVENT],
+             kLogEventsNames[CALLBACK_TAG]);
   msg.AppendAddress(entry_point);
   msg.Append(",1,\"%s%s\"", prefix, name);
-  if (FLAG_compress_log) {
-    ASSERT(compression_helper_ != NULL);
-    if (!compression_helper_->HandleMessage(&msg)) return;
-  }
   msg.Append('\n');
   msg.WriteToLogFile();
 }
@@ -759,15 +709,28 @@
 }
 
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
+static const char* ComputeMarker(Code* code) {
+  switch (code->kind()) {
+    case Code::FUNCTION: return code->optimizable() ? "~" : "";
+    case Code::OPTIMIZED_FUNCTION: return "*";
+    default: return "";
+  }
+}
+#endif
+
+
 void Logger::CodeCreateEvent(LogEventsAndTags tag,
                              Code* code,
                              const char* comment) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+  msg.Append("%s,%s,",
+             kLogEventsNames[CODE_CREATION_EVENT],
+             kLogEventsNames[tag]);
   msg.AppendAddress(code->address());
-  msg.Append(",%d,\"", code->ExecutableSize());
+  msg.Append(",%d,\"%s", code->ExecutableSize(), ComputeMarker(code));
   for (const char* p = comment; *p != '\0'; p++) {
     if (*p == '"') {
       msg.Append('\\');
@@ -776,10 +739,6 @@
   }
   msg.Append('"');
   LowLevelCodeCreateEvent(code, &msg);
-  if (FLAG_compress_log) {
-    ASSERT(compression_helper_ != NULL);
-    if (!compression_helper_->HandleMessage(&msg)) return;
-  }
   msg.Append('\n');
   msg.WriteToLogFile();
 #endif
@@ -792,14 +751,12 @@
   LogMessageBuilder msg;
   SmartPointer<char> str =
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+  msg.Append("%s,%s,",
+             kLogEventsNames[CODE_CREATION_EVENT],
+             kLogEventsNames[tag]);
   msg.AppendAddress(code->address());
-  msg.Append(",%d,\"%s\"", code->ExecutableSize(), *str);
+  msg.Append(",%d,\"%s%s\"", code->ExecutableSize(), ComputeMarker(code), *str);
   LowLevelCodeCreateEvent(code, &msg);
-  if (FLAG_compress_log) {
-    ASSERT(compression_helper_ != NULL);
-    if (!compression_helper_->HandleMessage(&msg)) return;
-  }
   msg.Append('\n');
   msg.WriteToLogFile();
 #endif
@@ -816,15 +773,17 @@
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   SmartPointer<char> sourcestr =
       source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+  msg.Append("%s,%s,",
+             kLogEventsNames[CODE_CREATION_EVENT],
+             kLogEventsNames[tag]);
   msg.AppendAddress(code->address());
-  msg.Append(",%d,\"%s %s:%d\"",
-             code->ExecutableSize(), *str, *sourcestr, line);
+  msg.Append(",%d,\"%s%s %s:%d\"",
+             code->ExecutableSize(),
+             ComputeMarker(code),
+             *str,
+             *sourcestr,
+             line);
   LowLevelCodeCreateEvent(code, &msg);
-  if (FLAG_compress_log) {
-    ASSERT(compression_helper_ != NULL);
-    if (!compression_helper_->HandleMessage(&msg)) return;
-  }
   msg.Append('\n');
   msg.WriteToLogFile();
 #endif
@@ -835,14 +794,12 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+  msg.Append("%s,%s,",
+             kLogEventsNames[CODE_CREATION_EVENT],
+             kLogEventsNames[tag]);
   msg.AppendAddress(code->address());
   msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
   LowLevelCodeCreateEvent(code, &msg);
-  if (FLAG_compress_log) {
-    ASSERT(compression_helper_ != NULL);
-    if (!compression_helper_->HandleMessage(&msg)) return;
-  }
   msg.Append('\n');
   msg.WriteToLogFile();
 #endif
@@ -853,7 +810,7 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_code || !FLAG_ll_prof) return;
   LogMessageBuilder msg;
-  msg.Append("%s\n", log_events_[CODE_MOVING_GC]);
+  msg.Append("%s\n", kLogEventsNames[CODE_MOVING_GC]);
   msg.WriteToLogFile();
   OS::SignalCodeMovingGC();
 #endif
@@ -865,16 +822,13 @@
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   msg.Append("%s,%s,",
-             log_events_[CODE_CREATION_EVENT], log_events_[REG_EXP_TAG]);
+             kLogEventsNames[CODE_CREATION_EVENT],
+             kLogEventsNames[REG_EXP_TAG]);
   msg.AppendAddress(code->address());
   msg.Append(",%d,\"", code->ExecutableSize());
   msg.AppendDetailed(source, false);
   msg.Append('\"');
   LowLevelCodeCreateEvent(code, &msg);
-  if (FLAG_compress_log) {
-    ASSERT(compression_helper_ != NULL);
-    if (!compression_helper_->HandleMessage(&msg)) return;
-  }
   msg.Append('\n');
   msg.WriteToLogFile();
 #endif
@@ -899,13 +853,9 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_snapshot_positions) return;
   LogMessageBuilder msg;
-  msg.Append("%s,", log_events_[SNAPSHOT_POSITION_EVENT]);
+  msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
   msg.AppendAddress(addr);
   msg.Append(",%d", pos);
-  if (FLAG_compress_log) {
-    ASSERT(compression_helper_ != NULL);
-    if (!compression_helper_->HandleMessage(&msg)) return;
-  }
   msg.Append('\n');
   msg.WriteToLogFile();
 #endif
@@ -917,18 +867,12 @@
   // This function can be called from GC iterators (during Scavenge,
   // MC, and MS), so marking bits can be set on objects. That's
   // why unchecked accessors are used here.
-  static Address prev_code = NULL;
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("%s,", log_events_[FUNCTION_CREATION_EVENT]);
+  msg.Append("%s,", kLogEventsNames[FUNCTION_CREATION_EVENT]);
   msg.AppendAddress(function->address());
   msg.Append(',');
-  msg.AppendAddress(function->unchecked_code()->address(), prev_code);
-  prev_code = function->unchecked_code()->address();
-  if (FLAG_compress_log) {
-    ASSERT(compression_helper_ != NULL);
-    if (!compression_helper_->HandleMessage(&msg)) return;
-  }
+  msg.AppendAddress(function->unchecked_code()->address());
   msg.Append('\n');
   msg.WriteToLogFile();
 #endif
@@ -962,18 +906,12 @@
 void Logger::MoveEventInternal(LogEventsAndTags event,
                                Address from,
                                Address to) {
-  static Address prev_to_ = NULL;
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("%s,", log_events_[event]);
+  msg.Append("%s,", kLogEventsNames[event]);
   msg.AppendAddress(from);
   msg.Append(',');
-  msg.AppendAddress(to, prev_to_);
-  prev_to_ = to;
-  if (FLAG_compress_log) {
-    ASSERT(compression_helper_ != NULL);
-    if (!compression_helper_->HandleMessage(&msg)) return;
-  }
+  msg.AppendAddress(to);
   msg.Append('\n');
   msg.WriteToLogFile();
 }
@@ -984,12 +922,8 @@
 void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("%s,", log_events_[event]);
+  msg.Append("%s,", kLogEventsNames[event]);
   msg.AppendAddress(from);
-  if (FLAG_compress_log) {
-    ASSERT(compression_helper_ != NULL);
-    if (!compression_helper_->HandleMessage(&msg)) return;
-  }
   msg.Append('\n');
   msg.WriteToLogFile();
 }
@@ -1177,30 +1111,20 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::TickEvent(TickSample* sample, bool overflow) {
   if (!Log::IsEnabled() || !FLAG_prof) return;
-  static Address prev_sp = NULL;
-  static Address prev_function = NULL;
   LogMessageBuilder msg;
-  msg.Append("%s,", log_events_[TICK_EVENT]);
-  Address prev_addr = sample->pc;
-  msg.AppendAddress(prev_addr);
+  msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
+  msg.AppendAddress(sample->pc);
   msg.Append(',');
-  msg.AppendAddress(sample->sp, prev_sp);
-  prev_sp = sample->sp;
+  msg.AppendAddress(sample->sp);
   msg.Append(',');
-  msg.AppendAddress(sample->function, prev_function);
-  prev_function = sample->function;
+  msg.AppendAddress(sample->function);
   msg.Append(",%d", static_cast<int>(sample->state));
   if (overflow) {
     msg.Append(",overflow");
   }
   for (int i = 0; i < sample->frames_count; ++i) {
     msg.Append(',');
-    msg.AppendAddress(sample->stack[i], prev_addr);
-    prev_addr = sample->stack[i];
-  }
-  if (FLAG_compress_log) {
-    ASSERT(compression_helper_ != NULL);
-    if (!compression_helper_->HandleMessage(&msg)) return;
+    msg.AppendAddress(sample->stack[i]);
   }
   msg.Append('\n');
   msg.WriteToLogFile();
@@ -1226,7 +1150,9 @@
     if (--cpu_profiler_nesting_ == 0) {
       profiler_->pause();
       if (FLAG_prof_lazy) {
-        if (!FLAG_sliding_state_window) ticker_->Stop();
+        if (!FLAG_sliding_state_window && !RuntimeProfiler::IsEnabled()) {
+          ticker_->Stop();
+        }
         FLAG_log_code = false;
         // Must be the same message as Log::kDynamicBufferSeal.
         LOG(UncheckedStringEvent("profiler", "pause"));
@@ -1262,7 +1188,9 @@
         LogCompiledFunctions();
         LogFunctionObjects();
         LogAccessorCallbacks();
-        if (!FLAG_sliding_state_window) ticker_->Start();
+        if (!FLAG_sliding_state_window && !ticker_->IsActive()) {
+          ticker_->Start();
+        }
       }
       profiler_->resume();
     }
@@ -1295,9 +1223,41 @@
 }
 
 
-static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis) {
+class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
+ public:
+  EnumerateOptimizedFunctionsVisitor(Handle<SharedFunctionInfo>* sfis,
+                                     Handle<Code>* code_objects,
+                                     int* count)
+      : sfis_(sfis), code_objects_(code_objects), count_(count) { }
+
+  virtual void EnterContext(Context* context) {}
+  virtual void LeaveContext(Context* context) {}
+
+  virtual void VisitFunction(JSFunction* function) {
+    if (sfis_ != NULL) {
+      sfis_[*count_] = Handle<SharedFunctionInfo>(function->shared());
+    }
+    if (code_objects_ != NULL) {
+      ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
+      code_objects_[*count_] = Handle<Code>(function->code());
+    }
+    *count_ = *count_ + 1;
+  }
+
+ private:
+  Handle<SharedFunctionInfo>* sfis_;
+  Handle<Code>* code_objects_;
+  int* count_;
+};
+
+
+static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
+                                      Handle<Code>* code_objects) {
   AssertNoAllocation no_alloc;
   int compiled_funcs_count = 0;
+
+  // Iterate the heap to find shared function info objects and record
+  // the unoptimized code for them.
   HeapIterator iterator;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (!obj->IsSharedFunctionInfo()) continue;
@@ -1305,11 +1265,22 @@
     if (sfi->is_compiled()
         && (!sfi->script()->IsScript()
             || Script::cast(sfi->script())->HasValidSource())) {
-      if (sfis != NULL)
+      if (sfis != NULL) {
         sfis[compiled_funcs_count] = Handle<SharedFunctionInfo>(sfi);
+      }
+      if (code_objects != NULL) {
+        code_objects[compiled_funcs_count] = Handle<Code>(sfi->code());
+      }
       ++compiled_funcs_count;
     }
   }
+
+  // Iterate all optimized functions in all contexts.
+  EnumerateOptimizedFunctionsVisitor visitor(sfis,
+                                             code_objects,
+                                             &compiled_funcs_count);
+  Deoptimizer::VisitAllOptimizedFunctions(&visitor);
+
   return compiled_funcs_count;
 }
 
@@ -1321,9 +1292,11 @@
     const char* description = "Unknown code from the snapshot";
     switch (code_object->kind()) {
       case Code::FUNCTION:
+      case Code::OPTIMIZED_FUNCTION:
         return;  // We log this later using LogCompiledFunctions.
-      case Code::BINARY_OP_IC:
-        // fall through
+      case Code::BINARY_OP_IC:  // fall through
+      case Code::TYPE_RECORDING_BINARY_OP_IC:   // fall through
+      case Code::COMPARE_IC:  // fall through
       case Code::STUB:
         description =
             CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
@@ -1406,9 +1379,10 @@
 
 void Logger::LogCompiledFunctions() {
   HandleScope scope;
-  const int compiled_funcs_count = EnumerateCompiledFunctions(NULL);
+  const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
   ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
-  EnumerateCompiledFunctions(sfis.start());
+  ScopedVector< Handle<Code> > code_objects(compiled_funcs_count);
+  EnumerateCompiledFunctions(sfis.start(), code_objects.start());
 
   // During iteration, there can be heap allocation due to
   // GetScriptLineNumber call.
@@ -1425,18 +1399,18 @@
         if (line_num > 0) {
           PROFILE(CodeCreateEvent(
               Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
-              shared->code(), *func_name,
+              *code_objects[i], *func_name,
               *script_name, line_num + 1));
         } else {
           // Can't distinguish eval and script here, so always use Script.
           PROFILE(CodeCreateEvent(
               Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
-              shared->code(), *script_name));
+              *code_objects[i], *script_name));
         }
       } else {
         PROFILE(CodeCreateEvent(
             Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
-            shared->code(), *func_name));
+            *code_objects[i], *func_name));
       }
     } else if (shared->IsApiFunction()) {
       // API function.
@@ -1450,7 +1424,7 @@
       }
     } else {
       PROFILE(CodeCreateEvent(
-          Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
+          Logger::LAZY_COMPILE_TAG, *code_objects[i], *func_name));
     }
   }
 }
@@ -1571,8 +1545,6 @@
     }
   }
 
-  ASSERT(VMState::is_outermost_external());
-
   if (FLAG_ll_prof) LogCodeInfo();
 
   ticker_ = new Ticker(kSamplingIntervalMs);
@@ -1581,12 +1553,6 @@
     sliding_state_window_ = new SlidingStateWindow();
   }
 
-  log_events_ = FLAG_compress_log ?
-      kCompressedLogEventsNames : kLongLogEventsNames;
-  if (FLAG_compress_log) {
-    compression_helper_ = new CompressionHelper(kCompressionWindowSize);
-  }
-
   if (start_logging) {
     logging_nesting_ = 1;
   }
@@ -1604,7 +1570,6 @@
   }
 
   LogMessageBuilder::set_write_failure_handler(StopLoggingAndProfiling);
-
   return true;
 
 #else
@@ -1613,6 +1578,21 @@
 }
 
 
+void Logger::EnsureTickerStarted() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  ASSERT(ticker_ != NULL);
+  if (!ticker_->IsActive()) ticker_->Start();
+#endif
+}
+
+
+void Logger::EnsureTickerStopped() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (ticker_ != NULL && ticker_->IsActive()) ticker_->Stop();
+#endif
+}
+
+
 void Logger::TearDown() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   LogMessageBuilder::set_write_failure_handler(NULL);
@@ -1624,9 +1604,6 @@
     profiler_ = NULL;
   }
 
-  delete compression_helper_;
-  compression_helper_ = NULL;
-
   delete sliding_state_window_;
   sliding_state_window_ = NULL;
 
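
The log.cc changes above do three things: drop record compression
entirely, tag code-creation entries with an optimization marker from
ComputeMarker ("~" for optimizable unoptimized code, "*" for optimized
code, nothing otherwise), and extend the enumeration behind
LogCompiledFunctions to record the unoptimized code of each
SharedFunctionInfo plus every optimized function found through the
deoptimizer's visitor. Putting the format strings of this patch together,
a code-creation line now looks like this (schematic values, not real
output):

    code-creation,LazyCompile,0xf7c28d40,372,"~foo bar.js:7"
    code-creation,LazyCompile,0xf7c3a100,814,"*foo bar.js:7"

where the leading "~" or "*" distinguishes unoptimized-but-optimizable
code from optimized code belonging to the same function.
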
diff --git a/src/log.h b/src/log.h
index 3a4d79b..771709c 100644
--- a/src/log.h
+++ b/src/log.h
@@ -74,7 +74,6 @@
 class Semaphore;
 class SlidingStateWindow;
 class LogMessageBuilder;
-class CompressionHelper;
 
 #undef LOG
 #ifdef ENABLE_LOGGING_AND_PROFILING
@@ -88,58 +87,55 @@
 #endif
 
 #define LOG_EVENTS_AND_TAGS_LIST(V) \
-  V(CODE_CREATION_EVENT,            "code-creation",          "cc")       \
-  V(CODE_MOVE_EVENT,                "code-move",              "cm")       \
-  V(CODE_DELETE_EVENT,              "code-delete",            "cd")       \
-  V(CODE_MOVING_GC,                 "code-moving-gc",         "cg")       \
-  V(FUNCTION_CREATION_EVENT,        "function-creation",      "fc")       \
-  V(FUNCTION_MOVE_EVENT,            "function-move",          "fm")       \
-  V(FUNCTION_DELETE_EVENT,          "function-delete",        "fd")       \
-  V(SNAPSHOT_POSITION_EVENT,        "snapshot-pos",           "sp")       \
-  V(TICK_EVENT,                     "tick",                   "t")        \
-  V(REPEAT_META_EVENT,              "repeat",                 "r")        \
-  V(BUILTIN_TAG,                    "Builtin",                "bi")       \
-  V(CALL_DEBUG_BREAK_TAG,           "CallDebugBreak",         "cdb")      \
-  V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn", "cdbsi")    \
-  V(CALL_IC_TAG,                    "CallIC",                 "cic")      \
-  V(CALL_INITIALIZE_TAG,            "CallInitialize",         "ci")       \
-  V(CALL_MEGAMORPHIC_TAG,           "CallMegamorphic",        "cmm")      \
-  V(CALL_MISS_TAG,                  "CallMiss",               "cm")       \
-  V(CALL_NORMAL_TAG,                "CallNormal",             "cn")       \
-  V(CALL_PRE_MONOMORPHIC_TAG,       "CallPreMonomorphic",     "cpm")      \
-  V(KEYED_CALL_DEBUG_BREAK_TAG,     "KeyedCallDebugBreak",    "kcdb")     \
-  V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG,                                 \
-    "KeyedCallDebugPrepareStepIn",                                        \
-    "kcdbsi")                                                             \
-  V(KEYED_CALL_IC_TAG,              "KeyedCallIC",            "kcic")     \
-  V(KEYED_CALL_INITIALIZE_TAG,      "KeyedCallInitialize",    "kci")      \
-  V(KEYED_CALL_MEGAMORPHIC_TAG,     "KeyedCallMegamorphic",   "kcmm")     \
-  V(KEYED_CALL_MISS_TAG,            "KeyedCallMiss",          "kcm")      \
-  V(KEYED_CALL_NORMAL_TAG,          "KeyedCallNormal",        "kcn")      \
-  V(KEYED_CALL_PRE_MONOMORPHIC_TAG,                                       \
-    "KeyedCallPreMonomorphic",                                            \
-    "kcpm")                                                               \
-  V(CALLBACK_TAG,                   "Callback",               "cb")       \
-  V(EVAL_TAG,                       "Eval",                   "e")        \
-  V(FUNCTION_TAG,                   "Function",               "f")        \
-  V(KEYED_LOAD_IC_TAG,              "KeyedLoadIC",            "klic")     \
-  V(KEYED_STORE_IC_TAG,             "KeyedStoreIC",           "ksic")     \
-  V(LAZY_COMPILE_TAG,               "LazyCompile",            "lc")       \
-  V(LOAD_IC_TAG,                    "LoadIC",                 "lic")      \
-  V(REG_EXP_TAG,                    "RegExp",                 "re")       \
-  V(SCRIPT_TAG,                     "Script",                 "sc")       \
-  V(STORE_IC_TAG,                   "StoreIC",                "sic")      \
-  V(STUB_TAG,                       "Stub",                   "s")        \
-  V(NATIVE_FUNCTION_TAG,            "Function",               "f")        \
-  V(NATIVE_LAZY_COMPILE_TAG,        "LazyCompile",            "lc")       \
-  V(NATIVE_SCRIPT_TAG,              "Script",                 "sc")
+  V(CODE_CREATION_EVENT,            "code-creation")            \
+  V(CODE_MOVE_EVENT,                "code-move")                \
+  V(CODE_DELETE_EVENT,              "code-delete")              \
+  V(CODE_MOVING_GC,                 "code-moving-gc")           \
+  V(FUNCTION_CREATION_EVENT,        "function-creation")        \
+  V(FUNCTION_MOVE_EVENT,            "function-move")            \
+  V(FUNCTION_DELETE_EVENT,          "function-delete")          \
+  V(SNAPSHOT_POSITION_EVENT,        "snapshot-pos")             \
+  V(TICK_EVENT,                     "tick")                     \
+  V(REPEAT_META_EVENT,              "repeat")                   \
+  V(BUILTIN_TAG,                    "Builtin")                  \
+  V(CALL_DEBUG_BREAK_TAG,           "CallDebugBreak")           \
+  V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn")   \
+  V(CALL_IC_TAG,                    "CallIC")                   \
+  V(CALL_INITIALIZE_TAG,            "CallInitialize")           \
+  V(CALL_MEGAMORPHIC_TAG,           "CallMegamorphic")          \
+  V(CALL_MISS_TAG,                  "CallMiss")                 \
+  V(CALL_NORMAL_TAG,                "CallNormal")               \
+  V(CALL_PRE_MONOMORPHIC_TAG,       "CallPreMonomorphic")       \
+  V(KEYED_CALL_DEBUG_BREAK_TAG,     "KeyedCallDebugBreak")      \
+  V(KEYED_CALL_DEBUG_PREPARE_STEP_IN_TAG,                       \
+    "KeyedCallDebugPrepareStepIn")                              \
+  V(KEYED_CALL_IC_TAG,              "KeyedCallIC")              \
+  V(KEYED_CALL_INITIALIZE_TAG,      "KeyedCallInitialize")      \
+  V(KEYED_CALL_MEGAMORPHIC_TAG,     "KeyedCallMegamorphic")     \
+  V(KEYED_CALL_MISS_TAG,            "KeyedCallMiss")            \
+  V(KEYED_CALL_NORMAL_TAG,          "KeyedCallNormal")          \
+  V(KEYED_CALL_PRE_MONOMORPHIC_TAG, "KeyedCallPreMonomorphic")  \
+  V(CALLBACK_TAG,                   "Callback")                 \
+  V(EVAL_TAG,                       "Eval")                     \
+  V(FUNCTION_TAG,                   "Function")                 \
+  V(KEYED_LOAD_IC_TAG,              "KeyedLoadIC")              \
+  V(KEYED_STORE_IC_TAG,             "KeyedStoreIC")             \
+  V(LAZY_COMPILE_TAG,               "LazyCompile")              \
+  V(LOAD_IC_TAG,                    "LoadIC")                   \
+  V(REG_EXP_TAG,                    "RegExp")                   \
+  V(SCRIPT_TAG,                     "Script")                   \
+  V(STORE_IC_TAG,                   "StoreIC")                  \
+  V(STUB_TAG,                       "Stub")                     \
+  V(NATIVE_FUNCTION_TAG,            "Function")                 \
+  V(NATIVE_LAZY_COMPILE_TAG,        "LazyCompile")              \
+  V(NATIVE_SCRIPT_TAG,              "Script")
 // Note that 'NATIVE_' cases for functions and scripts are mapped onto
 // original tags when writing to the log.
 
 
 class Logger {
  public:
-#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+#define DECLARE_ENUM(enum_item, ignore) enum_item,
   enum LogEventsAndTags {
     LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM)
     NUMBER_OF_LOG_EVENTS
@@ -149,6 +145,9 @@
   // Acquires resources for logging if the right flags are set.
   static bool Setup();
 
+  static void EnsureTickerStarted();
+  static void EnsureTickerStopped();
+
   // Frees resources acquired in Setup.
   static void TearDown();
 
@@ -289,9 +288,6 @@
 
  private:
 
-  // Size of window used for log records compression.
-  static const int kCompressionWindowSize = 4;
-
   // Emits the profiler's first message.
   static void ProfilerBeginEvent();
 
@@ -309,9 +305,6 @@
   static void DeleteEventInternal(LogEventsAndTags event,
                                   Address from);
 
-  // Emits aliases for compressed messages.
-  static void LogAliases();
-
   // Emits the source code of a regexp. Used by regexp events.
   static void LogRegExpSource(Handle<JSRegExp> regexp);
 
@@ -354,15 +347,8 @@
   // recent VM states.
   static SlidingStateWindow* sliding_state_window_;
 
-  // An array of log events names.
-  static const char** log_events_;
-
-  // An instance of helper created if log compression is enabled.
-  static CompressionHelper* compression_helper_;
-
   // Internal implementation classes with access to
   // private members.
-  friend class CompressionHelper;
   friend class EventLog;
   friend class TimeLog;
   friend class Profiler;
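
With the short-name column gone, LOG_EVENTS_AND_TAGS_LIST is now a plain
two-column X-macro: the same list expands once into the LogEventsAndTags
enum and once into the kLogEventsNames table, so the enum and the name
table cannot drift apart. A minimal self-contained sketch of the pattern,
using a hypothetical COLOR_LIST:

    #define COLOR_LIST(V) \
      V(RED,   "red")     \
      V(GREEN, "green")

    #define DECLARE_ENUM(enum_item, ignore) enum_item,
    enum Color { COLOR_LIST(DECLARE_ENUM) NUMBER_OF_COLORS };
    #undef DECLARE_ENUM

    #define DECLARE_NAME(ignore, name) name,
    static const char* kColorNames[NUMBER_OF_COLORS] = {
      COLOR_LIST(DECLARE_NAME)  // expands to "red", "green",
    };
    #undef DECLARE_NAME
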
diff --git a/src/macros.py b/src/macros.py
index 1ceb620..6d66def 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -140,15 +140,14 @@
 
 # Limit according to ECMA 262 15.9.1.1
 const MAX_TIME_MS = 8640000000000000;
+# Limit which is MAX_TIME_MS + msPerMonth.
+const MAX_TIME_BEFORE_UTC = 8640002592000000;
 
 # Gets the value of a Date object. If arg is not a Date object
 # a type error is thrown.
 macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
 macro DAY(time) = ($floor(time / 86400000));
-macro MONTH_FROM_TIME(time) = (MonthFromTime(time));
-macro DATE_FROM_TIME(time) = (DateFromTime(time));
-macro NAN_OR_DATE_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : DATE_FROM_TIME(time));
-macro YEAR_FROM_TIME(time) = (YearFromTime(time));
+macro NAN_OR_DATE_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : DateFromTime(time));
 macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
 macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
 macro NAN_OR_MIN_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : MIN_FROM_TIME(time));
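
A quick check of the new constant in macros.py: one month here is 30 days,
i.e. 30 * 86,400,000 ms = 2,592,000,000 ms, and 8,640,000,000,000,000 +
2,592,000,000 = 8,640,002,592,000,000, which matches MAX_TIME_BEFORE_UTC.
The extra month of slack presumably lets a local-time value whose UTC
equivalent is still in range pass validation before the offset is applied.
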
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 40194e3..8ade41c 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -215,6 +215,121 @@
 
 static MarkingStack marking_stack;
 
+class FlushCode : public AllStatic {
+ public:
+  static void AddCandidate(SharedFunctionInfo* shared_info) {
+    SetNextCandidate(shared_info, shared_function_info_candidates_head_);
+    shared_function_info_candidates_head_ = shared_info;
+  }
+
+
+  static void AddCandidate(JSFunction* function) {
+    ASSERT(function->unchecked_code() ==
+           function->unchecked_shared()->unchecked_code());
+
+    SetNextCandidate(function, jsfunction_candidates_head_);
+    jsfunction_candidates_head_ = function;
+  }
+
+
+  static void ProcessCandidates() {
+    ProcessSharedFunctionInfoCandidates();
+    ProcessJSFunctionCandidates();
+  }
+
+ private:
+  static void ProcessJSFunctionCandidates() {
+    Code* lazy_compile = Builtins::builtin(Builtins::LazyCompile);
+
+    JSFunction* candidate = jsfunction_candidates_head_;
+    JSFunction* next_candidate;
+    while (candidate != NULL) {
+      next_candidate = GetNextCandidate(candidate);
+
+      SharedFunctionInfo* shared = candidate->unchecked_shared();
+
+      Code* code = shared->unchecked_code();
+      if (!code->IsMarked()) {
+        shared->set_code(lazy_compile);
+        candidate->set_code(lazy_compile);
+      } else {
+        candidate->set_code(shared->unchecked_code());
+      }
+
+      candidate = next_candidate;
+    }
+
+    jsfunction_candidates_head_ = NULL;
+  }
+
+
+  static void ProcessSharedFunctionInfoCandidates() {
+    Code* lazy_compile = Builtins::builtin(Builtins::LazyCompile);
+
+    SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+    SharedFunctionInfo* next_candidate;
+    while (candidate != NULL) {
+      next_candidate = GetNextCandidate(candidate);
+      SetNextCandidate(candidate, NULL);
+
+      Code* code = candidate->unchecked_code();
+      if (!code->IsMarked()) {
+        candidate->set_code(lazy_compile);
+      }
+
+      candidate = next_candidate;
+    }
+
+    shared_function_info_candidates_head_ = NULL;
+  }
+
+
+  static JSFunction** GetNextCandidateField(JSFunction* candidate) {
+    return reinterpret_cast<JSFunction**>(
+        candidate->address() + JSFunction::kCodeEntryOffset);
+  }
+
+
+  static JSFunction* GetNextCandidate(JSFunction* candidate) {
+    return *GetNextCandidateField(candidate);
+  }
+
+
+  static void SetNextCandidate(JSFunction* candidate,
+                               JSFunction* next_candidate) {
+    *GetNextCandidateField(candidate) = next_candidate;
+  }
+
+
+  STATIC_ASSERT(kPointerSize <= Code::kHeaderSize - Code::kHeaderPaddingStart);
+
+
+  static SharedFunctionInfo** GetNextCandidateField(
+      SharedFunctionInfo* candidate) {
+    Code* code = candidate->unchecked_code();
+    return reinterpret_cast<SharedFunctionInfo**>(
+        code->address() + Code::kHeaderPaddingStart);
+  }
+
+
+  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
+    return *GetNextCandidateField(candidate);
+  }
+
+
+  static void SetNextCandidate(SharedFunctionInfo* candidate,
+                               SharedFunctionInfo* next_candidate) {
+    *GetNextCandidateField(candidate) = next_candidate;
+  }
+
+  static JSFunction* jsfunction_candidates_head_;
+
+  static SharedFunctionInfo* shared_function_info_candidates_head_;
+};
+
+JSFunction* FlushCode::jsfunction_candidates_head_ = NULL;
+
+SharedFunctionInfo* FlushCode::shared_function_info_candidates_head_ = NULL;
 
 static inline HeapObject* ShortCircuitConsString(Object** p) {
   // Optimization: If the heap object pointed to by p is a non-symbol
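
The FlushCode helper above queues flush candidates without allocating
during GC by threading an intrusive list through memory the objects
already own: JSFunction candidates reuse the code-entry field, and
SharedFunctionInfo candidates reuse the header padding of their
unoptimized Code object (hence the STATIC_ASSERT that a pointer fits
there). A minimal self-contained sketch of the idea, with simplified
hypothetical types:

    #include <cstddef>

    struct Obj {
      void* reused_field;  // a field whose value is dead during GC
    };

    static Obj* candidates_head = NULL;

    // Push without allocating: the object itself stores the "next" link.
    static void AddCandidate(Obj* obj) {
      obj->reused_field = candidates_head;
      candidates_head = obj;
    }

    static void ProcessCandidates(void (*process)(Obj*)) {
      Obj* candidate = candidates_head;
      while (candidate != NULL) {
        // Read the link first: process() restores the field's real value.
        Obj* next = static_cast<Obj*>(candidate->reused_field);
        process(candidate);
        candidate = next;
      }
      candidates_head = NULL;
    }
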
@@ -260,8 +375,13 @@
   static void EnableCodeFlushing(bool enabled) {
     if (enabled) {
       table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
+      table_.Register(kVisitSharedFunctionInfo,
+                      &VisitSharedFunctionInfoAndFlushCode);
+
     } else {
       table_.Register(kVisitJSFunction, &VisitJSFunction);
+      table_.Register(kVisitSharedFunctionInfo,
+                      &VisitSharedFunctionInfoGeneric);
     }
   }
 
@@ -287,8 +407,6 @@
                                       Context::MarkCompactBodyDescriptor,
                                       void>::Visit);
 
-    table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
-
     table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
@@ -304,7 +422,11 @@
 
     table_.Register(kVisitCode, &VisitCode);
 
-    table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
+    table_.Register(kVisitSharedFunctionInfo,
+                    &VisitSharedFunctionInfoAndFlushCode);
+
+    table_.Register(kVisitJSFunction,
+                    &VisitJSFunctionAndFlushCode);
 
     table_.Register(kVisitPropertyCell,
                     &FixedBodyVisitor<StaticMarkingVisitor,
@@ -350,6 +472,16 @@
     }
   }
 
+  static void VisitGlobalPropertyCell(RelocInfo* rinfo) {
+    ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
+    Object* cell = rinfo->target_cell();
+    Object* old_cell = cell;
+    VisitPointer(&cell);
+    if (cell != old_cell) {
+      rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
+    }
+  }
+
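VisitGlobalPropertyCell uses the same read-visit-write-back pattern as
VisitCodeEntry below: copy the target out, run the visitor on the copy
(which may mark it or substitute a forwarding address), and write it
back only if the pointer actually changed, so the relocation info is
only rewritten when the target really moved.  A hedged sketch of the
pattern with stand-in types, not V8's:

    struct Object;

    // Stand-in for the marking visitor; in a real collector this may
    // mark *p and/or replace it with a forwarding address.
    void Visit(Object** p) {}

    void UpdateEmbeddedPointer(Object** slot) {
      Object* value = *slot;
      Object* old_value = value;
      Visit(&value);
      if (value != old_value) {
        *slot = value;  // patch only when the visitor moved the target
      }
    }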
   static inline void VisitDebugTarget(RelocInfo* rinfo) {
     ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
             rinfo->IsPatchedReturnSequence()) ||
@@ -446,62 +578,75 @@
         function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
   }
 
-
-  static void FlushCodeForFunction(JSFunction* function) {
+  inline static bool IsFlushable(JSFunction* function) {
     SharedFunctionInfo* shared_info = function->unchecked_shared();
 
-    if (shared_info->IsMarked()) return;
-
-    // Special handling if the function and shared info objects
-    // have different code objects.
-    if (function->unchecked_code() != shared_info->unchecked_code()) {
-      // If the shared function has been flushed but the function has not,
-      // we flush the function if possible.
-      if (!IsCompiled(shared_info) &&
-          IsCompiled(function) &&
-          !function->unchecked_code()->IsMarked()) {
-        function->set_code(shared_info->unchecked_code());
-      }
-      return;
+    // Code is either on the stack, in the compilation cache or referenced
+    // by an optimized version of the function.
+    if (function->unchecked_code()->IsMarked()) {
+      shared_info->set_code_age(0);
+      return false;
     }
 
-    // Code is either on stack or in compilation cache.
+    // We do not flush code for optimized functions.
+    if (function->code() != shared_info->unchecked_code()) {
+      return false;
+    }
+
+    return IsFlushable(shared_info);
+  }
+
+  inline static bool IsFlushable(SharedFunctionInfo* shared_info) {
+    // Code is either on the stack, in the compilation cache or referenced
+    // by an optimized version of the function.
     if (shared_info->unchecked_code()->IsMarked()) {
       shared_info->set_code_age(0);
-      return;
+      return false;
     }
 
     // The function must be compiled and have the source code available,
     // to be able to recompile it in case we need the function again.
-    if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) return;
+    if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) {
+      return false;
+    }
 
     // We never flush code for Api functions.
     Object* function_data = shared_info->function_data();
     if (function_data->IsHeapObject() &&
         (SafeMap(function_data)->instance_type() ==
          FUNCTION_TEMPLATE_INFO_TYPE)) {
-      return;
+      return false;
     }
 
     // Only flush code for functions.
-    if (shared_info->code()->kind() != Code::FUNCTION) return;
+    if (shared_info->code()->kind() != Code::FUNCTION) return false;
 
     // Function must be lazy compilable.
-    if (!shared_info->allows_lazy_compilation()) return;
+    if (!shared_info->allows_lazy_compilation()) return false;
 
     // If this is a full script wrapped in a function we do not flush the code.
-    if (shared_info->is_toplevel()) return;
+    if (shared_info->is_toplevel()) return false;
 
     // Age this shared function info.
     if (shared_info->code_age() < kCodeAgeThreshold) {
       shared_info->set_code_age(shared_info->code_age() + 1);
-      return;
+      return false;
     }
 
-    // Compute the lazy compilable version of the code.
-    Code* code = Builtins::builtin(Builtins::LazyCompile);
-    shared_info->set_code(code);
-    function->set_code(code);
+    return true;
+  }
+
+
+  static bool FlushCodeForFunction(JSFunction* function) {
+    if (!IsFlushable(function)) return false;
+
+    // This function's code looks flushable, but we have to postpone the
+    // decision until we have seen all functions that point to the same
+    // SharedFunctionInfo, because some of them might be optimized.
+    // That would make the non-optimized version of the code non-flushable,
+    // because it is required for bailing out from optimized code.
+    FlushCode::AddCandidate(function);
+    return true;
   }
 
 
@@ -539,17 +684,38 @@
   }
 
 
-  static void VisitSharedFunctionInfo(Map* map, HeapObject* object) {
+  static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
     SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
-    if (shared->IsInobjectSlackTrackingInProgress()) {
-      shared->DetachInitialMap();
-    }
+
+    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
+
     FixedBodyVisitor<StaticMarkingVisitor,
                      SharedFunctionInfo::BodyDescriptor,
                      void>::Visit(map, object);
   }
 
 
+  static void VisitSharedFunctionInfoAndFlushCode(Map* map,
+                                                  HeapObject* object) {
+    VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
+  }
+
+
+  static void VisitSharedFunctionInfoAndFlushCodeGeneric(
+      Map* map, HeapObject* object, bool known_flush_code_candidate) {
+    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
+
+    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
+
+    if (!known_flush_code_candidate) {
+      known_flush_code_candidate = IsFlushable(shared);
+      if (known_flush_code_candidate) FlushCode::AddCandidate(shared);
+    }
+
+    VisitSharedFunctionInfoFields(object, known_flush_code_candidate);
+  }
+
+
   static void VisitCodeEntry(Address entry_address) {
     Object* code = Code::GetObjectFromEntryAddress(entry_address);
     Object* old_code = code;
@@ -564,30 +730,98 @@
   static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
     JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
     // The function must have a valid context and not be a builtin.
+    bool flush_code_candidate = false;
     if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
-      FlushCodeForFunction(jsfunction);
+      flush_code_candidate = FlushCodeForFunction(jsfunction);
     }
-    VisitJSFunction(map, object);
+
+    if (!flush_code_candidate) {
+      MarkCompactCollector::MarkObject(
+          jsfunction->unchecked_shared()->unchecked_code());
+
+      if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
+        // For optimized functions we should retain both the non-optimized
+        // version of its code and the non-optimized versions of all inlined
+        // functions.  This is required to support bailing out from inlined
+        // code.
+        DeoptimizationInputData* data =
+            reinterpret_cast<DeoptimizationInputData*>(
+                jsfunction->unchecked_code()->unchecked_deoptimization_data());
+
+        FixedArray* literals = data->UncheckedLiteralArray();
+
+        for (int i = 0, count = data->InlinedFunctionCount()->value();
+             i < count;
+             i++) {
+          JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
+          MarkCompactCollector::MarkObject(
+              inlined->unchecked_shared()->unchecked_code());
+        }
+      }
+    }
+
+    VisitJSFunctionFields(map,
+                          reinterpret_cast<JSFunction*>(object),
+                          flush_code_candidate);
   }
 
 
   static void VisitJSFunction(Map* map, HeapObject* object) {
-#define SLOT_ADDR(obj, offset)   \
-    reinterpret_cast<Object**>((obj)->address() + offset)
+    VisitJSFunctionFields(map,
+                          reinterpret_cast<JSFunction*>(object),
+                          false);
+  }
 
+
+#define SLOT_ADDR(obj, offset) \
+  reinterpret_cast<Object**>((obj)->address() + offset)
+
+
+  static inline void VisitJSFunctionFields(Map* map,
+                                           JSFunction* object,
+                                           bool flush_code_candidate) {
     VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset),
                   SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
 
-    VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);
+    if (!flush_code_candidate) {
+      VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);
+    } else {
+      // Don't visit the code object.
+
+      // Visit the shared function info here to avoid double-checking its
+      // flushability.
+      SharedFunctionInfo* shared_info = object->unchecked_shared();
+      if (!shared_info->IsMarked()) {
+        Map* shared_info_map = shared_info->map();
+        MarkCompactCollector::SetMark(shared_info);
+        MarkCompactCollector::MarkObject(shared_info_map);
+        VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
+                                                   shared_info,
+                                                   true);
+      }
+    }
 
     VisitPointers(SLOT_ADDR(object,
                             JSFunction::kCodeEntryOffset + kPointerSize),
-                  SLOT_ADDR(object, JSFunction::kSize));
+                  SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
 
-#undef SLOT_ADDR
+    // Don't visit the next function list field as it is a weak reference.
   }
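
Note the changed upper bound: strong pointers are now visited only up to
JSFunction::kNonWeakFieldsEndOffset, so the trailing next-function-link
slot is never marked through and behaves as a weak reference, exactly as
the comment says.  A sketch of the idea with illustrative names:

    struct Object;

    void Visit(Object** slot) {}  // stand-in for the real marking visitor

    void VisitStrongSlots(Object** start, Object** non_weak_end) {
      for (Object** slot = start; slot < non_weak_end; slot++) {
        Visit(slot);
      }
      // Slots at or past non_weak_end (e.g. a next-function link) are
      // weak: skipping them here means they alone cannot keep their
      // target alive, and they are cleared or updated separately.
    }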
 
 
+  static void VisitSharedFunctionInfoFields(HeapObject* object,
+                                            bool flush_code_candidate) {
+    VisitPointer(SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
+
+    if (!flush_code_candidate) {
+      VisitPointer(SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
+    }
+
+    VisitPointers(SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
+                  SLOT_ADDR(object, SharedFunctionInfo::kSize));
+  }
+
+#undef SLOT_ADDR
+
   typedef void (*Callback)(Map* map, HeapObject* object);
 
   static VisitorDispatchTable<Callback> table_;
@@ -612,6 +846,10 @@
     StaticMarkingVisitor::VisitCodeTarget(rinfo);
   }
 
+  void VisitGlobalPropertyCell(RelocInfo* rinfo) {
+    StaticMarkingVisitor::VisitGlobalPropertyCell(rinfo);
+  }
+
   void VisitDebugTarget(RelocInfo* rinfo) {
     StaticMarkingVisitor::VisitDebugTarget(rinfo);
   }
@@ -636,8 +874,10 @@
 
   void VisitPointer(Object** slot) {
     Object* obj = *slot;
-    if (obj->IsHeapObject()) {
-      MarkCompactCollector::MarkObject(HeapObject::cast(obj));
+    if (obj->IsSharedFunctionInfo()) {
+      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
+      MarkCompactCollector::MarkObject(shared->unchecked_code());
+      MarkCompactCollector::MarkObject(shared);
     }
   }
 };
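
This visitor pins both the SharedFunctionInfo and its code, so anything
reachable from the compilation cache (and, via the new call below, from
handle scopes) is never selected for flushing.  An illustrative sketch
of the pattern, not V8 code:

    struct HeapObj {
      bool marked;
      HeapObj* code;  // dependent object that must stay usable
    };

    void Mark(HeapObj* o) {
      if (o != 0) o->marked = true;
    }

    // When scanning a cache whose entries must remain directly callable,
    // mark the dependent code object together with the entry itself.
    void VisitCacheEntry(HeapObj** slot) {
      HeapObj* entry = *slot;
      if (entry != 0) {
        Mark(entry->code);  // pin the code so the entry is never flushed
        Mark(entry);
      }
    }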
@@ -673,6 +913,7 @@
 
   SharedFunctionInfoMarkingVisitor visitor;
   CompilationCache::IterateFunctions(&visitor);
+  HandleScopeImplementer::Iterate(&visitor);
 
   ProcessMarkingStack();
 }
@@ -1040,6 +1281,11 @@
 
 void MarkCompactCollector::MarkLiveObjects() {
   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
+  // The recursive GC marker detects when it is nearing stack overflow,
+  // and switches to a different marking system.  JS interrupts interfere
+  // with the C stack limit check.
+  PostponeInterruptsScope postpone;
+
 #ifdef DEBUG
   ASSERT(state_ == PREPARE_GC);
   state_ = MARK_LIVE_OBJECTS;
@@ -1096,6 +1342,9 @@
 
   // Remove object groups after marking phase.
   GlobalHandles::RemoveObjectGroups();
+
+  // Flush code from collected candidates.
+  FlushCode::ProcessCandidates();
 }
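
Ordering matters here: flush candidates are gathered as a side effect of
marking, and ProcessCandidates only runs once the mark bits are final
(after object groups have been removed), because flushability is decided
by inspecting those bits.  A hedged sketch of the implied phase order,
not V8's actual driver code:

    void MarkLiveObjects() { /* fills the candidate lists as it marks */ }
    void RemoveObjectGroups() {}
    void ProcessFlushCandidates() { /* reads the final mark bits */ }
    void SweepSpaces() {}

    void CollectGarbage() {
      MarkLiveObjects();
      RemoveObjectGroups();
      ProcessFlushCandidates();  // safe only after marking has completed
      SweepSpaces();
    }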
 
 
@@ -1305,8 +1554,8 @@
 }
 
 
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
-    HeapObject* ignore, int object_size) {
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(HeapObject* ignore,
+                                                            int object_size) {
   return Heap::cell_space()->MCAllocateRaw(object_size);
 }
 
@@ -2292,8 +2541,9 @@
 
   // Large objects do not move, the map word can be updated directly.
   LargeObjectIterator it(Heap::lo_space());
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
     UpdatePointersInNewObject(obj);
+  }
 
   USE(live_maps_size);
   USE(live_pointer_olds_size);
diff --git a/src/math.js b/src/math.js
index fc3b132..90667d7 100644
--- a/src/math.js
+++ b/src/math.js
@@ -113,7 +113,7 @@
 // ECMA 262 - 15.8.2.10
 function MathLog(x) {
   if (!IS_NUMBER(x)) x = ToNumber(x);
-  return %Math_log(x);
+  return %_MathLog(x);
 }
 
 // ECMA 262 - 15.8.2.11
diff --git a/src/memory.h b/src/memory.h
index 27f32f7..901e78d 100644
--- a/src/memory.h
+++ b/src/memory.h
@@ -60,6 +60,10 @@
     return *reinterpret_cast<int*>(addr);
   }
 
+  static double& double_at(Address addr)  {
+    return *reinterpret_cast<double*>(addr);
+  }
+
   static Address& Address_at(Address addr)  {
     return *reinterpret_cast<Address*>(addr);
   }
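
For reference, a standalone analogue of the new double_at accessor,
showing that it reads and writes a double through an arbitrary address;
the types mirror the patch but are simplified:

    #include <cstdint>

    typedef uint8_t* Address;

    struct Memory {
      static double& double_at(Address addr) {
        return *reinterpret_cast<double*>(addr);
      }
    };

    int main() {
      double storage = 1.5;
      Address addr = reinterpret_cast<Address>(&storage);
      // Returning a reference makes the accessor usable on both sides
      // of an assignment.
      Memory::double_at(addr) = Memory::double_at(addr) * 2.0;
      return storage == 3.0 ? 0 : 1;
    }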
diff --git a/src/messages.js b/src/messages.js
index 7f9c0f8..c19f4a9 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -190,7 +190,6 @@
       illegal_return:               "Illegal return statement",
       error_loading_debugger:       "Error loading debugger",
       no_input_to_regexp:           "No input to %0",
-      result_not_primitive:         "Result of %0 must be a primitive, was %1",
       invalid_json:                 "String '%0' is not valid JSON",
       circular_structure:           "Converting circular structure to JSON",
       obj_ctor_property_non_object: "Object.%0 called on non-object",
@@ -904,11 +903,12 @@
 
 function FormatRawStackTrace(error, raw_stack) {
   var frames = [ ];
-  for (var i = 0; i < raw_stack.length; i += 3) {
+  for (var i = 0; i < raw_stack.length; i += 4) {
     var recv = raw_stack[i];
-    var fun = raw_stack[i+1];
-    var pc = raw_stack[i+2];
-    var pos = %FunctionGetPositionForOffset(fun, pc);
+    var fun = raw_stack[i + 1];
+    var code = raw_stack[i + 2];
+    var pc = raw_stack[i + 3];
+    var pos = %FunctionGetPositionForOffset(code, pc);
     frames.push(new CallSite(recv, fun, pos));
   }
   if (IS_FUNCTION($Error.prepareStackTrace)) {
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index 6b9e965..55836ce 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -1533,9 +1533,9 @@
 };
 
 
-FrameMirror.prototype.evaluate = function(source, disable_break) {
+FrameMirror.prototype.evaluate = function(source, disable_break,
+                                          opt_context_object) {
   var result = %DebugEvaluate(this.break_id_, this.details_.frameId(),
-                              source, Boolean(disable_break));
+                              source, Boolean(disable_break),
+                              opt_context_object);
   return MakeMirror(result);
 };
 
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 69219ee..53296d9 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -35,32 +35,34 @@
 namespace v8 {
 namespace internal {
 
-#ifdef DEBUG
+#ifdef OBJECT_PRINT
 
 static const char* TypeToString(InstanceType type);
 
 
-void MaybeObject::Print() {
+void MaybeObject::Print(FILE* out) {
   Object* this_as_object;
   if (ToObject(&this_as_object)) {
     if (this_as_object->IsSmi()) {
-      Smi::cast(this_as_object)->SmiPrint();
+      Smi::cast(this_as_object)->SmiPrint(out);
     } else {
-      HeapObject::cast(this_as_object)->HeapObjectPrint();
+      HeapObject::cast(this_as_object)->HeapObjectPrint(out);
     }
   } else {
-    Failure::cast(this)->FailurePrint();
+    Failure::cast(this)->FailurePrint(out);
   }
-  Flush();
+  Flush(out);
 }
 
 
-void MaybeObject::PrintLn() {
-  Print();
-  PrintF("\n");
+void MaybeObject::PrintLn(FILE* out) {
+  Print(out);
+  PrintF(out, "\n");
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void MaybeObject::Verify() {
   Object* this_as_object;
   if (ToObject(&this_as_object)) {
@@ -92,114 +94,120 @@
 void Failure::FailureVerify() {
   ASSERT(IsFailure());
 }
+#endif  // DEBUG
 
 
-void HeapObject::PrintHeader(const char* id) {
-  PrintF("%p: [%s]\n", reinterpret_cast<void*>(this), id);
+#ifdef OBJECT_PRINT
+void HeapObject::PrintHeader(FILE* out, const char* id) {
+  PrintF(out, "%p: [%s]\n", reinterpret_cast<void*>(this), id);
 }
 
 
-void HeapObject::HeapObjectPrint() {
+void HeapObject::HeapObjectPrint(FILE* out) {
   InstanceType instance_type = map()->instance_type();
 
   HandleScope scope;
   if (instance_type < FIRST_NONSTRING_TYPE) {
-    String::cast(this)->StringPrint();
+    String::cast(this)->StringPrint(out);
     return;
   }
 
   switch (instance_type) {
     case MAP_TYPE:
-      Map::cast(this)->MapPrint();
+      Map::cast(this)->MapPrint(out);
       break;
     case HEAP_NUMBER_TYPE:
-      HeapNumber::cast(this)->HeapNumberPrint();
+      HeapNumber::cast(this)->HeapNumberPrint(out);
       break;
     case FIXED_ARRAY_TYPE:
-      FixedArray::cast(this)->FixedArrayPrint();
+      FixedArray::cast(this)->FixedArrayPrint(out);
       break;
     case BYTE_ARRAY_TYPE:
-      ByteArray::cast(this)->ByteArrayPrint();
+      ByteArray::cast(this)->ByteArrayPrint(out);
       break;
     case PIXEL_ARRAY_TYPE:
-      PixelArray::cast(this)->PixelArrayPrint();
+      PixelArray::cast(this)->PixelArrayPrint(out);
       break;
     case EXTERNAL_BYTE_ARRAY_TYPE:
-      ExternalByteArray::cast(this)->ExternalByteArrayPrint();
+      ExternalByteArray::cast(this)->ExternalByteArrayPrint(out);
       break;
     case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
-      ExternalUnsignedByteArray::cast(this)->ExternalUnsignedByteArrayPrint();
+      ExternalUnsignedByteArray::cast(this)
+          ->ExternalUnsignedByteArrayPrint(out);
       break;
     case EXTERNAL_SHORT_ARRAY_TYPE:
-      ExternalShortArray::cast(this)->ExternalShortArrayPrint();
+      ExternalShortArray::cast(this)->ExternalShortArrayPrint(out);
       break;
     case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
-      ExternalUnsignedShortArray::cast(this)->ExternalUnsignedShortArrayPrint();
+      ExternalUnsignedShortArray::cast(this)
+          ->ExternalUnsignedShortArrayPrint(out);
       break;
     case EXTERNAL_INT_ARRAY_TYPE:
-      ExternalIntArray::cast(this)->ExternalIntArrayPrint();
+      ExternalIntArray::cast(this)->ExternalIntArrayPrint(out);
       break;
     case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
-      ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayPrint();
+      ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayPrint(out);
       break;
     case EXTERNAL_FLOAT_ARRAY_TYPE:
-      ExternalFloatArray::cast(this)->ExternalFloatArrayPrint();
+      ExternalFloatArray::cast(this)->ExternalFloatArrayPrint(out);
       break;
     case FILLER_TYPE:
-      PrintF("filler");
+      PrintF(out, "filler");
       break;
     case JS_OBJECT_TYPE:  // fall through
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_ARRAY_TYPE:
     case JS_REGEXP_TYPE:
-      JSObject::cast(this)->JSObjectPrint();
+      JSObject::cast(this)->JSObjectPrint(out);
       break;
     case ODDBALL_TYPE:
-      Oddball::cast(this)->to_string()->Print();
+      Oddball::cast(this)->to_string()->Print(out);
       break;
     case JS_FUNCTION_TYPE:
-      JSFunction::cast(this)->JSFunctionPrint();
+      JSFunction::cast(this)->JSFunctionPrint(out);
       break;
     case JS_GLOBAL_PROXY_TYPE:
-      JSGlobalProxy::cast(this)->JSGlobalProxyPrint();
+      JSGlobalProxy::cast(this)->JSGlobalProxyPrint(out);
       break;
     case JS_GLOBAL_OBJECT_TYPE:
-      JSGlobalObject::cast(this)->JSGlobalObjectPrint();
+      JSGlobalObject::cast(this)->JSGlobalObjectPrint(out);
       break;
     case JS_BUILTINS_OBJECT_TYPE:
-      JSBuiltinsObject::cast(this)->JSBuiltinsObjectPrint();
+      JSBuiltinsObject::cast(this)->JSBuiltinsObjectPrint(out);
       break;
     case JS_VALUE_TYPE:
-      PrintF("Value wrapper around:");
-      JSValue::cast(this)->value()->Print();
+      PrintF(out, "Value wrapper around:");
+      JSValue::cast(this)->value()->Print(out);
       break;
     case CODE_TYPE:
-      Code::cast(this)->CodePrint();
+      Code::cast(this)->CodePrint(out);
       break;
     case PROXY_TYPE:
-      Proxy::cast(this)->ProxyPrint();
+      Proxy::cast(this)->ProxyPrint(out);
       break;
     case SHARED_FUNCTION_INFO_TYPE:
-      SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint();
+      SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(out);
       break;
     case JS_GLOBAL_PROPERTY_CELL_TYPE:
-      JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellPrint();
+      JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellPrint(out);
       break;
 #define MAKE_STRUCT_CASE(NAME, Name, name) \
   case NAME##_TYPE:                        \
-    Name::cast(this)->Name##Print();       \
+    Name::cast(this)->Name##Print(out);    \
     break;
   STRUCT_LIST(MAKE_STRUCT_CASE)
 #undef MAKE_STRUCT_CASE
 
     default:
-      PrintF("UNKNOWN TYPE %d", map()->instance_type());
+      PrintF(out, "UNKNOWN TYPE %d", map()->instance_type());
       UNREACHABLE();
       break;
   }
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void HeapObject::HeapObjectVerify() {
   InstanceType instance_type = map()->instance_type();
 
@@ -312,53 +320,57 @@
 void HeapNumber::HeapNumberVerify() {
   ASSERT(IsHeapNumber());
 }
+#endif  // DEBUG
 
 
-void ByteArray::ByteArrayPrint() {
-  PrintF("byte array, data starts at %p", GetDataStartAddress());
+#ifdef OBJECT_PRINT
+void ByteArray::ByteArrayPrint(FILE* out) {
+  PrintF(out, "byte array, data starts at %p", GetDataStartAddress());
 }
 
 
-void PixelArray::PixelArrayPrint() {
-  PrintF("pixel array");
+void PixelArray::PixelArrayPrint(FILE* out) {
+  PrintF(out, "pixel array");
 }
 
 
-void ExternalByteArray::ExternalByteArrayPrint() {
-  PrintF("external byte array");
+void ExternalByteArray::ExternalByteArrayPrint(FILE* out) {
+  PrintF(out, "external byte array");
 }
 
 
-void ExternalUnsignedByteArray::ExternalUnsignedByteArrayPrint() {
-  PrintF("external unsigned byte array");
+void ExternalUnsignedByteArray::ExternalUnsignedByteArrayPrint(FILE* out) {
+  PrintF(out, "external unsigned byte array");
 }
 
 
-void ExternalShortArray::ExternalShortArrayPrint() {
-  PrintF("external short array");
+void ExternalShortArray::ExternalShortArrayPrint(FILE* out) {
+  PrintF(out, "external short array");
 }
 
 
-void ExternalUnsignedShortArray::ExternalUnsignedShortArrayPrint() {
-  PrintF("external unsigned short array");
+void ExternalUnsignedShortArray::ExternalUnsignedShortArrayPrint(FILE* out) {
+  PrintF(out, "external unsigned short array");
 }
 
 
-void ExternalIntArray::ExternalIntArrayPrint() {
-  PrintF("external int array");
+void ExternalIntArray::ExternalIntArrayPrint(FILE* out) {
+  PrintF(out, "external int array");
 }
 
 
-void ExternalUnsignedIntArray::ExternalUnsignedIntArrayPrint() {
-  PrintF("external unsigned int array");
+void ExternalUnsignedIntArray::ExternalUnsignedIntArrayPrint(FILE* out) {
+  PrintF(out, "external unsigned int array");
 }
 
 
-void ExternalFloatArray::ExternalFloatArrayPrint() {
-  PrintF("external float array");
+void ExternalFloatArray::ExternalFloatArrayPrint(FILE* out) {
+  PrintF(out, "external float array");
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void ByteArray::ByteArrayVerify() {
   ASSERT(IsByteArray());
 }
@@ -402,38 +414,40 @@
 void ExternalFloatArray::ExternalFloatArrayVerify() {
   ASSERT(IsExternalFloatArray());
 }
+#endif  // DEBUG
 
 
-void JSObject::PrintProperties() {
+#ifdef OBJECT_PRINT
+void JSObject::PrintProperties(FILE* out) {
   if (HasFastProperties()) {
     DescriptorArray* descs = map()->instance_descriptors();
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
-      PrintF("   ");
-      descs->GetKey(i)->StringPrint();
-      PrintF(": ");
+      PrintF(out, "   ");
+      descs->GetKey(i)->StringPrint(out);
+      PrintF(out, ": ");
       switch (descs->GetType(i)) {
         case FIELD: {
           int index = descs->GetFieldIndex(i);
-          FastPropertyAt(index)->ShortPrint();
-          PrintF(" (field at offset %d)\n", index);
+          FastPropertyAt(index)->ShortPrint(out);
+          PrintF(out, " (field at offset %d)\n", index);
           break;
         }
         case CONSTANT_FUNCTION:
-          descs->GetConstantFunction(i)->ShortPrint();
-          PrintF(" (constant function)\n");
+          descs->GetConstantFunction(i)->ShortPrint(out);
+          PrintF(out, " (constant function)\n");
           break;
         case CALLBACKS:
-          descs->GetCallbacksObject(i)->ShortPrint();
-          PrintF(" (callback)\n");
+          descs->GetCallbacksObject(i)->ShortPrint(out);
+          PrintF(out, " (callback)\n");
           break;
         case MAP_TRANSITION:
-          PrintF(" (map transition)\n");
+          PrintF(out, " (map transition)\n");
           break;
         case CONSTANT_TRANSITION:
-          PrintF(" (constant transition)\n");
+          PrintF(out, " (constant transition)\n");
           break;
         case NULL_DESCRIPTOR:
-          PrintF(" (null descriptor)\n");
+          PrintF(out, " (null descriptor)\n");
           break;
         default:
           UNREACHABLE();
@@ -441,34 +455,34 @@
       }
     }
   } else {
-    property_dictionary()->Print();
+    property_dictionary()->Print(out);
   }
 }
 
 
-void JSObject::PrintElements() {
+void JSObject::PrintElements(FILE* out) {
   switch (GetElementsKind()) {
     case FAST_ELEMENTS: {
       // Print in array notation for non-sparse arrays.
       FixedArray* p = FixedArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF("   %d: ", i);
-        p->get(i)->ShortPrint();
-        PrintF("\n");
+        PrintF(out, "   %d: ", i);
+        p->get(i)->ShortPrint(out);
+        PrintF(out, "\n");
       }
       break;
     }
     case PIXEL_ELEMENTS: {
       PixelArray* p = PixelArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF("   %d: %d\n", i, p->get(i));
+        PrintF(out, "   %d: %d\n", i, p->get(i));
       }
       break;
     }
     case EXTERNAL_BYTE_ELEMENTS: {
       ExternalByteArray* p = ExternalByteArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get(i)));
       }
       break;
     }
@@ -476,14 +490,14 @@
       ExternalUnsignedByteArray* p =
           ExternalUnsignedByteArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get(i)));
       }
       break;
     }
     case EXTERNAL_SHORT_ELEMENTS: {
       ExternalShortArray* p = ExternalShortArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get(i)));
       }
       break;
     }
@@ -491,14 +505,14 @@
       ExternalUnsignedShortArray* p =
           ExternalUnsignedShortArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get(i)));
       }
       break;
     }
     case EXTERNAL_INT_ELEMENTS: {
       ExternalIntArray* p = ExternalIntArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get(i)));
       }
       break;
     }
@@ -506,19 +520,19 @@
       ExternalUnsignedIntArray* p =
           ExternalUnsignedIntArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+        PrintF(out, "   %d: %d\n", i, static_cast<int>(p->get(i)));
       }
       break;
     }
     case EXTERNAL_FLOAT_ELEMENTS: {
       ExternalFloatArray* p = ExternalFloatArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF("   %d: %f\n", i, p->get(i));
+        PrintF(out, "   %d: %f\n", i, p->get(i));
       }
       break;
     }
     case DICTIONARY_ELEMENTS:
-      elements()->Print();
+      elements()->Print(out);
       break;
     default:
       UNREACHABLE();
@@ -527,17 +541,19 @@
 }
 
 
-void JSObject::JSObjectPrint() {
-  PrintF("%p: [JSObject]\n", reinterpret_cast<void*>(this));
-  PrintF(" - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(" - prototype = %p\n", reinterpret_cast<void*>(GetPrototype()));
-  PrintF(" {\n");
-  PrintProperties();
-  PrintElements();
-  PrintF(" }\n");
+void JSObject::JSObjectPrint(FILE* out) {
+  PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
+  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
+  PrintF(out, " - prototype = %p\n", reinterpret_cast<void*>(GetPrototype()));
+  PrintF(out, " {\n");
+  PrintProperties(out);
+  PrintElements(out);
+  PrintF(out, " }\n");
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void JSObject::JSObjectVerify() {
   VerifyHeapPointer(properties());
   VerifyHeapPointer(elements());
@@ -551,8 +567,10 @@
           elements()->map() == Heap::fixed_cow_array_map()));
   ASSERT(map()->has_fast_elements() == HasFastElements());
 }
+#endif  // DEBUG
 
 
+#ifdef OBJECT_PRINT
 static const char* TypeToString(InstanceType type) {
   switch (type) {
     case INVALID_TYPE: return "INVALID";
@@ -608,42 +626,44 @@
 }
 
 
-void Map::MapPrint() {
-  HeapObject::PrintHeader("Map");
-  PrintF(" - type: %s\n", TypeToString(instance_type()));
-  PrintF(" - instance size: %d\n", instance_size());
-  PrintF(" - inobject properties: %d\n", inobject_properties());
-  PrintF(" - pre-allocated property fields: %d\n",
+void Map::MapPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "Map");
+  PrintF(out, " - type: %s\n", TypeToString(instance_type()));
+  PrintF(out, " - instance size: %d\n", instance_size());
+  PrintF(out, " - inobject properties: %d\n", inobject_properties());
+  PrintF(out, " - pre-allocated property fields: %d\n",
       pre_allocated_property_fields());
-  PrintF(" - unused property fields: %d\n", unused_property_fields());
+  PrintF(out, " - unused property fields: %d\n", unused_property_fields());
   if (is_hidden_prototype()) {
-    PrintF(" - hidden_prototype\n");
+    PrintF(out, " - hidden_prototype\n");
   }
   if (has_named_interceptor()) {
-    PrintF(" - named_interceptor\n");
+    PrintF(out, " - named_interceptor\n");
   }
   if (has_indexed_interceptor()) {
-    PrintF(" - indexed_interceptor\n");
+    PrintF(out, " - indexed_interceptor\n");
   }
   if (is_undetectable()) {
-    PrintF(" - undetectable\n");
+    PrintF(out, " - undetectable\n");
   }
   if (has_instance_call_handler()) {
-    PrintF(" - instance_call_handler\n");
+    PrintF(out, " - instance_call_handler\n");
   }
   if (is_access_check_needed()) {
-    PrintF(" - access_check_needed\n");
+    PrintF(out, " - access_check_needed\n");
   }
-  PrintF(" - instance descriptors: ");
-  instance_descriptors()->ShortPrint();
-  PrintF("\n - prototype: ");
-  prototype()->ShortPrint();
-  PrintF("\n - constructor: ");
-  constructor()->ShortPrint();
-  PrintF("\n");
+  PrintF(out, " - instance descriptors: ");
+  instance_descriptors()->ShortPrint(out);
+  PrintF(out, "\n - prototype: ");
+  prototype()->ShortPrint(out);
+  PrintF(out, "\n - constructor: ");
+  constructor()->ShortPrint(out);
+  PrintF(out, "\n");
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void Map::MapVerify() {
   ASSERT(!Heap::InNewSpace(this));
   ASSERT(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
@@ -665,17 +685,21 @@
   ASSERT_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
       visitor_id());
 }
+#endif  // DEBUG
 
 
-void CodeCache::CodeCachePrint() {
-  HeapObject::PrintHeader("CodeCache");
-  PrintF("\n - default_cache: ");
-  default_cache()->ShortPrint();
-  PrintF("\n - normal_type_cache: ");
-  normal_type_cache()->ShortPrint();
+#ifdef OBJECT_PRINT
+void CodeCache::CodeCachePrint(FILE* out) {
+  HeapObject::PrintHeader(out, "CodeCache");
+  PrintF(out, "\n - default_cache: ");
+  default_cache()->ShortPrint(out);
+  PrintF(out, "\n - normal_type_cache: ");
+  normal_type_cache()->ShortPrint(out);
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void CodeCache::CodeCacheVerify() {
   VerifyHeapPointer(default_cache());
   VerifyHeapPointer(normal_type_cache());
@@ -683,19 +707,23 @@
   ASSERT(normal_type_cache()->IsUndefined()
          || normal_type_cache()->IsCodeCacheHashTable());
 }
+#endif  // DEBUG
 
 
-void FixedArray::FixedArrayPrint() {
-  HeapObject::PrintHeader("FixedArray");
-  PrintF(" - length: %d", length());
+#ifdef OBJECT_PRINT
+void FixedArray::FixedArrayPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "FixedArray");
+  PrintF(out, " - length: %d", length());
   for (int i = 0; i < length(); i++) {
-    PrintF("\n  [%d]: ", i);
-    get(i)->ShortPrint();
+    PrintF(out, "\n  [%d]: ", i);
+    get(i)->ShortPrint(out);
   }
-  PrintF("\n");
+  PrintF(out, "\n");
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void FixedArray::FixedArrayVerify() {
   for (int i = 0; i < length(); i++) {
     Object* e = get(i);
@@ -706,39 +734,57 @@
     }
   }
 }
+#endif  // DEBUG
 
 
-void JSValue::JSValuePrint() {
-  HeapObject::PrintHeader("ValueObject");
-  value()->Print();
+#ifdef OBJECT_PRINT
+void JSValue::JSValuePrint(FILE* out) {
+  HeapObject::PrintHeader(out, "ValueObject");
+  value()->Print(out);
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void JSValue::JSValueVerify() {
   Object* v = value();
   if (v->IsHeapObject()) {
     VerifyHeapPointer(v);
   }
 }
+#endif  // DEBUG
 
 
-void String::StringPrint() {
+#ifdef OBJECT_PRINT
+void String::StringPrint(FILE* out) {
   if (StringShape(this).IsSymbol()) {
-    PrintF("#");
+    PrintF(out, "#");
   } else if (StringShape(this).IsCons()) {
-    PrintF("c\"");
+    PrintF(out, "c\"");
   } else {
-    PrintF("\"");
+    PrintF(out, "\"");
   }
 
-  for (int i = 0; i < length(); i++) {
-    PrintF("%c", Get(i));
+  const char truncated_epilogue[] = "...<truncated>";
+  int len = length();
+  if (!FLAG_use_verbose_printer) {
+    if (len > 100) {
+      len = 100 - sizeof(truncated_epilogue);
+    }
+  }
+  for (int i = 0; i < len; i++) {
+    PrintF(out, "%c", Get(i));
+  }
+  if (len != length()) {
+    PrintF(out, "%s", truncated_epilogue);
   }
 
-  if (!StringShape(this).IsSymbol()) PrintF("\"");
+  if (!StringShape(this).IsSymbol()) PrintF(out, "\"");
 }
+#endif  // OBJECT_PRINT
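
With the 100-character budget above and sizeof(truncated_epilogue) == 15
(fourteen visible characters plus the terminating NUL), a long string
prints its first 85 characters followed by the marker.  A standalone
analogue of the same logic:

    #include <cstdio>
    #include <cstring>

    static void PrintTruncated(const char* s) {
      const char truncated_epilogue[] = "...<truncated>";
      int length = static_cast<int>(std::strlen(s));
      int len = length;
      if (len > 100) {
        len = 100 - static_cast<int>(sizeof(truncated_epilogue));  // 85
      }
      for (int i = 0; i < len; i++) std::putchar(s[i]);
      if (len != length) std::fputs(truncated_epilogue, stdout);
      std::putchar('\n');
    }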
 
 
+#ifdef DEBUG
 void String::StringVerify() {
   CHECK(IsString());
   CHECK(length() >= 0 && length() <= Smi::kMaxValue);
@@ -746,66 +792,78 @@
     CHECK(!Heap::InNewSpace(this));
   }
 }
+#endif  // DEBUG
 
 
-void JSFunction::JSFunctionPrint() {
-  HeapObject::PrintHeader("Function");
-  PrintF(" - map = 0x%p\n", reinterpret_cast<void*>(map()));
-  PrintF(" - initial_map = ");
+#ifdef OBJECT_PRINT
+void JSFunction::JSFunctionPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "Function");
+  PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+  PrintF(out, " - initial_map = ");
   if (has_initial_map()) {
-    initial_map()->ShortPrint();
+    initial_map()->ShortPrint(out);
   }
-  PrintF("\n - shared_info = ");
-  shared()->ShortPrint();
-  PrintF("\n   - name = ");
-  shared()->name()->Print();
-  PrintF("\n - context = ");
-  unchecked_context()->ShortPrint();
-  PrintF("\n - code = ");
-  code()->ShortPrint();
-  PrintF("\n");
+  PrintF(out, "\n - shared_info = ");
+  shared()->ShortPrint(out);
+  PrintF(out, "\n   - name = ");
+  shared()->name()->Print(out);
+  PrintF(out, "\n - context = ");
+  unchecked_context()->ShortPrint(out);
+  PrintF(out, "\n - code = ");
+  code()->ShortPrint(out);
+  PrintF(out, "\n");
 
-  PrintProperties();
-  PrintElements();
+  PrintProperties(out);
+  PrintElements(out);
 
-  PrintF("\n");
+  PrintF(out, "\n");
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void JSFunction::JSFunctionVerify() {
   CHECK(IsJSFunction());
   VerifyObjectField(kPrototypeOrInitialMapOffset);
+  VerifyObjectField(kNextFunctionLinkOffset);
+  CHECK(next_function_link()->IsUndefined() ||
+        next_function_link()->IsJSFunction());
 }
+#endif  // DEBUG
 
 
-void SharedFunctionInfo::SharedFunctionInfoPrint() {
-  HeapObject::PrintHeader("SharedFunctionInfo");
-  PrintF(" - name: ");
-  name()->ShortPrint();
-  PrintF("\n - expected_nof_properties: %d", expected_nof_properties());
-  PrintF("\n - instance class name = ");
-  instance_class_name()->Print();
-  PrintF("\n - code = ");
-  code()->ShortPrint();
-  PrintF("\n - source code = ");
-  GetSourceCode()->ShortPrint();
+#ifdef OBJECT_PRINT
+void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "SharedFunctionInfo");
+  PrintF(out, " - name: ");
+  name()->ShortPrint(out);
+  PrintF(out, "\n - expected_nof_properties: %d", expected_nof_properties());
+  PrintF(out, "\n - instance class name = ");
+  instance_class_name()->Print(out);
+  PrintF(out, "\n - code = ");
+  code()->ShortPrint(out);
+  PrintF(out, "\n - source code = ");
+  GetSourceCode()->ShortPrint(out);
   // Script files are often large and hard to read.
-  // PrintF("\n - script =");
-  // script()->Print();
-  PrintF("\n - function token position = %d", function_token_position());
-  PrintF("\n - start position = %d", start_position());
-  PrintF("\n - end position = %d", end_position());
-  PrintF("\n - is expression = %d", is_expression());
-  PrintF("\n - debug info = ");
-  debug_info()->ShortPrint();
-  PrintF("\n - length = %d", length());
-  PrintF("\n - has_only_simple_this_property_assignments = %d",
+  // PrintF(out, "\n - script =");
+  // script()->Print(out);
+  PrintF(out, "\n - function token position = %d", function_token_position());
+  PrintF(out, "\n - start position = %d", start_position());
+  PrintF(out, "\n - end position = %d", end_position());
+  PrintF(out, "\n - is expression = %d", is_expression());
+  PrintF(out, "\n - debug info = ");
+  debug_info()->ShortPrint(out);
+  PrintF(out, "\n - length = %d", length());
+  PrintF(out, "\n - has_only_simple_this_property_assignments = %d",
          has_only_simple_this_property_assignments());
-  PrintF("\n - this_property_assignments = ");
-  this_property_assignments()->ShortPrint();
-  PrintF("\n");
+  PrintF(out, "\n - this_property_assignments = ");
+  this_property_assignments()->ShortPrint(out);
+  PrintF(out, "\n");
 }
+#endif  // OBJECT_PRINT
 
+
+#ifdef DEBUG
 void SharedFunctionInfo::SharedFunctionInfoVerify() {
   CHECK(IsSharedFunctionInfo());
   VerifyObjectField(kNameOffset);
@@ -816,17 +874,21 @@
   VerifyObjectField(kScriptOffset);
   VerifyObjectField(kDebugInfoOffset);
 }
+#endif  // DEBUG
 
 
-void JSGlobalProxy::JSGlobalProxyPrint() {
-  PrintF("global_proxy");
-  JSObjectPrint();
-  PrintF("context : ");
-  context()->ShortPrint();
-  PrintF("\n");
+#ifdef OBJECT_PRINT
+void JSGlobalProxy::JSGlobalProxyPrint(FILE* out) {
+  PrintF(out, "global_proxy");
+  JSObjectPrint(out);
+  PrintF(out, "context : ");
+  context()->ShortPrint(out);
+  PrintF(out, "\n");
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void JSGlobalProxy::JSGlobalProxyVerify() {
   CHECK(IsJSGlobalProxy());
   JSObjectVerify();
@@ -836,17 +898,21 @@
   CHECK(HasFastElements());
   CHECK_EQ(0, FixedArray::cast(elements())->length());
 }
+#endif  // DEBUG
 
 
-void JSGlobalObject::JSGlobalObjectPrint() {
-  PrintF("global ");
-  JSObjectPrint();
-  PrintF("global context : ");
-  global_context()->ShortPrint();
-  PrintF("\n");
+#ifdef OBJECT_PRINT
+void JSGlobalObject::JSGlobalObjectPrint(FILE* out) {
+  PrintF(out, "global ");
+  JSObjectPrint(out);
+  PrintF(out, "global context : ");
+  global_context()->ShortPrint(out);
+  PrintF(out, "\n");
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void JSGlobalObject::JSGlobalObjectVerify() {
   CHECK(IsJSGlobalObject());
   JSObjectVerify();
@@ -856,14 +922,18 @@
     VerifyObjectField(i);
   }
 }
+#endif  // DEBUG
 
 
-void JSBuiltinsObject::JSBuiltinsObjectPrint() {
-  PrintF("builtins ");
-  JSObjectPrint();
+#ifdef OBJECT_PRINT
+void JSBuiltinsObject::JSBuiltinsObjectPrint(FILE* out) {
+  PrintF(out, "builtins ");
+  JSObjectPrint(out);
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void JSBuiltinsObject::JSBuiltinsObjectVerify() {
   CHECK(IsJSBuiltinsObject());
   JSObjectVerify();
@@ -894,21 +964,27 @@
   CHECK(IsJSGlobalPropertyCell());
   VerifyObjectField(kValueOffset);
 }
+#endif  // DEBUG
 
 
-void JSGlobalPropertyCell::JSGlobalPropertyCellPrint() {
-  HeapObject::PrintHeader("JSGlobalPropertyCell");
+#ifdef OBJECT_PRINT
+void JSGlobalPropertyCell::JSGlobalPropertyCellPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "JSGlobalPropertyCell");
 }
 
 
-void Code::CodePrint() {
-  HeapObject::PrintHeader("Code");
+void Code::CodePrint(FILE* out) {
+  HeapObject::PrintHeader(out, "Code");
 #ifdef ENABLE_DISASSEMBLER
-  Disassemble(NULL);
+  if (FLAG_use_verbose_printer) {
+    Disassemble(NULL, out);
+  }
 #endif
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void Code::CodeVerify() {
   CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
                   kCodeAlignment));
@@ -963,13 +1039,17 @@
       break;
   }
 }
+#endif  // DEBUG
 
 
-void Proxy::ProxyPrint() {
-  PrintF("proxy to %p", proxy());
+#ifdef OBJECT_PRINT
+void Proxy::ProxyPrint(FILE* out) {
+  PrintF(out, "proxy to %p", proxy());
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void Proxy::ProxyVerify() {
   ASSERT(IsProxy());
 }
@@ -983,38 +1063,50 @@
   VerifyPointer(data());
   VerifyPointer(flag());
 }
+#endif  // DEBUG
 
-void AccessorInfo::AccessorInfoPrint() {
-  HeapObject::PrintHeader("AccessorInfo");
-  PrintF("\n - getter: ");
-  getter()->ShortPrint();
-  PrintF("\n - setter: ");
-  setter()->ShortPrint();
-  PrintF("\n - name: ");
-  name()->ShortPrint();
-  PrintF("\n - data: ");
-  data()->ShortPrint();
-  PrintF("\n - flag: ");
-  flag()->ShortPrint();
+
+#ifdef OBJECT_PRINT
+void AccessorInfo::AccessorInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "AccessorInfo");
+  PrintF(out, "\n - getter: ");
+  getter()->ShortPrint(out);
+  PrintF(out, "\n - setter: ");
+  setter()->ShortPrint(out);
+  PrintF(out, "\n - name: ");
+  name()->ShortPrint(out);
+  PrintF(out, "\n - data: ");
+  data()->ShortPrint(out);
+  PrintF(out, "\n - flag: ");
+  flag()->ShortPrint(out);
 }
+#endif  // OBJECT_PRINT
 
+
+#ifdef DEBUG
 void AccessCheckInfo::AccessCheckInfoVerify() {
   CHECK(IsAccessCheckInfo());
   VerifyPointer(named_callback());
   VerifyPointer(indexed_callback());
   VerifyPointer(data());
 }
+#endif  // DEBUG
 
-void AccessCheckInfo::AccessCheckInfoPrint() {
-  HeapObject::PrintHeader("AccessCheckInfo");
-  PrintF("\n - named_callback: ");
-  named_callback()->ShortPrint();
-  PrintF("\n - indexed_callback: ");
-  indexed_callback()->ShortPrint();
-  PrintF("\n - data: ");
-  data()->ShortPrint();
+
+#ifdef OBJECT_PRINT
+void AccessCheckInfo::AccessCheckInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "AccessCheckInfo");
+  PrintF(out, "\n - named_callback: ");
+  named_callback()->ShortPrint(out);
+  PrintF(out, "\n - indexed_callback: ");
+  indexed_callback()->ShortPrint(out);
+  PrintF(out, "\n - data: ");
+  data()->ShortPrint(out);
 }
+#endif  // OBJECT_PRINT
 
+
+#ifdef DEBUG
 void InterceptorInfo::InterceptorInfoVerify() {
   CHECK(IsInterceptorInfo());
   VerifyPointer(getter());
@@ -1024,38 +1116,50 @@
   VerifyPointer(enumerator());
   VerifyPointer(data());
 }
+#endif  // DEBUG
 
-void InterceptorInfo::InterceptorInfoPrint() {
-  HeapObject::PrintHeader("InterceptorInfo");
-  PrintF("\n - getter: ");
-  getter()->ShortPrint();
-  PrintF("\n - setter: ");
-  setter()->ShortPrint();
-  PrintF("\n - query: ");
-  query()->ShortPrint();
-  PrintF("\n - deleter: ");
-  deleter()->ShortPrint();
-  PrintF("\n - enumerator: ");
-  enumerator()->ShortPrint();
-  PrintF("\n - data: ");
-  data()->ShortPrint();
+
+#ifdef OBJECT_PRINT
+void InterceptorInfo::InterceptorInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "InterceptorInfo");
+  PrintF(out, "\n - getter: ");
+  getter()->ShortPrint(out);
+  PrintF(out, "\n - setter: ");
+  setter()->ShortPrint(out);
+  PrintF(out, "\n - query: ");
+  query()->ShortPrint(out);
+  PrintF(out, "\n - deleter: ");
+  deleter()->ShortPrint(out);
+  PrintF(out, "\n - enumerator: ");
+  enumerator()->ShortPrint(out);
+  PrintF(out, "\n - data: ");
+  data()->ShortPrint(out);
 }
+#endif  // OBJECT_PRINT
 
+
+#ifdef DEBUG
 void CallHandlerInfo::CallHandlerInfoVerify() {
   CHECK(IsCallHandlerInfo());
   VerifyPointer(callback());
   VerifyPointer(data());
 }
+#endif  // DEBUG
 
-void CallHandlerInfo::CallHandlerInfoPrint() {
-  HeapObject::PrintHeader("CallHandlerInfo");
-  PrintF("\n - callback: ");
-  callback()->ShortPrint();
-  PrintF("\n - data: ");
-  data()->ShortPrint();
-  PrintF("\n - call_stub_cache: ");
+
+#ifdef OBJECT_PRINT
+void CallHandlerInfo::CallHandlerInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "CallHandlerInfo");
+  PrintF(out, "\n - callback: ");
+  callback()->ShortPrint(out);
+  PrintF(out, "\n - data: ");
+  data()->ShortPrint(out);
+  PrintF(out, "\n - call_stub_cache: ");
 }
+#endif  // OBJECT_PRINT
 
+
+#ifdef DEBUG
 void TemplateInfo::TemplateInfoVerify() {
   VerifyPointer(tag());
   VerifyPointer(property_list());
@@ -1075,81 +1179,106 @@
   VerifyPointer(signature());
   VerifyPointer(access_check_info());
 }
+#endif  // DEBUG
 
-void FunctionTemplateInfo::FunctionTemplateInfoPrint() {
-  HeapObject::PrintHeader("FunctionTemplateInfo");
-  PrintF("\n - class name: ");
-  class_name()->ShortPrint();
-  PrintF("\n - tag: ");
-  tag()->ShortPrint();
-  PrintF("\n - property_list: ");
-  property_list()->ShortPrint();
-  PrintF("\n - serial_number: ");
-  serial_number()->ShortPrint();
-  PrintF("\n - call_code: ");
-  call_code()->ShortPrint();
-  PrintF("\n - property_accessors: ");
-  property_accessors()->ShortPrint();
-  PrintF("\n - prototype_template: ");
-  prototype_template()->ShortPrint();
-  PrintF("\n - parent_template: ");
-  parent_template()->ShortPrint();
-  PrintF("\n - named_property_handler: ");
-  named_property_handler()->ShortPrint();
-  PrintF("\n - indexed_property_handler: ");
-  indexed_property_handler()->ShortPrint();
-  PrintF("\n - instance_template: ");
-  instance_template()->ShortPrint();
-  PrintF("\n - signature: ");
-  signature()->ShortPrint();
-  PrintF("\n - access_check_info: ");
-  access_check_info()->ShortPrint();
-  PrintF("\n - hidden_prototype: %s", hidden_prototype() ? "true" : "false");
-  PrintF("\n - undetectable: %s", undetectable() ? "true" : "false");
-  PrintF("\n - need_access_check: %s", needs_access_check() ? "true" : "false");
+
+#ifdef OBJECT_PRINT
+void FunctionTemplateInfo::FunctionTemplateInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "FunctionTemplateInfo");
+  PrintF(out, "\n - class name: ");
+  class_name()->ShortPrint(out);
+  PrintF(out, "\n - tag: ");
+  tag()->ShortPrint(out);
+  PrintF(out, "\n - property_list: ");
+  property_list()->ShortPrint(out);
+  PrintF(out, "\n - serial_number: ");
+  serial_number()->ShortPrint(out);
+  PrintF(out, "\n - call_code: ");
+  call_code()->ShortPrint(out);
+  PrintF(out, "\n - property_accessors: ");
+  property_accessors()->ShortPrint(out);
+  PrintF(out, "\n - prototype_template: ");
+  prototype_template()->ShortPrint(out);
+  PrintF(out, "\n - parent_template: ");
+  parent_template()->ShortPrint(out);
+  PrintF(out, "\n - named_property_handler: ");
+  named_property_handler()->ShortPrint(out);
+  PrintF(out, "\n - indexed_property_handler: ");
+  indexed_property_handler()->ShortPrint(out);
+  PrintF(out, "\n - instance_template: ");
+  instance_template()->ShortPrint(out);
+  PrintF(out, "\n - signature: ");
+  signature()->ShortPrint(out);
+  PrintF(out, "\n - access_check_info: ");
+  access_check_info()->ShortPrint(out);
+  PrintF(out, "\n - hidden_prototype: %s",
+         hidden_prototype() ? "true" : "false");
+  PrintF(out, "\n - undetectable: %s", undetectable() ? "true" : "false");
+  PrintF(out, "\n - need_access_check: %s",
+         needs_access_check() ? "true" : "false");
 }
+#endif  // OBJECT_PRINT
 
+
+#ifdef DEBUG
 void ObjectTemplateInfo::ObjectTemplateInfoVerify() {
   CHECK(IsObjectTemplateInfo());
   TemplateInfoVerify();
   VerifyPointer(constructor());
   VerifyPointer(internal_field_count());
 }
+#endif  // DEBUG
 
-void ObjectTemplateInfo::ObjectTemplateInfoPrint() {
-  HeapObject::PrintHeader("ObjectTemplateInfo");
-  PrintF("\n - constructor: ");
-  constructor()->ShortPrint();
-  PrintF("\n - internal_field_count: ");
-  internal_field_count()->ShortPrint();
+
+#ifdef OBJECT_PRINT
+void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "ObjectTemplateInfo");
+  PrintF(out, "\n - constructor: ");
+  constructor()->ShortPrint(out);
+  PrintF(out, "\n - internal_field_count: ");
+  internal_field_count()->ShortPrint(out);
 }
+#endif  // OBJECT_PRINT
 
+
+#ifdef DEBUG
 void SignatureInfo::SignatureInfoVerify() {
   CHECK(IsSignatureInfo());
   VerifyPointer(receiver());
   VerifyPointer(args());
 }
+#endif  // DEBUG
 
-void SignatureInfo::SignatureInfoPrint() {
-  HeapObject::PrintHeader("SignatureInfo");
-  PrintF("\n - receiver: ");
-  receiver()->ShortPrint();
-  PrintF("\n - args: ");
-  args()->ShortPrint();
+
+#ifdef OBJECT_PRINT
+void SignatureInfo::SignatureInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "SignatureInfo");
+  PrintF(out, "\n - receiver: ");
+  receiver()->ShortPrint(out);
+  PrintF(out, "\n - args: ");
+  args()->ShortPrint(out);
 }
+#endif  // OBJECT_PRINT
 
+
+#ifdef DEBUG
 void TypeSwitchInfo::TypeSwitchInfoVerify() {
   CHECK(IsTypeSwitchInfo());
   VerifyPointer(types());
 }
+#endif  // DEBUG
 
-void TypeSwitchInfo::TypeSwitchInfoPrint() {
-  HeapObject::PrintHeader("TypeSwitchInfo");
-  PrintF("\n - types: ");
-  types()->ShortPrint();
+
+#ifdef OBJECT_PRINT
+void TypeSwitchInfo::TypeSwitchInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "TypeSwitchInfo");
+  PrintF(out, "\n - types: ");
+  types()->ShortPrint(out);
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void Script::ScriptVerify() {
   CHECK(IsScript());
   VerifyPointer(source());
@@ -1162,41 +1291,45 @@
   VerifyPointer(line_ends());
   VerifyPointer(id());
 }
+#endif  // DEBUG
 
 
-void Script::ScriptPrint() {
-  HeapObject::PrintHeader("Script");
-  PrintF("\n - source: ");
-  source()->ShortPrint();
-  PrintF("\n - name: ");
-  name()->ShortPrint();
-  PrintF("\n - line_offset: ");
-  line_offset()->ShortPrint();
-  PrintF("\n - column_offset: ");
-  column_offset()->ShortPrint();
-  PrintF("\n - type: ");
-  type()->ShortPrint();
-  PrintF("\n - id: ");
-  id()->ShortPrint();
-  PrintF("\n - data: ");
-  data()->ShortPrint();
-  PrintF("\n - context data: ");
-  context_data()->ShortPrint();
-  PrintF("\n - wrapper: ");
-  wrapper()->ShortPrint();
-  PrintF("\n - compilation type: ");
-  compilation_type()->ShortPrint();
-  PrintF("\n - line ends: ");
-  line_ends()->ShortPrint();
-  PrintF("\n - eval from shared: ");
-  eval_from_shared()->ShortPrint();
-  PrintF("\n - eval from instructions offset: ");
-  eval_from_instructions_offset()->ShortPrint();
-  PrintF("\n");
+#ifdef OBJECT_PRINT
+void Script::ScriptPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "Script");
+  PrintF(out, "\n - source: ");
+  source()->ShortPrint(out);
+  PrintF(out, "\n - name: ");
+  name()->ShortPrint(out);
+  PrintF(out, "\n - line_offset: ");
+  line_offset()->ShortPrint(out);
+  PrintF(out, "\n - column_offset: ");
+  column_offset()->ShortPrint(out);
+  PrintF(out, "\n - type: ");
+  type()->ShortPrint(out);
+  PrintF(out, "\n - id: ");
+  id()->ShortPrint(out);
+  PrintF(out, "\n - data: ");
+  data()->ShortPrint(out);
+  PrintF(out, "\n - context data: ");
+  context_data()->ShortPrint(out);
+  PrintF(out, "\n - wrapper: ");
+  wrapper()->ShortPrint(out);
+  PrintF(out, "\n - compilation type: ");
+  compilation_type()->ShortPrint(out);
+  PrintF(out, "\n - line ends: ");
+  line_ends()->ShortPrint(out);
+  PrintF(out, "\n - eval from shared: ");
+  eval_from_shared()->ShortPrint(out);
+  PrintF(out, "\n - eval from instructions offset: ");
+  eval_from_instructions_offset()->ShortPrint(out);
+  PrintF(out, "\n");
 }
+#endif  // OBJECT_PRINT
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
+#ifdef DEBUG
 void DebugInfo::DebugInfoVerify() {
   CHECK(IsDebugInfo());
   VerifyPointer(shared());
@@ -1204,21 +1337,25 @@
   VerifyPointer(code());
   VerifyPointer(break_points());
 }
+#endif  // DEBUG
 
 
-void DebugInfo::DebugInfoPrint() {
-  HeapObject::PrintHeader("DebugInfo");
-  PrintF("\n - shared: ");
-  shared()->ShortPrint();
-  PrintF("\n - original_code: ");
-  original_code()->ShortPrint();
-  PrintF("\n - code: ");
-  code()->ShortPrint();
-  PrintF("\n - break_points: ");
-  break_points()->Print();
+#ifdef OBJECT_PRINT
+void DebugInfo::DebugInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "DebugInfo");
+  PrintF(out, "\n - shared: ");
+  shared()->ShortPrint(out);
+  PrintF(out, "\n - original_code: ");
+  original_code()->ShortPrint(out);
+  PrintF(out, "\n - code: ");
+  code()->ShortPrint(out);
+  PrintF(out, "\n - break_points: ");
+  break_points()->Print(out);
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 void BreakPointInfo::BreakPointInfoVerify() {
   CHECK(IsBreakPointInfo());
   code_position()->SmiVerify();
@@ -1226,19 +1363,23 @@
   statement_position()->SmiVerify();
   VerifyPointer(break_point_objects());
 }
+#endif  // DEBUG
 
 
-void BreakPointInfo::BreakPointInfoPrint() {
-  HeapObject::PrintHeader("BreakPointInfo");
-  PrintF("\n - code_position: %d", code_position()->value());
-  PrintF("\n - source_position: %d", source_position()->value());
-  PrintF("\n - statement_position: %d", statement_position()->value());
-  PrintF("\n - break_point_objects: ");
-  break_point_objects()->ShortPrint();
+#ifdef OBJECT_PRINT
+void BreakPointInfo::BreakPointInfoPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "BreakPointInfo");
+  PrintF(out, "\n - code_position: %d", code_position()->value());
+  PrintF(out, "\n - source_position: %d", source_position()->value());
+  PrintF(out, "\n - statement_position: %d", statement_position()->value());
+  PrintF(out, "\n - break_point_objects: ");
+  break_point_objects()->ShortPrint(out);
 }
-#endif
+#endif  // OBJECT_PRINT
+#endif  // ENABLE_DEBUGGER_SUPPORT
 
 
+#ifdef DEBUG
 void JSObject::IncrementSpillStatistics(SpillInformation* info) {
   info->number_of_objects_++;
   // Named properties
@@ -1321,20 +1462,24 @@
 
   PrintF("\n");
 }
+#endif  // DEBUG
 
 
-void DescriptorArray::PrintDescriptors() {
-  PrintF("Descriptor array  %d\n", number_of_descriptors());
+#ifdef OBJECT_PRINT
+void DescriptorArray::PrintDescriptors(FILE* out) {
+  PrintF(out, "Descriptor array  %d\n", number_of_descriptors());
   for (int i = 0; i < number_of_descriptors(); i++) {
-    PrintF(" %d: ", i);
+    PrintF(out, " %d: ", i);
     Descriptor desc;
     Get(i, &desc);
-    desc.Print();
+    desc.Print(out);
   }
-  PrintF("\n");
+  PrintF(out, "\n");
 }
+#endif  // OBJECT_PRINT
 
 
+#ifdef DEBUG
 bool DescriptorArray::IsSortedNoDuplicates() {
   String* current_key = NULL;
   uint32_t current = 0;
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 499cb91..7935912 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -459,6 +459,33 @@
 }
 
 
+bool Object::IsDeoptimizationInputData() {
+  // Must be a fixed array.
+  if (!IsFixedArray()) return false;
+
+  // There's no sure way to tell a fixed array from a deoptimization data
+  // array.  Since this is only used for asserts, we check that the length
+  // is either zero or the fixed header size plus a whole number of entries.
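+  // For example, with kFirstDeoptEntryIndex == 5 and kDeoptEntrySize == 3
+  // (see objects.h), the accepted lengths are 0, 5, 8, 11, and so on.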
+  int length = FixedArray::cast(this)->length();
+  if (length == 0) return true;
+
+  length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
+  return length >= 0 &&
+      length % DeoptimizationInputData::kDeoptEntrySize == 0;
+}
+
+
+bool Object::IsDeoptimizationOutputData() {
+  if (!IsFixedArray()) return false;
+  // There's actually no way to tell a fixed array from a deoptimization
+  // data array.  Since this is only used for asserts, we at least check
+  // that the length is plausible.
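+  // (The layout is (ast id, pc-and-state) pairs, so any valid length is
+  // even; see DeoptimizationOutputData in objects.h.)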
+  if (FixedArray::cast(this)->length() % 2 != 0) return false;
+  return true;
+}
+
+
 bool Object::IsContext() {
   return Object::IsHeapObject()
     && (HeapObject::cast(this)->map() == Heap::context_map() ||
@@ -1682,6 +1709,8 @@
 
 CAST_ACCESSOR(FixedArray)
 CAST_ACCESSOR(DescriptorArray)
+CAST_ACCESSOR(DeoptimizationInputData)
+CAST_ACCESSOR(DeoptimizationOutputData)
 CAST_ACCESSOR(SymbolTable)
 CAST_ACCESSOR(JSFunctionResultCache)
 CAST_ACCESSOR(NormalizedMapCache)
@@ -2376,18 +2405,160 @@
 
 
 int Code::major_key() {
-  ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
+  ASSERT(kind() == STUB ||
+         kind() == BINARY_OP_IC ||
+         kind() == TYPE_RECORDING_BINARY_OP_IC ||
+         kind() == COMPARE_IC);
   return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
 }
 
 
 void Code::set_major_key(int major) {
-  ASSERT(kind() == STUB || kind() == BINARY_OP_IC);
+  ASSERT(kind() == STUB ||
+         kind() == BINARY_OP_IC ||
+         kind() == TYPE_RECORDING_BINARY_OP_IC ||
+         kind() == COMPARE_IC);
   ASSERT(0 <= major && major < 256);
   WRITE_BYTE_FIELD(this, kStubMajorKeyOffset, major);
 }
 
 
+bool Code::optimizable() {
+  ASSERT(kind() == FUNCTION);
+  return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
+}
+
+
+void Code::set_optimizable(bool value) {
+  ASSERT(kind() == FUNCTION);
+  WRITE_BYTE_FIELD(this, kOptimizableOffset, value ? 1 : 0);
+}
+
+
+bool Code::has_deoptimization_support() {
+  ASSERT(kind() == FUNCTION);
+  return READ_BYTE_FIELD(this, kHasDeoptimizationSupportOffset) == 1;
+}
+
+
+void Code::set_has_deoptimization_support(bool value) {
+  ASSERT(kind() == FUNCTION);
+  WRITE_BYTE_FIELD(this, kHasDeoptimizationSupportOffset, value ? 1 : 0);
+}
+
+
+int Code::allow_osr_at_loop_nesting_level() {
+  ASSERT(kind() == FUNCTION);
+  return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
+}
+
+
+void Code::set_allow_osr_at_loop_nesting_level(int level) {
+  ASSERT(kind() == FUNCTION);
+  ASSERT(level >= 0 && level <= kMaxLoopNestingMarker);
+  WRITE_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset, level);
+}
+
+
+unsigned Code::stack_slots() {
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  return READ_UINT32_FIELD(this, kStackSlotsOffset);
+}
+
+
+void Code::set_stack_slots(unsigned slots) {
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  WRITE_UINT32_FIELD(this, kStackSlotsOffset, slots);
+}
+
+
+unsigned Code::safepoint_table_start() {
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  return READ_UINT32_FIELD(this, kSafepointTableStartOffset);
+}
+
+
+void Code::set_safepoint_table_start(unsigned offset) {
+  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
+  WRITE_UINT32_FIELD(this, kSafepointTableStartOffset, offset);
+}
+
+
+unsigned Code::stack_check_table_start() {
+  ASSERT(kind() == FUNCTION);
+  return READ_UINT32_FIELD(this, kStackCheckTableStartOffset);
+}
+
+
+void Code::set_stack_check_table_start(unsigned offset) {
+  ASSERT(kind() == FUNCTION);
+  ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
+  WRITE_UINT32_FIELD(this, kStackCheckTableStartOffset, offset);
+}
+
+
+CheckType Code::check_type() {
+  ASSERT(is_call_stub() || is_keyed_call_stub());
+  byte type = READ_BYTE_FIELD(this, kCheckTypeOffset);
+  return static_cast<CheckType>(type);
+}
+
+
+void Code::set_check_type(CheckType value) {
+  ASSERT(is_call_stub() || is_keyed_call_stub());
+  WRITE_BYTE_FIELD(this, kCheckTypeOffset, value);
+}
+
+
+byte Code::binary_op_type() {
+  ASSERT(is_binary_op_stub());
+  return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
+}
+
+
+void Code::set_binary_op_type(byte value) {
+  ASSERT(is_binary_op_stub());
+  WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
+}
+
+
+byte Code::type_recording_binary_op_type() {
+  ASSERT(is_type_recording_binary_op_stub());
+  return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
+}
+
+
+void Code::set_type_recording_binary_op_type(byte value) {
+  ASSERT(is_type_recording_binary_op_stub());
+  WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
+}
+
+
+byte Code::type_recording_binary_op_result_type() {
+  ASSERT(is_type_recording_binary_op_stub());
+  return READ_BYTE_FIELD(this, kBinaryOpReturnTypeOffset);
+}
+
+
+void Code::set_type_recording_binary_op_result_type(byte value) {
+  ASSERT(is_type_recording_binary_op_stub());
+  WRITE_BYTE_FIELD(this, kBinaryOpReturnTypeOffset, value);
+}
+
+
+byte Code::compare_state() {
+  ASSERT(is_compare_ic_stub());
+  return READ_BYTE_FIELD(this, kCompareStateOffset);
+}
+
+
+void Code::set_compare_state(byte value) {
+  ASSERT(is_compare_ic_stub());
+  WRITE_BYTE_FIELD(this, kCompareStateOffset, value);
+}
+
+
 bool Code::is_inline_cache_stub() {
   Kind kind = this->kind();
   return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
@@ -2530,6 +2701,7 @@
 
 ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
 ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
+ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
 
 ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
 ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
@@ -2667,6 +2839,7 @@
               kCompilerHintsOffset)
 SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
               kThisPropertyAssignmentsCountOffset)
+SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
 #else
 
 #define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset)             \
@@ -2716,6 +2889,7 @@
 PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
                         this_property_assignments_count,
                         kThisPropertyAssignmentsCountOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, opt_count, kOptCountOffset)
 #endif
 
 
@@ -2749,6 +2923,23 @@
 }
 
 
+bool SharedFunctionInfo::optimization_disabled() {
+  return BooleanBit::get(compiler_hints(), kOptimizationDisabled);
+}
+
+
+void SharedFunctionInfo::set_optimization_disabled(bool disable) {
+  set_compiler_hints(BooleanBit::set(compiler_hints(),
+                                     kOptimizationDisabled,
+                                     disable));
+  // If disabling optimizations we reflect that in the code object so
+  // it will not be counted as optimizable code.
+  if ((code()->kind() == Code::FUNCTION) && disable) {
+    code()->set_optimizable(false);
+  }
+}
+
+
 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
 
@@ -2794,6 +2985,13 @@
 
 
 void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
+  // If optimization has been disabled for the shared function info,
+  // reflect that in the code object so it will not be counted as
+  // optimizable code.
+  ASSERT(value->kind() != Code::FUNCTION ||
+         !value->optimizable() ||
+         this->code() == Builtins::builtin(Builtins::Illegal) ||
+         this->allows_lazy_compilation());
   WRITE_FIELD(this, kCodeOffset, value);
   CONDITIONAL_WRITE_BARRIER(this, kCodeOffset, mode);
 }
@@ -2812,6 +3010,16 @@
 }
 
 
+Smi* SharedFunctionInfo::deopt_counter() {
+  return reinterpret_cast<Smi*>(READ_FIELD(this, kDeoptCounterOffset));
+}
+
+
+void SharedFunctionInfo::set_deopt_counter(Smi* value) {
+  WRITE_FIELD(this, kDeoptCounterOffset, value);
+}
+
+
 bool SharedFunctionInfo::is_compiled() {
   return code() != Builtins::builtin(Builtins::LazyCompile);
 }
@@ -2828,14 +3036,20 @@
 }
 
 
-bool SharedFunctionInfo::HasCustomCallGenerator() {
+bool SharedFunctionInfo::HasBuiltinFunctionId() {
   return function_data()->IsSmi();
 }
 
 
-int SharedFunctionInfo::custom_call_generator_id() {
-  ASSERT(HasCustomCallGenerator());
-  return Smi::cast(function_data())->value();
+bool SharedFunctionInfo::IsBuiltinMathFunction() {
+  return HasBuiltinFunctionId() &&
+      builtin_function_id() >= kFirstMathFunctionId;
+}
+
+
+BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
+  ASSERT(HasBuiltinFunctionId());
+  return static_cast<BuiltinFunctionId>(Smi::cast(function_data())->value());
 }
 
 
@@ -2850,11 +3064,33 @@
 }
 
 
+bool SharedFunctionInfo::has_deoptimization_support() {
+  Code* code = this->code();
+  return code->kind() == Code::FUNCTION && code->has_deoptimization_support();
+}
+
+
 bool JSFunction::IsBuiltin() {
   return context()->global()->IsJSBuiltinsObject();
 }
 
 
+bool JSFunction::NeedsArgumentsAdaption() {
+  return shared()->formal_parameter_count() !=
+      SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+}
+
+
+bool JSFunction::IsOptimized() {
+  return code()->kind() == Code::OPTIMIZED_FUNCTION;
+}
+
+
+bool JSFunction::IsMarkedForLazyRecompilation() {
+  return code() == Builtins::builtin(Builtins::LazyRecompile);
+}
+
+
 Code* JSFunction::code() {
   return Code::cast(unchecked_code());
 }
@@ -2874,6 +3110,23 @@
 }
 
 
+void JSFunction::ReplaceCode(Code* code) {
+  bool was_optimized = IsOptimized();
+  bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
+
+  set_code(code);
+
+  // Add/remove the function from the list of optimized functions for this
+  // context based on the state change.
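+  // The list thus tracks exactly the functions whose code kind is
+  // OPTIMIZED_FUNCTION, presumably so that bulk deoptimization (e.g.
+  // Deoptimizer::DeoptimizeGlobalObject in objects.cc) can enumerate them.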
+  if (!was_optimized && is_optimized) {
+    context()->global_context()->AddOptimizedFunction(this);
+  }
+  if (was_optimized && !is_optimized) {
+    context()->global_context()->RemoveOptimizedFunction(this);
+  }
+}
+
+
 Context* JSFunction::context() {
   return Context::cast(READ_FIELD(this, kContextOffset));
 }
@@ -3007,6 +3260,7 @@
 
 INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
 ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
+ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
 
 
 byte* Code::instruction_start()  {
@@ -3024,6 +3278,12 @@
 }
 
 
+FixedArray* Code::unchecked_deoptimization_data() {
+  return reinterpret_cast<FixedArray*>(
+      READ_FIELD(this, kDeoptimizationDataOffset));
+}
+
+
 ByteArray* Code::unchecked_relocation_info() {
   return reinterpret_cast<ByteArray*>(READ_FIELD(this, kRelocationInfoOffset));
 }
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index ed76cb9..55a0a53 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -352,6 +352,7 @@
 void Code::CodeIterateBody(ObjectVisitor* v) {
   int mode_mask = RelocInfo::kCodeTargetMask |
                   RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
                   RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
                   RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
                   RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
@@ -361,9 +362,8 @@
   // the heap compaction in the next statement.
   RelocIterator it(this, mode_mask);
 
-  IteratePointers(v,
-                  kRelocationInfoOffset,
-                  kRelocationInfoOffset + kPointerSize);
+  IteratePointer(v, kRelocationInfoOffset);
+  IteratePointer(v, kDeoptimizationDataOffset);
 
   for (; !it.done(); it.next()) {
     it.rinfo()->Visit(v);
@@ -375,6 +375,7 @@
 void Code::CodeIterateBody() {
   int mode_mask = RelocInfo::kCodeTargetMask |
                   RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
                   RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
                   RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
                   RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
@@ -386,6 +387,8 @@
 
   StaticVisitor::VisitPointer(
       reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
+  StaticVisitor::VisitPointer(
+      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
 
   for (; !it.done(); it.next()) {
     it.rinfo()->template Visit<StaticVisitor>();
diff --git a/src/objects.cc b/src/objects.cc
index dcfd926..ab2f964 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -30,17 +30,24 @@
 #include "api.h"
 #include "arguments.h"
 #include "bootstrapper.h"
+#include "codegen.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "execution.h"
+#include "full-codegen.h"
+#include "hydrogen.h"
 #include "objects-inl.h"
 #include "objects-visiting.h"
 #include "macro-assembler.h"
+#include "safepoint-table.h"
 #include "scanner-base.h"
 #include "scopeinfo.h"
 #include "string-stream.h"
 #include "utils.h"
+#include "vm-state-inl.h"
 
 #ifdef ENABLE_DISASSEMBLER
+#include "disasm.h"
 #include "disassembler.h"
 #endif
 
@@ -546,11 +553,11 @@
 }
 
 
-void Object::ShortPrint() {
+void Object::ShortPrint(FILE* out) {
   HeapStringAllocator allocator;
   StringStream accumulator(&allocator);
   ShortPrint(&accumulator);
-  accumulator.OutputToStdOut();
+  accumulator.OutputToFile(out);
 }
 
 
@@ -565,8 +572,8 @@
 }
 
 
-void Smi::SmiPrint() {
-  PrintF("%d", value());
+void Smi::SmiPrint(FILE* out) {
+  PrintF(out, "%d", value());
 }
 
 
@@ -580,8 +587,8 @@
 }
 
 
-void Failure::FailurePrint() {
-  PrintF("Failure(%p)", reinterpret_cast<void*>(value()));
+void Failure::FailurePrint(FILE* out) {
+  PrintF(out, "Failure(%p)", reinterpret_cast<void*>(value()));
 }
 
 
@@ -1134,8 +1141,8 @@
 }
 
 
-void HeapNumber::HeapNumberPrint() {
-  PrintF("%.16g", Number());
+void HeapNumber::HeapNumberPrint(FILE* out) {
+  PrintF(out, "%.16g", Number());
 }
 
 
@@ -1728,6 +1735,23 @@
 }
 
 
+void Map::LookupInDescriptors(JSObject* holder,
+                              String* name,
+                              LookupResult* result) {
+  DescriptorArray* descriptors = instance_descriptors();
+  int number = DescriptorLookupCache::Lookup(descriptors, name);
+  if (number == DescriptorLookupCache::kAbsent) {
+    number = descriptors->Search(name);
+    DescriptorLookupCache::Update(descriptors, name, number);
+  }
+  if (number != DescriptorArray::kNotFound) {
+    result->DescriptorResult(holder, descriptors->GetDetails(number), number);
+  } else {
+    result->NotFound();
+  }
+}
+
+
 void JSObject::LocalLookupRealNamedProperty(String* name,
                                             LookupResult* result) {
   if (IsJSGlobalProxy()) {
@@ -3051,6 +3075,10 @@
       if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
     }
     set_map(Map::cast(new_map));
+    // When running Crankshaft, changing the map is not enough. We
+    // need to deoptimize all functions that rely on this global
+    // object.
+    Deoptimizer::DeoptimizeGlobalObject(this);
   }
 
   // Update the dictionary with the new CALLBACKS property.
@@ -3069,8 +3097,9 @@
 
 MaybeObject* JSObject::DefineAccessor(String* name,
                                       bool is_getter,
-                                      JSFunction* fun,
+                                      Object* fun,
                                       PropertyAttributes attributes) {
+  ASSERT(fun->IsJSFunction() || fun->IsUndefined());
   // Check access rights if needed.
   if (IsAccessCheckNeeded() &&
       !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
@@ -4123,6 +4152,22 @@
 }
 
 
+MaybeObject* DeoptimizationInputData::Allocate(int deopt_entry_count,
+                                               PretenureFlag pretenure) {
+  ASSERT(deopt_entry_count > 0);
+  return Heap::AllocateFixedArray(LengthFor(deopt_entry_count),
+                                  pretenure);
+}
+
+
+MaybeObject* DeoptimizationOutputData::Allocate(int number_of_deopt_points,
+                                                PretenureFlag pretenure) {
+  if (number_of_deopt_points == 0) return Heap::empty_fixed_array();
+  return Heap::AllocateFixedArray(LengthOfFixedArray(number_of_deopt_points),
+                                  pretenure);
+}
+
+
 #ifdef DEBUG
 bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
   if (IsEmpty()) return other->IsEmpty();
@@ -5331,6 +5376,38 @@
 }
 
 
+void JSFunction::MarkForLazyRecompilation() {
+  ASSERT(is_compiled() && !IsOptimized());
+  ASSERT(shared()->allows_lazy_compilation());
+  ReplaceCode(Builtins::builtin(Builtins::LazyRecompile));
+}
+
+
+uint32_t JSFunction::SourceHash() {
+  uint32_t hash = 0;
+  Object* script = shared()->script();
+  if (!script->IsUndefined()) {
+    Object* source = Script::cast(script)->source();
+    if (!source->IsUndefined()) hash = String::cast(source)->Hash();
+  }
+  hash ^= ComputeIntegerHash(shared()->start_position_and_type());
+  hash += ComputeIntegerHash(shared()->end_position());
+  return hash;
+}
+
+
+bool JSFunction::IsInlineable() {
+  if (IsBuiltin()) return false;
+  // Check that the function has a script associated with it.
+  if (!shared()->script()->IsScript()) return false;
+  Code* code = shared()->code();
+  if (code->kind() == Code::OPTIMIZED_FUNCTION) return true;
+  // If we never ran this (unlikely), then let's try to optimize it.
+  if (code->kind() != Code::FUNCTION) return true;
+  return code->optimizable();
+}
+
+
 Object* JSFunction::SetInstancePrototype(Object* value) {
   ASSERT(value->IsJSObject());
 
@@ -5390,6 +5467,12 @@
 }
 
 
+void JSFunction::PrintName(FILE* out) {
+  SmartPointer<char> name = shared()->DebugName()->ToCString();
+  PrintF(out, "%s", *name);
+}
+
+
 Context* JSFunction::GlobalContextFromLiterals(FixedArray* literals) {
   return Context::cast(literals->get(JSFunction::kLiteralGlobalContextIndex));
 }
@@ -5420,15 +5503,19 @@
 
 
 Object* SharedFunctionInfo::GetSourceCode() {
+  if (!HasSourceCode()) return Heap::undefined_value();
   HandleScope scope;
-  if (script()->IsUndefined()) return Heap::undefined_value();
   Object* source = Script::cast(script())->source();
-  if (source->IsUndefined()) return Heap::undefined_value();
   return *SubString(Handle<String>(String::cast(source)),
                     start_position(), end_position());
 }
 
 
+int SharedFunctionInfo::SourceSize() {
+  return end_position() - start_position();
+}
+
+
 int SharedFunctionInfo::CalculateInstanceSize() {
   int instance_size =
       JSObject::kHeaderSize +
@@ -5546,8 +5633,7 @@
 void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
                                          int max_length) {
   // For some native functions there is no source.
-  if (script()->IsUndefined() ||
-      Script::cast(script())->source()->IsUndefined()) {
+  if (!HasSourceCode()) {
     accumulator->Add("<No Source>");
     return;
   }
@@ -5572,17 +5658,63 @@
   }
 
   int len = end_position() - start_position();
-  if (len > max_length) {
+  if (len <= max_length || max_length < 0) {
+    accumulator->Put(script_source, start_position(), end_position());
+  } else {
     accumulator->Put(script_source,
                      start_position(),
                      start_position() + max_length);
     accumulator->Add("...\n");
-  } else {
-    accumulator->Put(script_source, start_position(), end_position());
   }
 }
 
 
+static bool IsCodeEquivalent(Code* code, Code* recompiled) {
+  if (code->instruction_size() != recompiled->instruction_size()) return false;
+  ByteArray* code_relocation = code->relocation_info();
+  ByteArray* recompiled_relocation = recompiled->relocation_info();
+  int length = code_relocation->length();
+  if (length != recompiled_relocation->length()) return false;
+  int compare = memcmp(code_relocation->GetDataStartAddress(),
+                       recompiled_relocation->GetDataStartAddress(),
+                       length);
+  return compare == 0;
+}
+
+
+void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
+  ASSERT(!has_deoptimization_support());
+  AssertNoAllocation no_allocation;
+  Code* code = this->code();
+  if (IsCodeEquivalent(code, recompiled)) {
+    // Copy the deoptimization data from the recompiled code.
+    code->set_deoptimization_data(recompiled->deoptimization_data());
+    code->set_has_deoptimization_support(true);
+  } else {
+    // TODO(3025757): In case the recompiled code isn't equivalent to the
+    // old code, we have to replace it. We should try to avoid this
+    // altogether because it flushes valuable type feedback by
+    // effectively resetting all IC state.
+    set_code(recompiled);
+  }
+  ASSERT(has_deoptimization_support());
+}
+
+
+bool SharedFunctionInfo::VerifyBailoutId(int id) {
+  // TODO(srdjan): debugging ARM crashes in hydrogen. OK to disable while
+  // we are always bailing out on ARM.
+
+  ASSERT(id != AstNode::kNoNumber);
+  Code* unoptimized = code();
+  DeoptimizationOutputData* data =
+      DeoptimizationOutputData::cast(unoptimized->deoptimization_data());
+  unsigned ignore = Deoptimizer::GetOutputInfo(data, id, this);
+  USE(ignore);
+  return true;  // Reaching this point means no ASSERT failed.
+}
+
+
 void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
   ASSERT(!IsInobjectSlackTrackingInProgress());
 
@@ -5703,6 +5835,17 @@
 }
 
 
+void ObjectVisitor::VisitGlobalPropertyCell(RelocInfo* rinfo) {
+  ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Object* cell = rinfo->target_cell();
+  Object* old_cell = cell;
+  VisitPointer(&cell);
+  if (cell != old_cell) {
+    rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
+  }
+}
+
+
 void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
   ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
           rinfo->IsPatchedReturnSequence()) ||
@@ -5715,6 +5858,12 @@
 }
 
 
+void Code::InvalidateRelocation() {
+  HandleScope scope;
+  set_relocation_info(Heap::empty_byte_array());
+}
+
+
 void Code::Relocate(intptr_t delta) {
   for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
     it.rinfo()->apply(delta);
@@ -5736,6 +5885,7 @@
   intptr_t delta = instruction_start() - desc.buffer;
   int mode_mask = RelocInfo::kCodeTargetMask |
                   RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
                   RelocInfo::kApplyMask;
   Assembler* origin = desc.origin;  // Needed to find target_object on X64.
   for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
@@ -5743,6 +5893,9 @@
     if (mode == RelocInfo::EMBEDDED_OBJECT) {
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
       it.rinfo()->set_target_object(*p);
+    } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
+      Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
+      it.rinfo()->set_target_cell(*cell);
     } else if (RelocInfo::IsCodeTarget(mode)) {
       // rewrite code handles in inline cache targets to direct
       // pointers to the first instruction in the code object
@@ -5813,11 +5966,195 @@
 }
 
 
+uint8_t* Code::GetSafepointEntry(Address pc) {
+  SafepointTable table(this);
+  unsigned pc_offset = static_cast<unsigned>(pc - instruction_start());
+  for (unsigned i = 0; i < table.length(); i++) {
+    // TODO(kasperl): Replace the linear search with binary search.
+    if (table.GetPcOffset(i) == pc_offset) return table.GetEntry(i);
+  }
+  return NULL;
+}
+
+
+void Code::SetNoStackCheckTable() {
+  // Indicate the absence of a stack-check table by a table start after the
+  // end of the instructions.  Table start must be aligned, so round up.
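+  // Readers detect this sentinel by comparing stack_check_table_start()
+  // against instruction_size(); see Code::Disassemble in objects.cc.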
+  set_stack_check_table_start(RoundUp(instruction_size(), kIntSize));
+}
+
+
+Map* Code::FindFirstMap() {
+  ASSERT(is_inline_cache_stub());
+  AssertNoAllocation no_allocation;
+  int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+  for (RelocIterator it(this, mask); !it.done(); it.next()) {
+    RelocInfo* info = it.rinfo();
+    Object* object = info->target_object();
+    if (object->IsMap()) return Map::cast(object);
+  }
+  return NULL;
+}
+
+
 #ifdef ENABLE_DISASSEMBLER
+
+#ifdef OBJECT_PRINT
+
+void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
+  disasm::NameConverter converter;
+  int deopt_count = DeoptCount();
+  PrintF(out, "Deoptimization Input Data (deopt points = %d)\n", deopt_count);
+  if (0 == deopt_count) return;
+
+  PrintF(out, "%6s  %6s  %6s  %12s\n", "index", "ast id", "argc", "commands");
+  for (int i = 0; i < deopt_count; i++) {
+    int command_count = 0;
+    PrintF(out, "%6d  %6d  %6d",
+           i, AstId(i)->value(), ArgumentsStackHeight(i)->value());
+    int translation_index = TranslationIndex(i)->value();
+    TranslationIterator iterator(TranslationByteArray(), translation_index);
+    Translation::Opcode opcode =
+        static_cast<Translation::Opcode>(iterator.Next());
+    ASSERT(Translation::BEGIN == opcode);
+    int frame_count = iterator.Next();
+    if (FLAG_print_code_verbose) {
+      PrintF(out, "  %s {count=%d}\n", Translation::StringFor(opcode),
+             frame_count);
+    }
+
+    for (int i = 0; i < frame_count; ++i) {
+      opcode = static_cast<Translation::Opcode>(iterator.Next());
+      ASSERT(Translation::FRAME == opcode);
+      int ast_id = iterator.Next();
+      int function_id = iterator.Next();
+      JSFunction* function =
+          JSFunction::cast(LiteralArray()->get(function_id));
+      unsigned height = iterator.Next();
+      if (FLAG_print_code_verbose) {
+        PrintF(out, "%24s  %s {ast_id=%d, function=",
+               "", Translation::StringFor(opcode), ast_id);
+        function->PrintName(out);
+        PrintF(out, ", height=%u}\n", height);
+      }
+
+      // Size of translation is height plus all incoming arguments including
+      // receiver.
+      int size = height + function->shared()->formal_parameter_count() + 1;
+      command_count += size;
+      for (int j = 0; j < size; ++j) {
+        opcode = static_cast<Translation::Opcode>(iterator.Next());
+        if (FLAG_print_code_verbose) {
+          PrintF(out, "%24s    %s ", "", Translation::StringFor(opcode));
+        }
+
+        if (opcode == Translation::DUPLICATE) {
+          opcode = static_cast<Translation::Opcode>(iterator.Next());
+          if (FLAG_print_code_verbose) {
+            PrintF(out, "%s ", Translation::StringFor(opcode));
+          }
+          --j;  // Two commands share the same frame index.
+        }
+
+        switch (opcode) {
+          case Translation::BEGIN:
+          case Translation::FRAME:
+          case Translation::DUPLICATE:
+            UNREACHABLE();
+            break;
+
+          case Translation::REGISTER: {
+            int reg_code = iterator.Next();
+            if (FLAG_print_code_verbose) {
+              PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
+            }
+            break;
+          }
+
+          case Translation::INT32_REGISTER: {
+            int reg_code = iterator.Next();
+            if (FLAG_print_code_verbose) {
+              PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
+            }
+            break;
+          }
+
+          case Translation::DOUBLE_REGISTER: {
+            int reg_code = iterator.Next();
+            if (FLAG_print_code_verbose) {
+              PrintF(out, "{input=%s}",
+                     DoubleRegister::AllocationIndexToString(reg_code));
+            }
+            break;
+          }
+
+          case Translation::STACK_SLOT: {
+            int input_slot_index = iterator.Next();
+            if (FLAG_print_code_verbose) {
+              PrintF(out, "{input=%d}", input_slot_index);
+            }
+            break;
+          }
+
+          case Translation::INT32_STACK_SLOT: {
+            int input_slot_index = iterator.Next();
+            if (FLAG_print_code_verbose) {
+              PrintF(out, "{input=%d}", input_slot_index);
+            }
+            break;
+          }
+
+          case Translation::DOUBLE_STACK_SLOT: {
+            int input_slot_index = iterator.Next();
+            if (FLAG_print_code_verbose) {
+              PrintF(out, "{input=%d}", input_slot_index);
+            }
+            break;
+          }
+
+          case Translation::LITERAL: {
+            unsigned literal_index = iterator.Next();
+            if (FLAG_print_code_verbose) {
+              PrintF(out, "{literal_id=%u}", literal_index);
+            }
+            break;
+          }
+
+          case Translation::ARGUMENTS_OBJECT:
+            break;
+        }
+        if (FLAG_print_code_verbose) PrintF(out, "\n");
+      }
+    }
+    if (!FLAG_print_code_verbose) PrintF(out, "  %12d\n", command_count);
+  }
+}
+
+
+void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
+  PrintF(out, "Deoptimization Output Data (deopt points = %d)\n",
+         this->DeoptPoints());
+  if (this->DeoptPoints() == 0) return;
+
+  PrintF("%6s  %8s  %s\n", "ast id", "pc", "state");
+  for (int i = 0; i < this->DeoptPoints(); i++) {
+    int pc_and_state = this->PcAndState(i)->value();
+    PrintF("%6d  %8d  %s\n",
+           this->AstId(i)->value(),
+           FullCodeGenerator::PcField::decode(pc_and_state),
+           FullCodeGenerator::State2String(
+               FullCodeGenerator::StateField::decode(pc_and_state)));
+  }
+}
+
+#endif
+
+
 // Identify kind of code.
 const char* Code::Kind2String(Kind kind) {
   switch (kind) {
     case FUNCTION: return "FUNCTION";
+    case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
     case STUB: return "STUB";
     case BUILTIN: return "BUILTIN";
     case LOAD_IC: return "LOAD_IC";
@@ -5827,6 +6164,8 @@
     case CALL_IC: return "CALL_IC";
     case KEYED_CALL_IC: return "KEYED_CALL_IC";
     case BINARY_OP_IC: return "BINARY_OP_IC";
+    case TYPE_RECORDING_BINARY_OP_IC: return "TYPE_RECORDING_BINARY_OP_IC";
+    case COMPARE_IC: return "COMPARE_IC";
   }
   UNREACHABLE();
   return NULL;
@@ -5863,27 +6202,78 @@
   return NULL;
 }
 
-void Code::Disassemble(const char* name) {
-  PrintF("kind = %s\n", Kind2String(kind()));
+
+void Code::Disassemble(const char* name, FILE* out) {
+  PrintF(out, "kind = %s\n", Kind2String(kind()));
   if (is_inline_cache_stub()) {
-    PrintF("ic_state = %s\n", ICState2String(ic_state()));
-    PrintF("ic_in_loop = %d\n", ic_in_loop() == IN_LOOP);
+    PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
+    PrintF(out, "ic_in_loop = %d\n", ic_in_loop() == IN_LOOP);
     if (ic_state() == MONOMORPHIC) {
-      PrintF("type = %s\n", PropertyType2String(type()));
+      PrintF(out, "type = %s\n", PropertyType2String(type()));
     }
   }
   if ((name != NULL) && (name[0] != '\0')) {
-    PrintF("name = %s\n", name);
+    PrintF(out, "name = %s\n", name);
+  }
+  if (kind() == OPTIMIZED_FUNCTION) {
+    PrintF(out, "stack_slots = %d\n", stack_slots());
   }
 
-  PrintF("Instructions (size = %d)\n", instruction_size());
-  Disassembler::Decode(NULL, this);
+  PrintF(out, "Instructions (size = %d)\n", instruction_size());
+  Disassembler::Decode(out, this);
+  PrintF(out, "\n");
+
+#ifdef DEBUG
+  if (kind() == FUNCTION) {
+    DeoptimizationOutputData* data =
+        DeoptimizationOutputData::cast(this->deoptimization_data());
+    data->DeoptimizationOutputDataPrint(out);
+  } else if (kind() == OPTIMIZED_FUNCTION) {
+    DeoptimizationInputData* data =
+        DeoptimizationInputData::cast(this->deoptimization_data());
+    data->DeoptimizationInputDataPrint(out);
+  }
   PrintF("\n");
+#endif
+
+  if (kind() == OPTIMIZED_FUNCTION) {
+    SafepointTable table(this);
+    PrintF(out, "Safepoints (size = %u)\n", table.size());
+    for (unsigned i = 0; i < table.length(); i++) {
+      unsigned pc_offset = table.GetPcOffset(i);
+      PrintF(out, "%p  %4d  ", (instruction_start() + pc_offset), pc_offset);
+      table.PrintEntry(i);
+      PrintF(out, " (sp -> fp)");
+      int deoptimization_index = table.GetDeoptimizationIndex(i);
+      if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
+        PrintF(out, "  %6d", deoptimization_index);
+      } else {
+        PrintF(out, "  <none>");
+      }
+      PrintF(out, "\n");
+    }
+    PrintF(out, "\n");
+  } else if (kind() == FUNCTION) {
+    unsigned offset = stack_check_table_start();
+    // If there is no stack check table, the "table start" will be at or
+    // after (due to alignment) the end of the instruction stream.
+    if (static_cast<int>(offset) < instruction_size()) {
+      unsigned* address =
+          reinterpret_cast<unsigned*>(instruction_start() + offset);
+      unsigned length = address[0];
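+      // The table is a length word followed by (ast_id, pc_offset) pairs;
+      // entry i sits at address[2 * i + 1] and address[2 * i + 2].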
+      PrintF(out, "Stack checks (size = %u)\n", length);
+      PrintF(out, "ast_id  pc_offset\n");
+      for (unsigned i = 0; i < length; ++i) {
+        unsigned index = (2 * i) + 1;
+        PrintF(out, "%6u  %9u\n", address[index], address[index + 1]);
+      }
+      PrintF(out, "\n");
+    }
+  }
 
   PrintF("RelocInfo (size = %d)\n", relocation_size());
-  for (RelocIterator it(this); !it.done(); it.next())
-    it.rinfo()->Print();
-  PrintF("\n");
+  for (RelocIterator it(this); !it.done(); it.next()) it.rinfo()->Print(out);
+  PrintF(out, "\n");
 }
 #endif  // ENABLE_DISASSEMBLER
 
@@ -6265,13 +6655,6 @@
     return UNDEFINED_ELEMENT;
   }
 
-  if (IsJSGlobalProxy()) {
-    Object* proto = GetPrototype();
-    if (proto->IsNull()) return UNDEFINED_ELEMENT;
-    ASSERT(proto->IsJSGlobalObject());
-    return JSObject::cast(proto)->HasLocalElement(index);
-  }
-
   // Check for lookup interceptor
   if (HasIndexedInterceptor()) {
     return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT
@@ -7039,22 +7422,22 @@
 // class. This requires us to have the template functions put
 // together, so even though this function belongs in objects-debug.cc,
 // we keep it here instead to satisfy certain compilers.
-#ifdef DEBUG
+#ifdef OBJECT_PRINT
 template<typename Shape, typename Key>
-void Dictionary<Shape, Key>::Print() {
+void Dictionary<Shape, Key>::Print(FILE* out) {
   int capacity = HashTable<Shape, Key>::Capacity();
   for (int i = 0; i < capacity; i++) {
     Object* k = HashTable<Shape, Key>::KeyAt(i);
     if (HashTable<Shape, Key>::IsKey(k)) {
-      PrintF(" ");
+      PrintF(out, " ");
       if (k->IsString()) {
-        String::cast(k)->StringPrint();
+        String::cast(k)->StringPrint(out);
       } else {
-        k->ShortPrint();
+        k->ShortPrint(out);
       }
-      PrintF(": ");
-      ValueAt(i)->ShortPrint();
-      PrintF("\n");
+      PrintF(out, ": ");
+      ValueAt(i)->ShortPrint(out);
+      PrintF(out, "\n");
     }
   }
 }
@@ -8311,11 +8694,10 @@
 }
 
 
-Object* GlobalObject::GetPropertyCell(LookupResult* result) {
+JSGlobalPropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
   ASSERT(!HasFastProperties());
   Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
-  ASSERT(value->IsJSGlobalPropertyCell());
-  return value;
+  return JSGlobalPropertyCell::cast(value);
 }
 
 
@@ -8571,6 +8953,20 @@
 }
 
 
+void CompilationCacheTable::Remove(Object* value) {
+  for (int entry = 0, size = Capacity(); entry < size; entry++) {
+    int entry_index = EntryToIndex(entry);
+    int value_index = entry_index + 1;
+    if (get(value_index) == value) {
+      fast_set(this, entry_index, Heap::null_value());
+      fast_set(this, value_index, Heap::null_value());
+      ElementRemoved();
+    }
+  }
+}
+
+
 // SymbolsKey used for HashTable where key is array of symbols.
 class SymbolsKey : public HashTableKey {
  public:
diff --git a/src/objects.h b/src/objects.h
index b52bac2..c5fda7d 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -585,6 +585,7 @@
 // A template-ized version of the IsXXX functions.
 template <class C> static inline bool Is(Object* obj);
 
+
 class MaybeObject BASE_EMBEDDED {
  public:
   inline bool IsFailure();
@@ -606,10 +607,18 @@
     return reinterpret_cast<Object*>(this);
   }
 
-#ifdef DEBUG
+#ifdef OBJECT_PRINT
   // Prints this object with details.
-  void Print();
-  void PrintLn();
+  inline void Print() {
+    Print(stdout);
+  }
+  inline void PrintLn() {
+    PrintLn(stdout);
+  }
+  void Print(FILE* out);
+  void PrintLn(FILE* out);
+#endif
+#ifdef DEBUG
   // Verifies the object.
   void Verify();
 #endif
@@ -654,6 +663,8 @@
   inline bool IsMap();
   inline bool IsFixedArray();
   inline bool IsDescriptorArray();
+  inline bool IsDeoptimizationInputData();
+  inline bool IsDeoptimizationOutputData();
   inline bool IsContext();
   inline bool IsCatchContext();
   inline bool IsGlobalContext();
@@ -759,7 +770,10 @@
 #endif
 
   // Prints this object without details.
-  void ShortPrint();
+  inline void ShortPrint() {
+    ShortPrint(stdout);
+  }
+  void ShortPrint(FILE* out);
 
   // Prints this object without details to a message accumulator.
   void ShortPrint(StringStream* accumulator);
@@ -798,7 +812,10 @@
   static inline Smi* cast(Object* object);
 
   // Dispatched behavior.
-  void SmiPrint();
+  inline void SmiPrint() {
+    SmiPrint(stdout);
+  }
+  void SmiPrint(FILE* out);
   void SmiPrint(StringStream* accumulator);
 #ifdef DEBUG
   void SmiVerify();
@@ -867,7 +884,10 @@
   static inline Failure* cast(MaybeObject* object);
 
   // Dispatched behavior.
-  void FailurePrint();
+  inline void FailurePrint() {
+    FailurePrint(stdout);
+  }
+  void FailurePrint(FILE* out);
   void FailurePrint(StringStream* accumulator);
 #ifdef DEBUG
   void FailureVerify();
@@ -1096,14 +1116,23 @@
 
   // Dispatched behavior.
   void HeapObjectShortPrint(StringStream* accumulator);
+#ifdef OBJECT_PRINT
+  inline void HeapObjectPrint() {
+    HeapObjectPrint(stdout);
+  }
+  void HeapObjectPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void HeapObjectPrint();
   void HeapObjectVerify();
   inline void VerifyObjectField(int offset);
   inline void VerifySmiField(int offset);
+#endif
 
-  void PrintHeader(const char* id);
+#ifdef OBJECT_PRINT
+  void PrintHeader(FILE* out, const char* id);
+#endif
 
+#ifdef DEBUG
   // Verify a pointer is a valid HeapObject pointer that points to object
   // areas in the heap.
   static void VerifyHeapPointer(Object* p);
@@ -1186,7 +1215,10 @@
 
   // Dispatched behavior.
   Object* HeapNumberToBoolean();
-  void HeapNumberPrint();
+  inline void HeapNumberPrint() {
+    HeapNumberPrint(stdout);
+  }
+  void HeapNumberPrint(FILE* out);
   void HeapNumberPrint(StringStream* accumulator);
 #ifdef DEBUG
   void HeapNumberVerify();
@@ -1365,7 +1397,7 @@
 
   MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
                                               bool is_getter,
-                                              JSFunction* fun,
+                                              Object* fun,
                                               PropertyAttributes attributes);
   Object* LookupAccessor(String* name, bool is_getter);
 
@@ -1646,12 +1678,28 @@
 
   // Dispatched behavior.
   void JSObjectShortPrint(StringStream* accumulator);
+#ifdef OBJECT_PRINT
+  inline void JSObjectPrint() {
+    JSObjectPrint(stdout);
+  }
+  void JSObjectPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void JSObjectPrint();
   void JSObjectVerify();
-  void PrintProperties();
-  void PrintElements();
+#endif
+#ifdef OBJECT_PRINT
+  inline void PrintProperties() {
+    PrintProperties(stdout);
+  }
+  void PrintProperties(FILE* out);
 
+  inline void PrintElements() {
+    PrintElements(stdout);
+  }
+  void PrintElements(FILE* out);
+#endif
+
+#ifdef DEBUG
   // Structure for collecting spill information about JSObjects.
   class SpillInformation {
    public:
@@ -1686,7 +1734,7 @@
   static const uint32_t kMaxGap = 1024;
   static const int kMaxFastElementsLength = 5000;
   static const int kInitialMaxFastElementArray = 100000;
-  static const int kMaxFastProperties = 8;
+  static const int kMaxFastProperties = 12;
   static const int kMaxInstanceSize = 255 * kPointerSize;
   // When extending the backing storage for property values, we increase
   // its size by more than the 1 entry necessary, so sequentially adding fields
@@ -1832,8 +1880,13 @@
   static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
 
   // Dispatched behavior.
+#ifdef OBJECT_PRINT
+  inline void FixedArrayPrint() {
+    FixedArrayPrint(stdout);
+  }
+  void FixedArrayPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void FixedArrayPrint();
   void FixedArrayVerify();
   // Checks if two FixedArrays have identical contents.
   bool IsEqualTo(FixedArray* other);
@@ -2009,10 +2062,15 @@
   static const int kEnumCacheBridgeCacheOffset =
     kEnumCacheBridgeEnumOffset + kPointerSize;
 
-#ifdef DEBUG
+#ifdef OBJECT_PRINT
   // Print all the descriptors.
-  void PrintDescriptors();
+  inline void PrintDescriptors() {
+    PrintDescriptors(stdout);
+  }
+  void PrintDescriptors(FILE* out);
+#endif
 
+#ifdef DEBUG
   // Is the descriptor array sorted and without duplicates?
   bool IsSortedNoDuplicates();
 
@@ -2393,8 +2451,11 @@
   // Ensure enough space for n additional elements.
   MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
 
-#ifdef DEBUG
-  void Print();
+#ifdef OBJECT_PRINT
+  inline void Print() {
+    Print(stdout);
+  }
+  void Print(FILE* out);
 #endif
   // Returns the key (slow).
   Object* SlowReverseLookup(Object* value);
@@ -2616,8 +2677,13 @@
   inline int ByteArraySize() {
     return SizeFor(this->length());
   }
+#ifdef OBJECT_PRINT
+  inline void ByteArrayPrint() {
+    ByteArrayPrint(stdout);
+  }
+  void ByteArrayPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void ByteArrayPrint();
   void ByteArrayVerify();
 #endif
 
@@ -2666,8 +2732,13 @@
   // Casting.
   static inline PixelArray* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void PixelArrayPrint() {
+    PixelArrayPrint(stdout);
+  }
+  void PixelArrayPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void PixelArrayPrint();
   void PixelArrayVerify();
 #endif  // DEBUG
 
@@ -2738,8 +2809,13 @@
   // Casting.
   static inline ExternalByteArray* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void ExternalByteArrayPrint() {
+    ExternalByteArrayPrint(stdout);
+  }
+  void ExternalByteArrayPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void ExternalByteArrayPrint();
   void ExternalByteArrayVerify();
 #endif  // DEBUG
 
@@ -2761,8 +2837,13 @@
   // Casting.
   static inline ExternalUnsignedByteArray* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void ExternalUnsignedByteArrayPrint() {
+    ExternalUnsignedByteArrayPrint(stdout);
+  }
+  void ExternalUnsignedByteArrayPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void ExternalUnsignedByteArrayPrint();
   void ExternalUnsignedByteArrayVerify();
 #endif  // DEBUG
 
@@ -2784,8 +2865,13 @@
   // Casting.
   static inline ExternalShortArray* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void ExternalShortArrayPrint() {
+    ExternalShortArrayPrint(stdout);
+  }
+  void ExternalShortArrayPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void ExternalShortArrayPrint();
   void ExternalShortArrayVerify();
 #endif  // DEBUG
 
@@ -2807,8 +2893,13 @@
   // Casting.
   static inline ExternalUnsignedShortArray* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void ExternalUnsignedShortArrayPrint() {
+    ExternalUnsignedShortArrayPrint(stdout);
+  }
+  void ExternalUnsignedShortArrayPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void ExternalUnsignedShortArrayPrint();
   void ExternalUnsignedShortArrayVerify();
 #endif  // DEBUG
 
@@ -2830,8 +2921,13 @@
   // Casting.
   static inline ExternalIntArray* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void ExternalIntArrayPrint() {
+    ExternalIntArrayPrint(stdout);
+  }
+  void ExternalIntArrayPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void ExternalIntArrayPrint();
   void ExternalIntArrayVerify();
 #endif  // DEBUG
 
@@ -2853,8 +2949,13 @@
   // Casting.
   static inline ExternalUnsignedIntArray* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void ExternalUnsignedIntArrayPrint() {
+    ExternalUnsignedIntArrayPrint(stdout);
+  }
+  void ExternalUnsignedIntArrayPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void ExternalUnsignedIntArrayPrint();
   void ExternalUnsignedIntArrayVerify();
 #endif  // DEBUG
 
@@ -2876,8 +2977,13 @@
   // Casting.
   static inline ExternalFloatArray* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void ExternalFloatArrayPrint() {
+    ExternalFloatArrayPrint(stdout);
+  }
+  void ExternalFloatArrayPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void ExternalFloatArrayPrint();
   void ExternalFloatArrayVerify();
 #endif  // DEBUG
 
@@ -2886,6 +2992,122 @@
 };
 
 
+// DeoptimizationInputData is a fixed array used to hold the deoptimization
+// data for code generated by the Hydrogen/Lithium compiler.  It also
+// contains information about functions that were inlined.  If N different
+// functions were inlined, then the first N elements of the literal array
+// will contain these functions.
+//
+// It can be empty.
+class DeoptimizationInputData: public FixedArray {
+ public:
+  // Layout description.  Indices in the array.
+  static const int kTranslationByteArrayIndex = 0;
+  static const int kInlinedFunctionCountIndex = 1;
+  static const int kLiteralArrayIndex = 2;
+  static const int kOsrAstIdIndex = 3;
+  static const int kOsrPcOffsetIndex = 4;
+  static const int kFirstDeoptEntryIndex = 5;
+
+  // Offsets of deopt entry elements relative to the start of the entry.
+  static const int kAstIdOffset = 0;
+  static const int kTranslationIndexOffset = 1;
+  static const int kArgumentsStackHeightOffset = 2;
+  static const int kDeoptEntrySize = 3;
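+  // An array holding N deopt entries thus has length
+  // kFirstDeoptEntryIndex + N * kDeoptEntrySize, i.e. 5 + 3 * N, with entry
+  // i occupying indices 5 + 3 * i through 5 + 3 * i + 2 (see IndexForEntry).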
+
+  // Simple element accessors.
+#define DEFINE_ELEMENT_ACCESSORS(name, type)      \
+  type* name() {                                  \
+    return type::cast(get(k##name##Index));       \
+  }                                               \
+  void Set##name(type* value) {                   \
+    set(k##name##Index, value);                   \
+  }
+
+  DEFINE_ELEMENT_ACCESSORS(TranslationByteArray, ByteArray)
+  DEFINE_ELEMENT_ACCESSORS(InlinedFunctionCount, Smi)
+  DEFINE_ELEMENT_ACCESSORS(LiteralArray, FixedArray)
+  DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
+  DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
+
+  // Unchecked accessor to be used during GC.
+  FixedArray* UncheckedLiteralArray() {
+    return reinterpret_cast<FixedArray*>(get(kLiteralArrayIndex));
+  }
+
+#undef DEFINE_ELEMENT_ACCESSORS
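+  // For instance, DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi) above expanded to
+  // Smi* OsrAstId() and void SetOsrAstId(Smi* value), reading and writing
+  // the element at kOsrAstIdIndex.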
+
+  // Accessors for elements of the ith deoptimization entry.
+#define DEFINE_ENTRY_ACCESSORS(name, type)                       \
+  type* name(int i) {                                            \
+    return type::cast(get(IndexForEntry(i) + k##name##Offset));  \
+  }                                                              \
+  void Set##name(int i, type* value) {                           \
+    set(IndexForEntry(i) + k##name##Offset, value);              \
+  }
+
+  DEFINE_ENTRY_ACCESSORS(AstId, Smi)
+  DEFINE_ENTRY_ACCESSORS(TranslationIndex, Smi)
+  DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
+
+#undef DEFINE_ENTRY_ACCESSORS
+
+  int DeoptCount() {
+    return (length() - kFirstDeoptEntryIndex) / kDeoptEntrySize;
+  }
+
+  // Allocates a DeoptimizationInputData.
+  MUST_USE_RESULT static MaybeObject* Allocate(int deopt_entry_count,
+                                               PretenureFlag pretenure);
+
+  // Casting.
+  static inline DeoptimizationInputData* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  void DeoptimizationInputDataPrint(FILE* out);
+#endif
+
+ private:
+  static int IndexForEntry(int i) {
+    return kFirstDeoptEntryIndex + (i * kDeoptEntrySize);
+  }
+
+  static int LengthFor(int entry_count) {
+    return IndexForEntry(entry_count);
+  }
+};
+
+
+// DeoptimizationOutputData is a fixed array used to hold the deoptimization
+// data for code generated by the full compiler.
+// The format of these objects is:
+//   [i * 2]: Ast ID for ith deoptimization.
+//   [i * 2 + 1]: PC and state of ith deoptimization.
+class DeoptimizationOutputData: public FixedArray {
+ public:
+  int DeoptPoints() { return length() / 2; }
+  Smi* AstId(int index) { return Smi::cast(get(index * 2)); }
+  void SetAstId(int index, Smi* id) { set(index * 2, id); }
+  Smi* PcAndState(int index) { return Smi::cast(get(1 + index * 2)); }
+  void SetPcAndState(int index, Smi* offset) { set(1 + index * 2, offset); }
+
+  static int LengthOfFixedArray(int deopt_points) {
+    return deopt_points * 2;
+  }
+
+  // Allocates a DeoptimizationOutputData.
+  MUST_USE_RESULT static MaybeObject* Allocate(int number_of_deopt_points,
+                                               PretenureFlag pretenure);
+
+  // Casting.
+  static inline DeoptimizationOutputData* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  void DeoptimizationOutputDataPrint(FILE* out);
+#endif
+};
+
+
 // Code describes objects with on-the-fly generated machine code.
 class Code: public HeapObject {
  public:
@@ -2900,6 +3122,7 @@
 
   enum Kind {
     FUNCTION,
+    OPTIMIZED_FUNCTION,
     STUB,
     BUILTIN,
     LOAD_IC,
@@ -2909,13 +3132,15 @@
     STORE_IC,
     KEYED_STORE_IC,
     BINARY_OP_IC,
+    TYPE_RECORDING_BINARY_OP_IC,
+    COMPARE_IC,
     // No more than 16 kinds. The value currently encoded in four bits in
     // Flags.
 
     // Pseudo-kinds.
     REGEXP = BUILTIN,
     FIRST_IC_KIND = LOAD_IC,
-    LAST_IC_KIND = BINARY_OP_IC
+    LAST_IC_KIND = COMPARE_IC
   };
 
   enum {
@@ -2927,7 +3152,10 @@
   static const char* Kind2String(Kind kind);
   static const char* ICState2String(InlineCacheState state);
   static const char* PropertyType2String(PropertyType type);
-  void Disassemble(const char* name);
+  inline void Disassemble(const char* name) {
+    Disassemble(name, stdout);
+  }
+  void Disassemble(const char* name, FILE* out);
 #endif  // ENABLE_DISASSEMBLER
 
   // [instruction_size]: Size of the native instructions
@@ -2936,9 +3164,14 @@
 
   // [relocation_info]: Code relocation information
   DECL_ACCESSORS(relocation_info, ByteArray)
+  void InvalidateRelocation();
 
-  // Unchecked accessor to be used during GC.
+  // [deoptimization_data]: Array containing data for deopt.
+  DECL_ACCESSORS(deoptimization_data, FixedArray)
+
+  // Unchecked accessors to be used during GC.
   inline ByteArray* unchecked_relocation_info();
+  inline FixedArray* unchecked_deoptimization_data();
 
   inline int relocation_size();
 
@@ -2961,10 +3194,77 @@
   inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
   inline bool is_call_stub() { return kind() == CALL_IC; }
   inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
+  inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
+  inline bool is_type_recording_binary_op_stub() {
+    return kind() == TYPE_RECORDING_BINARY_OP_IC;
+  }
+  inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; }
 
   // [major_key]: For kind STUB or BINARY_OP_IC, the major key.
   inline int major_key();
-  inline void set_major_key(int major);
+  inline void set_major_key(int value);
+
+  // [optimizable]: For FUNCTION kind, tells if it is optimizable.
+  inline bool optimizable();
+  inline void set_optimizable(bool value);
+
+  // [has_deoptimization_support]: For FUNCTION kind, tells if it has
+  // deoptimization support.
+  inline bool has_deoptimization_support();
+  inline void set_has_deoptimization_support(bool value);
+
+  // [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
+  // how long the function has been marked for OSR and therefore which
+  // level of loop nesting we are willing to do on-stack replacement
+  // for.
+  inline void set_allow_osr_at_loop_nesting_level(int level);
+  inline int allow_osr_at_loop_nesting_level();
+
+  // [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
+  // reserved in the code prologue.
+  inline unsigned stack_slots();
+  inline void set_stack_slots(unsigned slots);
+
+  // [safepoint_table_start]: For kind OPTIMIZED_FUNCTION, the offset in
+  // the instruction stream where the safepoint table starts.
+  inline unsigned safepoint_table_start();
+  inline void set_safepoint_table_start(unsigned offset);
+
+  // [stack_check_table_start]: For kind FUNCTION, the offset in the
+  // instruction stream where the stack check table starts.
+  inline unsigned stack_check_table_start();
+  inline void set_stack_check_table_start(unsigned offset);
+
+  // [check type]: For kind CALL_IC, tells how to check if the
+  // receiver is valid for the given call.
+  inline CheckType check_type();
+  inline void set_check_type(CheckType value);
+
+  // [binary op type]: For all BINARY_OP_IC.
+  inline byte binary_op_type();
+  inline void set_binary_op_type(byte value);
+
+  // [type-recording binary op type]: For all TYPE_RECORDING_BINARY_OP_IC.
+  inline byte type_recording_binary_op_type();
+  inline void set_type_recording_binary_op_type(byte value);
+  inline byte type_recording_binary_op_result_type();
+  inline void set_type_recording_binary_op_result_type(byte value);
+
+  // [compare state]: For kind compare IC stubs, tells what state the
+  // stub is in.
+  inline byte compare_state();
+  inline void set_compare_state(byte value);
+
+  // Get the safepoint entry for the given pc. Returns NULL for
+  // non-safepoint pcs.
+  uint8_t* GetSafepointEntry(Address pc);
+
+  // Mark this code object as not having a stack check table.  Assumes kind
+  // is FUNCTION.
+  void SetNoStackCheckTable();
+
+  // Find the first map in an IC stub.
+  Map* FindFirstMap();
 
   // Flags operations.
   static inline Flags ComputeFlags(Kind kind,
@@ -3048,22 +3348,54 @@
 
   template<typename StaticVisitor>
   inline void CodeIterateBody();
+#ifdef OBJECT_PRINT
+  inline void CodePrint() {
+    CodePrint(stdout);
+  }
+  void CodePrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void CodePrint();
   void CodeVerify();
 #endif
+
+  // Max loop nesting marker used to postpone OSR. We don't take loop
+  // nesting that is deeper than 5 levels into account.
+  static const int kMaxLoopNestingMarker = 6;
+
   // Layout description.
   static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
   static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
-  static const int kFlagsOffset = kRelocationInfoOffset + kPointerSize;
+  static const int kDeoptimizationDataOffset =
+      kRelocationInfoOffset + kPointerSize;
+  static const int kFlagsOffset = kDeoptimizationDataOffset + kPointerSize;
   static const int kKindSpecificFlagsOffset  = kFlagsOffset + kIntSize;
+
+  static const int kKindSpecificFlagsSize = 2 * kIntSize;
+
+  static const int kHeaderPaddingStart = kKindSpecificFlagsOffset +
+      kKindSpecificFlagsSize;
+
   // Add padding to align the instruction start following right after
   // the Code object header.
   static const int kHeaderSize =
-      CODE_POINTER_ALIGN(kKindSpecificFlagsOffset + kIntSize);
+      (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
 
   // Byte offsets within kKindSpecificFlagsOffset.
-  static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset + 1;
+  static const int kStubMajorKeyOffset = kKindSpecificFlagsOffset;
+  static const int kOptimizableOffset = kKindSpecificFlagsOffset;
+  static const int kStackSlotsOffset = kKindSpecificFlagsOffset;
+  static const int kCheckTypeOffset = kKindSpecificFlagsOffset;
+
+  static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
+  static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
+  static const int kHasDeoptimizationSupportOffset = kOptimizableOffset + 1;
+
+  static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
+  static const int kAllowOSRAtLoopNestingLevelOffset =
+      kHasDeoptimizationSupportOffset + 1;
+
+  static const int kSafepointTableStartOffset = kStackSlotsOffset + kIntSize;
+  static const int kStackCheckTableStartOffset = kStackSlotsOffset + kIntSize;
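+
+  // Note that several of these offsets alias each other.  That is safe
+  // because each field is only valid for one code kind, which the accessor
+  // asserts in objects-inl.h enforce.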
 
   // Flags layout.
   static const int kFlagsICStateShift        = 0;
@@ -3239,6 +3571,13 @@
   // [stub cache]: contains stubs compiled for this map.
   DECL_ACCESSORS(code_cache, Object)
 
+  // Lookup in the map's instance descriptors and fill out the result
+  // with the given holder if the name is found. The holder may be
+  // NULL when this function is used from the compiler.
+  void LookupInDescriptors(JSObject* holder,
+                           String* name,
+                           LookupResult* result);
+
   MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
 
   MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode,
@@ -3303,8 +3642,13 @@
   void ClearNonLiveTransitions(Object* real_prototype);
 
   // Dispatched behavior.
+#ifdef OBJECT_PRINT
+  inline void MapPrint() {
+    MapPrint(stdout);
+  }
+  void MapPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void MapPrint();
   void MapVerify();
   void SharedMapVerify();
 #endif
@@ -3460,8 +3804,13 @@
   // resource is accessible. Otherwise, always return true.
   inline bool HasValidSource();
 
+#ifdef OBJECT_PRINT
+  inline void ScriptPrint() {
+    ScriptPrint(stdout);
+  }
+  void ScriptPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void ScriptPrint();
   void ScriptVerify();
 #endif
 
@@ -3486,6 +3835,52 @@
 };
 
 
+// List of builtin functions we want to identify to improve code
+// generation.
+//
+// Each entry gives the name of a global object property holding an
+// object (optionally followed by ".prototype"), the name of a builtin
+// function on that object (the one the id is set for), and a label.
+//
+// Installation of ids for the selected builtin functions is handled
+// by the bootstrapper.
+//
+// NOTE: Order is important: math functions should be at the end of
+// the list and MathFloor should be the first math function.
+#define FUNCTIONS_WITH_ID_LIST(V)                   \
+  V(Array.prototype, push, ArrayPush)               \
+  V(Array.prototype, pop, ArrayPop)                 \
+  V(String.prototype, charCodeAt, StringCharCodeAt) \
+  V(String.prototype, charAt, StringCharAt)         \
+  V(String, fromCharCode, StringFromCharCode)       \
+  V(Math, floor, MathFloor)                         \
+  V(Math, round, MathRound)                         \
+  V(Math, ceil, MathCeil)                           \
+  V(Math, abs, MathAbs)                             \
+  V(Math, log, MathLog)                             \
+  V(Math, sin, MathSin)                             \
+  V(Math, cos, MathCos)                             \
+  V(Math, tan, MathTan)                             \
+  V(Math, asin, MathASin)                           \
+  V(Math, acos, MathACos)                           \
+  V(Math, atan, MathATan)                           \
+  V(Math, exp, MathExp)                             \
+  V(Math, sqrt, MathSqrt)                           \
+  V(Math, pow, MathPow)
+
+
+enum BuiltinFunctionId {
+#define DECLARE_FUNCTION_ID(ignored1, ignore2, name)    \
+  k##name,
+  FUNCTIONS_WITH_ID_LIST(DECLARE_FUNCTION_ID)
+#undef DECLARE_FUNCTION_ID
+  // Fake id for a special case of Math.pow. Note that it continues the
+  // list of math functions.
+  kMathPowHalf,
+  kFirstMathFunctionId = kMathFloor
+};
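Because the enum is generated from the same X-macro as the list, the two stay in lockstep automatically. A hypothetical consumer, such as an optimizing compiler deciding on a fast path, might dispatch on the id as below; `shared` is an assumed SharedFunctionInfo* and the case bodies are placeholders:

    if (shared->HasBuiltinFunctionId()) {
      switch (shared->builtin_function_id()) {
        case kMathFloor: /* emit an inline floor */       break;
        case kArrayPush: /* emit an inline push */        break;
        default:         /* fall back to a normal call */ break;
      }
    }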
+
+
 // SharedFunctionInfo describes the JSFunction information that can be
 // shared by multiple instances of the function.
 class SharedFunctionInfo: public HeapObject {
@@ -3623,7 +4018,7 @@
 
   // [function data]: This field holds some additional data for the function.
   // Currently it either has FunctionTemplateInfo to benefit the API
-  // or Smi identifying a custom call generator.
+  // or a Smi identifying a builtin function.
   // In the long run we don't want all functions to have this field but
   // we can fix that when we have a better model for storing hidden data
   // on objects.
@@ -3631,8 +4026,9 @@
 
   inline bool IsApiFunction();
   inline FunctionTemplateInfo* get_api_func_data();
-  inline bool HasCustomCallGenerator();
-  inline int custom_call_generator_id();
+  inline bool HasBuiltinFunctionId();
+  inline bool IsBuiltinMathFunction();
+  inline BuiltinFunctionId builtin_function_id();
 
   // [script info]: Script from which the function originates.
   DECL_ACCESSORS(script, Object)
@@ -3687,6 +4083,11 @@
   inline int compiler_hints();
   inline void set_compiler_hints(int value);
 
+  // A counter used to determine when to stress the deoptimizer with a
+  // deopt.
+  inline Smi* deopt_counter();
+  inline void set_deopt_counter(Smi* counter);
+
   // Add information on assignments of the form this.x = ...;
   void SetThisPropertyAssignmentsInfo(
       bool has_only_simple_this_property_assignments,
@@ -3716,6 +4117,24 @@
   inline int code_age();
   inline void set_code_age(int age);
 
+  // Indicates whether optimizations have been disabled for this
+  // shared function info. If a function is repeatedly optimized or if
+  // we cannot optimize the function, we disable optimization to avoid
+  // spending time attempting to optimize it again.
+  inline bool optimization_disabled();
+  inline void set_optimization_disabled(bool value);
+
+  // Indicates whether or not the code in the shared function supports
+  // deoptimization.
+  inline bool has_deoptimization_support();
+
+  // Enable deoptimization support through recompiled code.
+  void EnableDeoptimizationSupport(Code* recompiled);
+
+  // Look up the bailout ID and ASSERT that it exists in the non-optimized
+  // code; returns whether it asserted (i.e., always true if assertions
+  // are disabled).
+  bool VerifyBailoutId(int id);
 
   // Check whether an inlined constructor can be generated with the given
   // prototype.
@@ -3739,6 +4158,12 @@
   bool HasSourceCode();
   Object* GetSourceCode();
 
+  inline int opt_count();
+  inline void set_opt_count(int opt_count);
+
+  // Source size of this function.
+  int SourceSize();
+
   // Calculate the instance size.
   int CalculateInstanceSize();
 
@@ -3748,8 +4173,13 @@
   // Dispatched behavior.
   // Set max_length to -1 for unlimited length.
   void SourceCodePrint(StringStream* accumulator, int max_length);
+#ifdef OBJECT_PRINT
+  inline void SharedFunctionInfoPrint() {
+    SharedFunctionInfoPrint(stdout);
+  }
+  void SharedFunctionInfoPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void SharedFunctionInfoPrint();
   void SharedFunctionInfoVerify();
 #endif
 
@@ -3776,10 +4206,12 @@
       kInferredNameOffset + kPointerSize;
   static const int kThisPropertyAssignmentsOffset =
       kInitialMapOffset + kPointerSize;
+  static const int kDeoptCounterOffset =
+      kThisPropertyAssignmentsOffset + kPointerSize;
 #if V8_HOST_ARCH_32_BIT
   // Smi fields.
   static const int kLengthOffset =
-      kThisPropertyAssignmentsOffset + kPointerSize;
+      kDeoptCounterOffset + kPointerSize;
   static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
   static const int kExpectedNofPropertiesOffset =
       kFormalParameterCountOffset + kPointerSize;
@@ -3795,8 +4227,10 @@
       kFunctionTokenPositionOffset + kPointerSize;
   static const int kThisPropertyAssignmentsCountOffset =
       kCompilerHintsOffset + kPointerSize;
+  static const int kOptCountOffset =
+      kThisPropertyAssignmentsCountOffset + kPointerSize;
   // Total size.
-  static const int kSize = kThisPropertyAssignmentsCountOffset + kPointerSize;
+  static const int kSize = kOptCountOffset + kPointerSize;
 #else
   // The only reason to use smi fields instead of int fields
   // is to allow iteration without maps decoding during
@@ -3808,7 +4242,7 @@
   // word is not set and thus this word cannot be treated as pointer
   // to HeapObject during old space traversal.
   static const int kLengthOffset =
-      kThisPropertyAssignmentsOffset + kPointerSize;
+      kDeoptCounterOffset + kPointerSize;
   static const int kFormalParameterCountOffset =
       kLengthOffset + kIntSize;
 
@@ -3829,9 +4263,11 @@
 
   static const int kThisPropertyAssignmentsCountOffset =
       kCompilerHintsOffset + kIntSize;
+  static const int kOptCountOffset =
+      kThisPropertyAssignmentsCountOffset + kIntSize;
 
   // Total size.
-  static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize;
+  static const int kSize = kOptCountOffset + kIntSize;
 
 #endif
 
@@ -3867,7 +4303,8 @@
   static const int kAllowLazyCompilation = 2;
   static const int kLiveObjectsMayExist = 3;
   static const int kCodeAgeShift = 4;
-  static const int kCodeAgeMask = 7;
+  static const int kCodeAgeMask = 0x7;
+  static const int kOptimizationDisabled = 7;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
 };
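With kCodeAgeShift == 4 and a three-bit mask, the code age occupies bits 4 through 6, leaving bit 7 free for the new kOptimizationDisabled flag. A sketch of the assumed decoding in plain bit arithmetic; the real accessors live in objects-inl.h and may use helper classes:

    int hints = shared->compiler_hints();
    bool allows_lazy = (hints >> kAllowLazyCompilation) & 1;     // bit 2
    int code_age     = (hints >> kCodeAgeShift) & kCodeAgeMask;  // bits 4..6
    bool opt_off     = (hints >> kOptimizationDisabled) & 1;     // bit 7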
@@ -3895,13 +4332,34 @@
   // [[Call]] and [[Construct]] description in ECMA-262, section
   // 8.6.2, page 27.
   inline Code* code();
-  inline void set_code(Code* value);
+  inline void set_code(Code* code);
+  inline void ReplaceCode(Code* code);
 
   inline Code* unchecked_code();
 
   // Tells whether this function is a builtin.
   inline bool IsBuiltin();
 
+  // Tells whether or not the function needs arguments adaptation.
+  inline bool NeedsArgumentsAdaption();
+
+  // Tells whether or not this function has been optimized.
+  inline bool IsOptimized();
+
+  // Mark this function for lazy recompilation. The function will be
+  // recompiled the next time it is executed.
+  void MarkForLazyRecompilation();
+
+  // Tells whether or not the function is already marked for lazy
+  // recompilation.
+  inline bool IsMarkedForLazyRecompilation();
+
+  // Compute a hash code for the source code of this function.
+  uint32_t SourceHash();
+
+  // Check whether or not this function is inlineable.
+  bool IsInlineable();
+
   // [literals]: Fixed array holding the materialized literals.
   //
   // If the function contains object, regexp or array literals, the
@@ -3948,6 +4406,16 @@
   // Returns if this function has been compiled to native code yet.
   inline bool is_compiled();
 
+  // [next_function_link]: Field for linking functions. This list is treated as
+  // a weak list by the GC.
+  DECL_ACCESSORS(next_function_link, Object)
+
+  // Prints the name of the function using PrintF.
+  inline void PrintName() {
+    PrintName(stdout);
+  }
+  void PrintName(FILE* out);
+
   // Casting.
   static inline JSFunction* cast(Object* obj);
 
@@ -3956,8 +4424,13 @@
   void JSFunctionIterateBody(int object_size, ObjectVisitor* v);
 
   // Dispatched behavior.
+#ifdef OBJECT_PRINT
+  inline void JSFunctionPrint() {
+    JSFunctionPrint(stdout);
+  }
+  void JSFunctionPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void JSFunctionPrint();
   void JSFunctionVerify();
 #endif
 
@@ -3967,7 +4440,8 @@
   // Retrieve the global context from a function's literal array.
   static Context* GlobalContextFromLiterals(FixedArray* literals);
 
-  // Layout descriptors.
+  // Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
+  // kSize) is weak and has special handling during garbage collection.
   static const int kCodeEntryOffset = JSObject::kHeaderSize;
   static const int kPrototypeOrInitialMapOffset =
       kCodeEntryOffset + kPointerSize;
@@ -3975,7 +4449,9 @@
       kPrototypeOrInitialMapOffset + kPointerSize;
   static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
   static const int kLiteralsOffset = kContextOffset + kPointerSize;
-  static const int kSize = kLiteralsOffset + kPointerSize;
+  static const int kNonWeakFieldsEndOffset = kLiteralsOffset + kPointerSize;
+  static const int kNextFunctionLinkOffset = kNonWeakFieldsEndOffset;
+  static const int kSize = kNextFunctionLinkOffset + kPointerSize;
 
   // Layout of the literals array.
   static const int kLiteralsPrefixSize = 1;
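Keeping the weak field last means [kNonWeakFieldsEndOffset, kSize) is the only range the garbage collector has to treat specially. On an assumed 32-bit layout (kPointerSize == 4, JSObject::kHeaderSize == 12; illustrative numbers) the offsets work out to:

    //   kCodeEntryOffset             = 12
    //   kPrototypeOrInitialMapOffset = 16
    //   kSharedFunctionInfoOffset    = 20
    //   kContextOffset               = 24
    //   kLiteralsOffset              = 28
    //   kNonWeakFieldsEndOffset      = 32   // strong pointers end here
    //   kNextFunctionLinkOffset      = 32   // weak slot: [32, 36)
    //   kSize                        = 36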
@@ -4003,8 +4479,13 @@
   static inline JSGlobalProxy* cast(Object* obj);
 
   // Dispatched behavior.
+#ifdef OBJECT_PRINT
+  inline void JSGlobalProxyPrint() {
+    JSGlobalProxyPrint(stdout);
+  }
+  void JSGlobalProxyPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void JSGlobalProxyPrint();
   void JSGlobalProxyVerify();
 #endif
 
@@ -4020,6 +4501,7 @@
 
 // Forward declaration.
 class JSBuiltinsObject;
+class JSGlobalPropertyCell;
 
 // Common super class for JavaScript global objects and the special
 // builtins global objects.
@@ -4035,7 +4517,7 @@
   DECL_ACCESSORS(global_receiver, JSObject)
 
   // Retrieve the property cell used to store a property.
-  Object* GetPropertyCell(LookupResult* result);
+  JSGlobalPropertyCell* GetPropertyCell(LookupResult* result);
 
   // This is like GetProperty, but is used when you know the lookup won't fail
   // by throwing an exception.  This is for the debug and builtins global
@@ -4073,8 +4555,13 @@
   static inline JSGlobalObject* cast(Object* obj);
 
   // Dispatched behavior.
+#ifdef OBJECT_PRINT
+  inline void JSGlobalObjectPrint() {
+    JSGlobalObjectPrint(stdout);
+  }
+  void JSGlobalObjectPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void JSGlobalObjectPrint();
   void JSGlobalObjectVerify();
 #endif
 
@@ -4102,8 +4589,13 @@
   static inline JSBuiltinsObject* cast(Object* obj);
 
   // Dispatched behavior.
+#ifdef OBJECT_PRINT
+  inline void JSBuiltinsObjectPrint() {
+    JSBuiltinsObjectPrint(stdout);
+  }
+  void JSBuiltinsObjectPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void JSBuiltinsObjectPrint();
   void JSBuiltinsObjectVerify();
 #endif
 
@@ -4140,8 +4632,13 @@
   static inline JSValue* cast(Object* obj);
 
   // Dispatched behavior.
+#ifdef OBJECT_PRINT
+  inline void JSValuePrint() {
+    JSValuePrint(stdout);
+  }
+  void JSValuePrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void JSValuePrint();
   void JSValueVerify();
 #endif
 
@@ -4297,6 +4794,9 @@
   MaybeObject* PutEval(String* src, Context* context, Object* value);
   MaybeObject* PutRegExp(String* src, JSRegExp::Flags flags, FixedArray* value);
 
+  // Remove the given value from the cache.
+  void Remove(Object* value);
+
   static inline CompilationCacheTable* cast(Object* obj);
 
  private:
@@ -4327,8 +4827,13 @@
 
   static inline CodeCache* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void CodeCachePrint() {
+    CodeCachePrint(stdout);
+  }
+  void CodeCachePrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void CodeCachePrint();
   void CodeCacheVerify();
 #endif
 
@@ -4629,8 +5134,13 @@
 
   // Dispatched behavior.
   void StringShortPrint(StringStream* accumulator);
+#ifdef OBJECT_PRINT
+  inline void StringPrint() {
+    StringPrint(stdout);
+  }
+  void StringPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void StringPrint();
   void StringVerify();
 #endif
   inline bool IsFlat();
@@ -5185,7 +5695,12 @@
 
 #ifdef DEBUG
   void JSGlobalPropertyCellVerify();
-  void JSGlobalPropertyCellPrint();
+#endif
+#ifdef OBJECT_PRINT
+  inline void JSGlobalPropertyCellPrint() {
+    JSGlobalPropertyCellPrint(stdout);
+  }
+  void JSGlobalPropertyCellPrint(FILE* out);
 #endif
 
   // Layout description.
@@ -5220,8 +5735,13 @@
   template<typename StaticVisitor>
   inline void ProxyIterateBody();
 
+#ifdef OBJECT_PRINT
+  inline void ProxyPrint() {
+    ProxyPrint(stdout);
+  }
+  void ProxyPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void ProxyPrint();
   void ProxyVerify();
 #endif
 
@@ -5270,8 +5790,13 @@
   inline void EnsureSize(int minimum_size_of_backing_fixed_array);
 
   // Dispatched behavior.
+#ifdef OBJECT_PRINT
+  inline void JSArrayPrint() {
+    JSArrayPrint(stdout);
+  }
+  void JSArrayPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void JSArrayPrint();
   void JSArrayVerify();
 #endif
 
@@ -5342,8 +5867,13 @@
 
   static inline AccessorInfo* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void AccessorInfoPrint() {
+    AccessorInfoPrint(stdout);
+  }
+  void AccessorInfoPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void AccessorInfoPrint();
   void AccessorInfoVerify();
 #endif
 
@@ -5373,8 +5903,13 @@
 
   static inline AccessCheckInfo* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void AccessCheckInfoPrint() {
+    AccessCheckInfoPrint(stdout);
+  }
+  void AccessCheckInfoPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void AccessCheckInfoPrint();
   void AccessCheckInfoVerify();
 #endif
 
@@ -5399,8 +5934,13 @@
 
   static inline InterceptorInfo* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void InterceptorInfoPrint() {
+    InterceptorInfoPrint(stdout);
+  }
+  void InterceptorInfoPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void InterceptorInfoPrint();
   void InterceptorInfoVerify();
 #endif
 
@@ -5424,8 +5964,13 @@
 
   static inline CallHandlerInfo* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void CallHandlerInfoPrint() {
+    CallHandlerInfoPrint(stdout);
+  }
+  void CallHandlerInfoPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void CallHandlerInfoPrint();
   void CallHandlerInfoVerify();
 #endif
 
@@ -5481,8 +6026,13 @@
 
   static inline FunctionTemplateInfo* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void FunctionTemplateInfoPrint() {
+    FunctionTemplateInfoPrint(stdout);
+  }
+  void FunctionTemplateInfoPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void FunctionTemplateInfoPrint();
   void FunctionTemplateInfoVerify();
 #endif
 
@@ -5524,8 +6074,13 @@
 
   static inline ObjectTemplateInfo* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void ObjectTemplateInfoPrint() {
+    ObjectTemplateInfoPrint(stdout);
+  }
+  void ObjectTemplateInfoPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void ObjectTemplateInfoPrint();
   void ObjectTemplateInfoVerify();
 #endif
 
@@ -5543,8 +6098,13 @@
 
   static inline SignatureInfo* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void SignatureInfoPrint() {
+    SignatureInfoPrint(stdout);
+  }
+  void SignatureInfoPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void SignatureInfoPrint();
   void SignatureInfoVerify();
 #endif
 
@@ -5563,8 +6123,13 @@
 
   static inline TypeSwitchInfo* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void TypeSwitchInfoPrint() {
+    TypeSwitchInfoPrint(stdout);
+  }
+  void TypeSwitchInfoPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void TypeSwitchInfoPrint();
   void TypeSwitchInfoVerify();
 #endif
 
@@ -5610,8 +6175,13 @@
 
   static inline DebugInfo* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void DebugInfoPrint() {
+    DebugInfoPrint(stdout);
+  }
+  void DebugInfoPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void DebugInfoPrint();
   void DebugInfoVerify();
 #endif
 
@@ -5663,8 +6233,13 @@
 
   static inline BreakPointInfo* cast(Object* obj);
 
+#ifdef OBJECT_PRINT
+  inline void BreakPointInfoPrint() {
+    BreakPointInfoPrint(stdout);
+  }
+  void BreakPointInfoPrint(FILE* out);
+#endif
 #ifdef DEBUG
-  void BreakPointInfoPrint();
   void BreakPointInfoVerify();
 #endif
 
@@ -5705,6 +6280,9 @@
   // Visits a code entry in a JS function.
   virtual void VisitCodeEntry(Address entry_address);
 
+  // Visits a global property cell reference in the instruction stream.
+  virtual void VisitGlobalPropertyCell(RelocInfo* rinfo);
+
   // Visits a runtime entry in the instruction stream.
   virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
 
diff --git a/src/parser.cc b/src/parser.cc
index 186d102..08f77b8 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -593,7 +593,9 @@
       allow_natives_syntax_(allow_natives_syntax),
       extension_(extension),
       pre_data_(pre_data),
-      fni_(NULL) {
+      fni_(NULL),
+      stack_overflow_(false) {
+  AstNode::ResetIds();
 }
 
 
@@ -607,7 +609,25 @@
 
   // Initialize parser state.
   source->TryFlatten();
-  scanner_.Initialize(source);
+  if (source->IsExternalTwoByteString()) {
+    // Note that the stream is destroyed at the end of each branch block.
+    // The last line of each block can't be hoisted out of the branch, even
+    // though the two calls are identical.
+    ExternalTwoByteStringUC16CharacterStream stream(
+        Handle<ExternalTwoByteString>::cast(source), 0, source->length());
+    scanner_.Initialize(&stream, JavaScriptScanner::kAllLiterals);
+    return DoParseProgram(source, in_global_context, &zone_scope);
+  } else {
+    GenericStringUC16CharacterStream stream(source, 0, source->length());
+    scanner_.Initialize(&stream, JavaScriptScanner::kAllLiterals);
+    return DoParseProgram(source, in_global_context, &zone_scope);
+  }
+}
+
+
+FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
+                                        bool in_global_context,
+                                        ZoneScope* zone_scope) {
   ASSERT(target_stack_ == NULL);
   if (pre_data_ != NULL) pre_data_->Initialize();
 
@@ -643,7 +663,7 @@
           source->length(),
           false,
           temp_scope.ContainsLoops());
-    } else if (scanner().stack_overflow()) {
+    } else if (stack_overflow_) {
       Top::StackOverflow();
     }
   }
@@ -653,25 +673,45 @@
 
   // If there was a syntax error we have to get rid of the AST
   // and it is not safe to do so before the scope has been deleted.
-  if (result == NULL) zone_scope.DeleteOnExit();
+  if (result == NULL) zone_scope->DeleteOnExit();
   return result;
 }
 
-
 FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info) {
   CompilationZoneScope zone_scope(DONT_DELETE_ON_EXIT);
   HistogramTimerScope timer(&Counters::parse_lazy);
   Handle<String> source(String::cast(script_->source()));
   Counters::total_parse_size.Increment(source->length());
 
+  // Initialize parser state.
+  source->TryFlatten();
+  if (source->IsExternalTwoByteString()) {
+    ExternalTwoByteStringUC16CharacterStream stream(
+        Handle<ExternalTwoByteString>::cast(source),
+        info->start_position(),
+        info->end_position());
+    FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
+    return result;
+  } else {
+    GenericStringUC16CharacterStream stream(source,
+                                            info->start_position(),
+                                            info->end_position());
+    FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
+    return result;
+  }
+}
+
+
+FunctionLiteral* Parser::ParseLazy(Handle<SharedFunctionInfo> info,
+                                   UC16CharacterStream* source,
+                                   ZoneScope* zone_scope) {
+  scanner_.Initialize(source, JavaScriptScanner::kAllLiterals);
+  ASSERT(target_stack_ == NULL);
+
   Handle<String> name(String::cast(info->name()));
   fni_ = new FuncNameInferrer();
   fni_->PushEnclosingName(name);
 
-  // Initialize parser state.
-  source->TryFlatten();
-  scanner_.Initialize(source, info->start_position(), info->end_position());
-  ASSERT(target_stack_ == NULL);
   mode_ = PARSE_EAGERLY;
 
   // Place holder for the result.
@@ -693,7 +733,7 @@
     // Make sure the results agree.
     ASSERT(ok == (result != NULL));
     // The only errors should be stack overflows.
-    ASSERT(ok || scanner_.stack_overflow());
+    ASSERT(ok || stack_overflow_);
   }
 
   // Make sure the target stack is empty.
@@ -703,7 +743,10 @@
   // not safe to do before scope has been deleted.
   if (result == NULL) {
     Top::StackOverflow();
-    zone_scope.DeleteOnExit();
+    zone_scope->DeleteOnExit();
+  } else {
+    Handle<String> inferred_name(info->inferred_name());
+    result->set_inferred_name(inferred_name);
   }
   return result;
 }
@@ -714,12 +757,12 @@
   if (pre_data() != NULL) {
     symbol_id = pre_data()->GetSymbolIdentifier();
   }
-  return LookupSymbol(symbol_id, scanner_.literal());
+  return LookupSymbol(symbol_id, scanner().literal());
 }
 
 
 void Parser::ReportMessage(const char* type, Vector<const char*> args) {
-  Scanner::Location source_location = scanner_.location();
+  Scanner::Location source_location = scanner().location();
   ReportMessageAt(source_location, type, args);
 }
 
@@ -1636,7 +1679,7 @@
   Expect(Token::CONTINUE, CHECK_OK);
   Handle<String> label = Handle<String>::null();
   Token::Value tok = peek();
-  if (!scanner_.has_line_terminator_before_next() &&
+  if (!scanner().has_line_terminator_before_next() &&
       tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
     label = ParseIdentifier(CHECK_OK);
   }
@@ -1662,7 +1705,7 @@
   Expect(Token::BREAK, CHECK_OK);
   Handle<String> label;
   Token::Value tok = peek();
-  if (!scanner_.has_line_terminator_before_next() &&
+  if (!scanner().has_line_terminator_before_next() &&
       tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
     label = ParseIdentifier(CHECK_OK);
   }
@@ -1707,7 +1750,7 @@
   }
 
   Token::Value tok = peek();
-  if (scanner_.has_line_terminator_before_next() ||
+  if (scanner().has_line_terminator_before_next() ||
       tok == Token::SEMICOLON ||
       tok == Token::RBRACE ||
       tok == Token::EOS) {
@@ -1793,7 +1836,7 @@
     *default_seen_ptr = true;
   }
   Expect(Token::COLON, CHECK_OK);
-
+  int pos = scanner().location().beg_pos;
   ZoneList<Statement*>* statements = new ZoneList<Statement*>(5);
   while (peek() != Token::CASE &&
          peek() != Token::DEFAULT &&
@@ -1802,7 +1845,7 @@
     statements->Add(stat);
   }
 
-  return new CaseClause(label, statements);
+  return new CaseClause(label, statements, pos);
 }
 
 
@@ -1839,7 +1882,7 @@
 
   Expect(Token::THROW, CHECK_OK);
   int pos = scanner().location().beg_pos;
-  if (scanner_.has_line_terminator_before_next()) {
+  if (scanner().has_line_terminator_before_next()) {
     ReportMessage("newline_after_throw", Vector<const char*>::empty());
     *ok = false;
     return NULL;
@@ -1874,7 +1917,7 @@
   }
 
   Block* catch_block = NULL;
-  VariableProxy* catch_var = NULL;
+  Variable* catch_var = NULL;
   Block* finally_block = NULL;
 
   Token::Value tok = peek();
@@ -1904,7 +1947,8 @@
       // executing the finally block.
       catch_var = top_scope_->NewTemporary(Factory::catch_var_symbol());
       Literal* name_literal = new Literal(name);
-      Expression* obj = new CatchExtensionObject(name_literal, catch_var);
+      VariableProxy* catch_var_use = new VariableProxy(catch_var);
+      Expression* obj = new CatchExtensionObject(name_literal, catch_var_use);
       { Target target(&this->target_stack_, &catch_collector);
         catch_block = WithHelper(obj, NULL, true, CHECK_OK);
       }
@@ -1928,8 +1972,9 @@
   //   'try { try { } catch { } } finally { }'
 
   if (catch_block != NULL && finally_block != NULL) {
+    VariableProxy* catch_var_defn = new VariableProxy(catch_var);
     TryCatchStatement* statement =
-        new TryCatchStatement(try_block, catch_var, catch_block);
+        new TryCatchStatement(try_block, catch_var_defn, catch_block);
     statement->set_escaping_targets(collector.targets());
     try_block = new Block(NULL, 1, false);
     try_block->AddStatement(statement);
@@ -1939,7 +1984,8 @@
   TryStatement* result = NULL;
   if (catch_block != NULL) {
     ASSERT(finally_block == NULL);
-    result = new TryCatchStatement(try_block, catch_var, catch_block);
+    VariableProxy* catch_var_defn = new VariableProxy(catch_var);
+    result = new TryCatchStatement(try_block, catch_var_defn, catch_block);
     result->set_escaping_targets(collector.targets());
   } else {
     ASSERT(finally_block != NULL);
@@ -2400,7 +2446,8 @@
   //   LeftHandSideExpression ('++' | '--')?
 
   Expression* expression = ParseLeftHandSideExpression(CHECK_OK);
-  if (!scanner_.has_line_terminator_before_next() && Token::IsCountOp(peek())) {
+  if (!scanner().has_line_terminator_before_next() &&
+      Token::IsCountOp(peek())) {
     // Signal a reference error if the expression is an invalid
     // left-hand side expression.  We could report this as a syntax
     // error here but for compatibility with JSC we choose to report the
@@ -2590,25 +2637,24 @@
   // We don't report stack overflows here, to avoid increasing the
   // stack depth even further.  Instead we report it after parsing is
   // over, in ParseProgram/ParseJson.
-  if (token == Token::ILLEGAL && scanner().stack_overflow())
-    return;
+  if (token == Token::ILLEGAL && stack_overflow_) return;
   // Four of the tokens are treated specially
   switch (token) {
-  case Token::EOS:
-    return ReportMessage("unexpected_eos", Vector<const char*>::empty());
-  case Token::NUMBER:
-    return ReportMessage("unexpected_token_number",
-                         Vector<const char*>::empty());
-  case Token::STRING:
-    return ReportMessage("unexpected_token_string",
-                         Vector<const char*>::empty());
-  case Token::IDENTIFIER:
-    return ReportMessage("unexpected_token_identifier",
-                         Vector<const char*>::empty());
-  default:
-    const char* name = Token::String(token);
-    ASSERT(name != NULL);
-    ReportMessage("unexpected_token", Vector<const char*>(&name, 1));
+    case Token::EOS:
+      return ReportMessage("unexpected_eos", Vector<const char*>::empty());
+    case Token::NUMBER:
+      return ReportMessage("unexpected_token_number",
+                           Vector<const char*>::empty());
+    case Token::STRING:
+      return ReportMessage("unexpected_token_string",
+                           Vector<const char*>::empty());
+    case Token::IDENTIFIER:
+      return ReportMessage("unexpected_token_identifier",
+                           Vector<const char*>::empty());
+    default:
+      const char* name = Token::String(token);
+      ASSERT(name != NULL);
+      ReportMessage("unexpected_token", Vector<const char*>(&name, 1));
   }
 }
 
@@ -2670,7 +2716,7 @@
     case Token::NUMBER: {
       Consume(Token::NUMBER);
       double value =
-        StringToDouble(scanner_.literal(), ALLOW_HEX | ALLOW_OCTALS);
+        StringToDouble(scanner().literal(), ALLOW_HEX | ALLOW_OCTALS);
       result = NewNumberLiteral(value);
       break;
     }
@@ -2814,6 +2860,7 @@
 
 
 bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
+  if (expression->AsLiteral() != NULL) return true;
   MaterializedLiteral* lit = expression->AsMaterializedLiteral();
   return lit != NULL && lit->is_simple();
 }
@@ -3020,7 +3067,7 @@
       case Token::NUMBER: {
         Consume(Token::NUMBER);
         double value =
-          StringToDouble(scanner_.literal(), ALLOW_HEX | ALLOW_OCTALS);
+          StringToDouble(scanner().literal(), ALLOW_HEX | ALLOW_OCTALS);
         key = NewNumberLiteral(value);
         break;
       }
@@ -3081,7 +3128,7 @@
 
 
 Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) {
-  if (!scanner_.ScanRegExpPattern(seen_equal)) {
+  if (!scanner().ScanRegExpPattern(seen_equal)) {
     Next();
     ReportMessage("unterminated_regexp", Vector<const char*>::empty());
     *ok = false;
@@ -3091,10 +3138,10 @@
   int literal_index = temp_scope_->NextMaterializedLiteralIndex();
 
   Handle<String> js_pattern =
-      Factory::NewStringFromUtf8(scanner_.next_literal(), TENURED);
-  scanner_.ScanRegExpFlags();
+      Factory::NewStringFromUtf8(scanner().next_literal(), TENURED);
+  scanner().ScanRegExpFlags();
   Handle<String> js_flags =
-      Factory::NewStringFromUtf8(scanner_.next_literal(), TENURED);
+      Factory::NewStringFromUtf8(scanner().next_literal(), TENURED);
   Next();
 
   return new RegExpLiteral(js_pattern, js_flags, literal_index);
@@ -3150,7 +3197,7 @@
     //  FormalParameterList ::
     //    '(' (Identifier)*[','] ')'
     Expect(Token::LPAREN, CHECK_OK);
-    int start_pos = scanner_.location().beg_pos;
+    int start_pos = scanner().location().beg_pos;
     bool done = (peek() == Token::RPAREN);
     while (!done) {
       Handle<String> param_name = ParseIdentifier(CHECK_OK);
@@ -3187,7 +3234,7 @@
     bool is_lazily_compiled =
         mode() == PARSE_LAZILY && top_scope_->HasTrivialOuterContext();
 
-    int function_block_pos = scanner_.location().beg_pos;
+    int function_block_pos = scanner().location().beg_pos;
     int materialized_literal_count;
     int expected_property_count;
     int end_pos;
@@ -3204,7 +3251,8 @@
         ReportInvalidPreparseData(name, CHECK_OK);
       }
       Counters::total_preparse_skipped.Increment(end_pos - function_block_pos);
-      scanner_.SeekForward(end_pos);
+      // Seek to position just before terminal '}'.
+      scanner().SeekForward(end_pos - 1);
       materialized_literal_count = entry.literal_count();
       expected_property_count = entry.property_count();
       only_simple_this_property_assignments = false;
@@ -3220,7 +3268,7 @@
       this_property_assignments = temp_scope.this_property_assignments();
 
       Expect(Token::RBRACE, CHECK_OK);
-      end_pos = scanner_.location().end_pos;
+      end_pos = scanner().location().end_pos;
     }
 
     FunctionLiteral* function_literal =
@@ -3324,7 +3372,7 @@
     Next();
     return;
   }
-  if (scanner_.has_line_terminator_before_next() ||
+  if (scanner().has_line_terminator_before_next() ||
       tok == Token::RBRACE ||
       tok == Token::EOS) {
     return;
@@ -3375,8 +3423,8 @@
                                                  bool* ok) {
   Expect(Token::IDENTIFIER, ok);
   if (!*ok) return Handle<String>();
-  if (scanner_.literal_length() == 3) {
-    const char* token = scanner_.literal_string();
+  if (scanner().literal_length() == 3) {
+    const char* token = scanner().literal_string();
     *is_get = strcmp(token, "get") == 0;
     *is_set = !*is_get && strcmp(token, "set") == 0;
   }
@@ -3495,12 +3543,13 @@
 // ----------------------------------------------------------------------------
 // JSON
 
-Handle<Object> JsonParser::ParseJson(Handle<String> source) {
-  source->TryFlatten();
+Handle<Object> JsonParser::ParseJson(Handle<String> script,
+                                     UC16CharacterStream* source) {
   scanner_.Initialize(source);
+  stack_overflow_ = false;
   Handle<Object> result = ParseJsonValue();
   if (result.is_null() || scanner_.Next() != Token::EOS) {
-    if (scanner_.stack_overflow()) {
+    if (stack_overflow_) {
       // Scanner failed.
       Top::StackOverflow();
     } else {
@@ -3531,7 +3580,7 @@
       }
 
       Scanner::Location source_location = scanner_.location();
-      MessageLocation location(Factory::NewScript(source),
+      MessageLocation location(Factory::NewScript(script),
                                source_location.beg_pos,
                                source_location.end_pos);
       int argc = (name_opt == NULL) ? 0 : 1;
@@ -3598,6 +3647,10 @@
   if (scanner_.peek() == Token::RBRACE) {
     scanner_.Next();
   } else {
+    if (StackLimitCheck().HasOverflowed()) {
+      stack_overflow_ = true;
+      return Handle<Object>::null();
+    }
     do {
       if (scanner_.Next() != Token::STRING) {
         return ReportUnexpectedToken();
@@ -3632,6 +3685,10 @@
   if (token == Token::RBRACK) {
     scanner_.Next();
   } else {
+    if (StackLimitCheck().HasOverflowed()) {
+      stack_overflow_ = true;
+      return Handle<Object>::null();
+    }
     do {
       Handle<Object> element = ParseJsonValue();
       if (element.is_null()) return Handle<Object>::null();
@@ -3673,7 +3730,7 @@
     contains_anchor_(false),
     is_scanned_for_captures_(false),
     failed_(false) {
-  Advance(1);
+  Advance();
 }
 
 
@@ -3711,8 +3768,8 @@
 
 
 void RegExpParser::Advance(int dist) {
-  for (int i = 0; i < dist; i++)
-    Advance();
+  next_pos_ += dist - 1;
+  Advance();
 }
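The loop becomes constant-time arithmetic. An equivalence sketch, assuming the zero-argument Advance() reads the character at next_pos_ and then increments it:

    // Before: Advance(3) with next_pos_ == 5 read positions 5, 6 and 7.
    // After:  next_pos_ = 5 + (3 - 1) = 7; Advance() reads position 7.
    // The final state is the same (next_pos_ == 8, current() is the
    // character at position 7) without the two intermediate reads.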
 
 
@@ -4392,6 +4449,22 @@
 }
 
 
+static const uc16 kNoCharClass = 0;
+
+// Adds a range or a predefined character class to the character ranges.
+// If char_class is not kNoCharClass, it's interpreted as a class
+// escape (i.e., 's' means whitespace, from '\s').
+static inline void AddRangeOrEscape(ZoneList<CharacterRange>* ranges,
+                                    uc16 char_class,
+                                    CharacterRange range) {
+  if (char_class != kNoCharClass) {
+    CharacterRange::AddClassEscape(char_class, ranges);
+  } else {
+    ranges->Add(range);
+  }
+}
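For intuition, a sketch (illustrative, not part of the patch) of the calls the parser now makes for the class [\d-z], where one endpoint of the would-be range is a class escape; note the range argument is ignored when an escape is passed:

    ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
    AddRangeOrEscape(ranges, 'd', CharacterRange::Singleton('d'));  // expands \d
    ranges->Add(CharacterRange::Singleton('-'));                    // literal '-'
    AddRangeOrEscape(ranges, kNoCharClass,
                     CharacterRange::Singleton('z'));               // plain 'z'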
+
+
 RegExpTree* RegExpParser::ParseCharacterClass() {
   static const char* kUnterminated = "Unterminated character class";
   static const char* kRangeOutOfOrder = "Range out of order in character class";
@@ -4405,12 +4478,8 @@
   }
   ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
   while (has_more() && current() != ']') {
-    uc16 char_class = 0;
+    uc16 char_class = kNoCharClass;
     CharacterRange first = ParseClassAtom(&char_class CHECK_FAILED);
-    if (char_class) {
-      CharacterRange::AddClassEscape(char_class, ranges);
-      continue;
-    }
     if (current() == '-') {
       Advance();
       if (current() == kEndMarker) {
@@ -4418,15 +4487,17 @@
         // following code report an error.
         break;
       } else if (current() == ']') {
-        ranges->Add(first);
+        AddRangeOrEscape(ranges, char_class, first);
         ranges->Add(CharacterRange::Singleton('-'));
         break;
       }
-      CharacterRange next = ParseClassAtom(&char_class CHECK_FAILED);
-      if (char_class) {
-        ranges->Add(first);
+      uc16 char_class_2 = kNoCharClass;
+      CharacterRange next = ParseClassAtom(&char_class_2 CHECK_FAILED);
+      if (char_class != kNoCharClass || char_class_2 != kNoCharClass) {
+        // Either end is an escaped character class. Treat the '-' verbatim.
+        AddRangeOrEscape(ranges, char_class, first);
         ranges->Add(CharacterRange::Singleton('-'));
-        CharacterRange::AddClassEscape(char_class, ranges);
+        AddRangeOrEscape(ranges, char_class_2, next);
         continue;
       }
       if (first.from() > next.to()) {
@@ -4434,7 +4505,7 @@
       }
       ranges->Add(CharacterRange::Range(first.from(), next.to()));
     } else {
-      ranges->Add(first);
+      AddRangeOrEscape(ranges, char_class, first);
     }
   }
   if (!has_more()) {
@@ -4524,15 +4595,17 @@
 
 
 // Create a Scanner for the preparser to use as input, and preparse the source.
-static ScriptDataImpl* DoPreParse(Handle<String> source,
-                                  unibrow::CharacterStream* stream,
+static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
                                   bool allow_lazy,
                                   ParserRecorder* recorder,
                                   int literal_flags) {
   V8JavaScriptScanner scanner;
-  scanner.Initialize(source, stream, literal_flags);
-  preparser::PreParser preparser;
-  if (!preparser.PreParseProgram(&scanner, recorder, allow_lazy)) {
+  scanner.Initialize(source, literal_flags);
+  intptr_t stack_limit = StackGuard::real_climit();
+  if (!preparser::PreParser::PreParseProgram(&scanner,
+                                             recorder,
+                                             allow_lazy,
+                                             stack_limit)) {
     Top::StackOverflow();
     return NULL;
   }
@@ -4546,8 +4619,7 @@
 
 // Preparse, but only collect data that is immediately useful,
 // even if the preparser data is only used once.
-ScriptDataImpl* ParserApi::PartialPreParse(Handle<String> source,
-                                           unibrow::CharacterStream* stream,
+ScriptDataImpl* ParserApi::PartialPreParse(UC16CharacterStream* source,
                                            v8::Extension* extension) {
   bool allow_lazy = FLAG_lazy && (extension == NULL);
   if (!allow_lazy) {
@@ -4556,22 +4628,19 @@
     return NULL;
   }
   PartialParserRecorder recorder;
-
-  return DoPreParse(source, stream, allow_lazy, &recorder,
+  return DoPreParse(source, allow_lazy, &recorder,
                     JavaScriptScanner::kNoLiterals);
 }
 
 
-ScriptDataImpl* ParserApi::PreParse(Handle<String> source,
-                                    unibrow::CharacterStream* stream,
+ScriptDataImpl* ParserApi::PreParse(UC16CharacterStream* source,
                                     v8::Extension* extension) {
   Handle<Script> no_script;
   bool allow_lazy = FLAG_lazy && (extension == NULL);
   CompleteParserRecorder recorder;
   int kPreParseLiteralsFlags =
       JavaScriptScanner::kLiteralString | JavaScriptScanner::kLiteralIdentifier;
-  return DoPreParse(source, stream, allow_lazy,
-                    &recorder, kPreParseLiteralsFlags);
+  return DoPreParse(source, allow_lazy, &recorder, kPreParseLiteralsFlags);
 }
 
 
diff --git a/src/parser.h b/src/parser.h
index a067bd7..70d0e18 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -169,14 +169,12 @@
   static bool Parse(CompilationInfo* info);
 
   // Generic preparser generating full preparse data.
-  static ScriptDataImpl* PreParse(Handle<String> source,
-                                  unibrow::CharacterStream* stream,
+  static ScriptDataImpl* PreParse(UC16CharacterStream* source,
                                   v8::Extension* extension);
 
   // Preparser that only does the preprocessing that makes sense if the
   // result is used immediately after.
-  static ScriptDataImpl* PartialPreParse(Handle<String> source,
-                                         unibrow::CharacterStream* stream,
+  static ScriptDataImpl* PartialPreParse(UC16CharacterStream* source,
                                          v8::Extension* extension);
 };
 
@@ -435,18 +433,26 @@
                        Vector<const char*> args);
 
  protected:
+  FunctionLiteral* ParseLazy(Handle<SharedFunctionInfo> info,
+                             UC16CharacterStream* source,
+                             ZoneScope* zone_scope);
   enum Mode {
     PARSE_LAZILY,
     PARSE_EAGERLY
   };
 
+  // Called by ParseProgram after setting up the scanner.
+  FunctionLiteral* DoParseProgram(Handle<String> source,
+                                  bool in_global_context,
+                                  ZoneScope* zone_scope);
+
   // Report syntax error
   void ReportUnexpectedToken(Token::Value token);
   void ReportInvalidPreparseData(Handle<String> name, bool* ok);
   void ReportMessage(const char* message, Vector<const char*> args);
 
   bool inside_with() const { return with_nesting_level_ > 0; }
-  Scanner& scanner()  { return scanner_; }
+  V8JavaScriptScanner& scanner()  { return scanner_; }
   Mode mode() const { return mode_; }
   ScriptDataImpl* pre_data() const { return pre_data_; }
 
@@ -546,8 +552,27 @@
   // Magical syntax support.
   Expression* ParseV8Intrinsic(bool* ok);
 
-  INLINE(Token::Value peek()) { return scanner_.peek(); }
-  INLINE(Token::Value Next()) { return scanner_.NextCheckStack(); }
+  INLINE(Token::Value peek()) {
+    if (stack_overflow_) return Token::ILLEGAL;
+    return scanner().peek();
+  }
+
+  INLINE(Token::Value Next()) {
+    // BUG 1215673: Find a thread-safe way to set a stack limit in
+    // pre-parse mode. Otherwise, we cannot safely pre-parse from other
+    // threads.
+    if (stack_overflow_) {
+      return Token::ILLEGAL;
+    }
+    if (StackLimitCheck().HasOverflowed()) {
+      // Any further calls to Next or peek will return the illegal token.
+      // The current call must return the next token, which might already
+      // have been peek'ed.
+      stack_overflow_ = true;
+    }
+    return scanner().Next();
+  }
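The protocol the two helpers implement: the token being scanned when the check trips is still delivered, and everything after it is Token::ILLEGAL; ReportUnexpectedToken() stays silent in that case, and DoParseProgram()/ParseLazy() later raise the real error via Top::StackOverflow(). A trace sketch:

    // Suppose the stack limit check trips while scanning token N:
    Token::Value t1 = Next();  // returns token N, sets stack_overflow_
    Token::Value t2 = peek();  // Token::ILLEGAL from here on
    Token::Value t3 = Next();  // Token::ILLEGAL; the scanner is not touched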
+
   INLINE(void Consume(Token::Value token));
   void Expect(Token::Value token, bool* ok);
   bool Check(Token::Value token);
@@ -639,6 +664,7 @@
   bool is_pre_parsing_;
   ScriptDataImpl* pre_data_;
   FuncNameInferrer* fni_;
+  bool stack_overflow_;
 };
 
 
@@ -684,7 +710,14 @@
   // Parse JSON input as a single JSON value.
   // Returns null handle and sets exception if parsing failed.
   static Handle<Object> Parse(Handle<String> source) {
-    return JsonParser().ParseJson(source);
+    if (source->IsExternalTwoByteString()) {
+      ExternalTwoByteStringUC16CharacterStream stream(
+          Handle<ExternalTwoByteString>::cast(source), 0, source->length());
+      return JsonParser().ParseJson(source, &stream);
+    } else {
+      GenericStringUC16CharacterStream stream(source, 0, source->length());
+      return JsonParser().ParseJson(source, &stream);
+    }
   }
 
  private:
@@ -692,7 +725,7 @@
   ~JsonParser() { }
 
   // Parse a string containing a single JSON value.
-  Handle<Object> ParseJson(Handle<String>);
+  Handle<Object> ParseJson(Handle<String> script, UC16CharacterStream* source);
   // Parse a single JSON value from input (grammar production JSONValue).
   // A JSON value is either a (double-quoted) string literal, a number literal,
   // one of "true", "false", or "null", or an object or array literal.
@@ -718,6 +751,7 @@
   Handle<String> GetString();
 
   JsonScanner scanner_;
+  bool stack_overflow_;
 };
 } }  // namespace v8::internal
 
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 0d89a16..b58d066 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -53,6 +53,7 @@
 #include "v8.h"
 
 #include "platform.h"
+#include "vm-state-inl.h"
 
 
 namespace v8 {
@@ -616,10 +617,9 @@
 };
 
 
-Sampler::Sampler(int interval, bool profiling)
+Sampler::Sampler(int interval)
     : interval_(interval),
-      profiling_(profiling),
-      synchronous_(profiling),
+      profiling_(false),
       active_(false),
       samples_taken_(0) {
   data_ = new PlatformData();
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index cc7cbe5..7efb25d 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -59,6 +59,7 @@
 #include "platform.h"
 #include "top.h"
 #include "v8threads.h"
+#include "vm-state-inl.h"
 
 
 namespace v8 {
@@ -184,21 +185,10 @@
 }
 
 
-#ifdef V8_TARGET_ARCH_ARM
-// 0xffff0fa0 is the hard coded address of a function provided by
-// the kernel which implements a memory barrier. On older
-// ARM architecture revisions (pre-v6) this may be implemented using
-// a syscall. This address is stable, and in active use (hard coded)
-// by at least glibc-2.7 and the Android C library.
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
-    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
-#endif
-
 void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
 #if defined(V8_TARGET_ARCH_ARM) && defined(__arm__)
   // Only use on ARM hardware.
-  pLinuxKernelMemoryBarrier();
+  MemoryBarrier();
 #else
   __asm__ __volatile__("" : : : "memory");
   // An x86 store acts as a release barrier.
@@ -650,6 +640,16 @@
     return result;
   }
 
+  virtual bool TryLock() {
+    int result = pthread_mutex_trylock(&mutex_);
+    // Return false if the lock is busy and locking failed.
+    if (result == EBUSY) {
+      return false;
+    }
+    ASSERT(result == 0);  // Verify no other errors.
+    return true;
+  }
+
  private:
   pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
 };
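A minimal usage sketch for the new non-blocking entry point (the caller code is hypothetical):

    Mutex* mutex = OS::CreateMutex();
    if (mutex->TryLock()) {  // Returns immediately instead of blocking.
      // ... touch the shared state ...
      mutex->Unlock();
    } else {
      // Lock was busy; skip this round instead of stalling the thread.
    }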
@@ -733,6 +733,7 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
 static Sampler* active_sampler_ = NULL;
+static int vm_tid_ = 0;
 
 
 #if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
@@ -761,50 +762,51 @@
 #endif
 
 
+static int GetThreadID() {
+  // Glibc doesn't provide a wrapper for gettid(2).
+  return syscall(SYS_gettid);
+}
+
+
 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
 #ifndef V8_HOST_ARCH_MIPS
   USE(info);
   if (signal != SIGPROF) return;
-  if (active_sampler_ == NULL) return;
+  if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
+  if (vm_tid_ != GetThreadID()) return;
 
   TickSample sample_obj;
   TickSample* sample = CpuProfiler::TickSampleEvent();
   if (sample == NULL) sample = &sample_obj;
 
-  // We always sample the VM state.
-  sample->state = VMState::current_state();
-
-  // If profiling, we extract the current pc and sp.
-  if (active_sampler_->IsProfiling()) {
-    // Extracting the sample from the context is extremely machine dependent.
-    ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-    mcontext_t& mcontext = ucontext->uc_mcontext;
+  // Extracting the sample from the context is extremely machine dependent.
+  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+  mcontext_t& mcontext = ucontext->uc_mcontext;
+  sample->state = Top::current_vm_state();
 #if V8_HOST_ARCH_IA32
-    sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
-    sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
-    sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
+  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
+  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
+  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
 #elif V8_HOST_ARCH_X64
-    sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
-    sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
-    sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
+  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
+  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
+  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
 #elif V8_HOST_ARCH_ARM
 // An undefined macro evaluates to 0, so this applies to Android's Bionic also.
 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
-    sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
-    sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
-    sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
+  sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
+  sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
+  sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
 #else
-    sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
-    sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
-    sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
+  sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
+  sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
+  sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
 #endif
 #elif V8_HOST_ARCH_MIPS
-    // Implement this on MIPS.
-    UNIMPLEMENTED();
+  // Implement this on MIPS.
+  UNIMPLEMENTED();
 #endif
-    active_sampler_->SampleStack(sample);
-  }
-
+  active_sampler_->SampleStack(sample);
   active_sampler_->Tick(sample);
 #endif
 }
@@ -812,43 +814,64 @@
 
 class Sampler::PlatformData : public Malloced {
  public:
+  enum SleepInterval {
+    FULL_INTERVAL,
+    HALF_INTERVAL
+  };
+
   explicit PlatformData(Sampler* sampler)
       : sampler_(sampler),
         signal_handler_installed_(false),
         vm_tgid_(getpid()),
-        // Glibc doesn't provide a wrapper for gettid(2).
-        vm_tid_(syscall(SYS_gettid)),
         signal_sender_launched_(false) {
   }
 
   void SignalSender() {
     while (sampler_->IsActive()) {
-      // Glibc doesn't provide a wrapper for tgkill(2).
-      syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
-      // Convert ms to us and subtract 100 us to compensate delays
-      // occuring during signal delivery.
-      const useconds_t interval = sampler_->interval_ * 1000 - 100;
-      int result = usleep(interval);
-#ifdef DEBUG
-      if (result != 0 && errno != EINTR) {
-        fprintf(stderr,
-                "SignalSender usleep error; interval = %u, errno = %d\n",
-                interval,
-                errno);
-        ASSERT(result == 0 || errno == EINTR);
+      if (rate_limiter_.SuspendIfNecessary()) continue;
+      if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
+        SendProfilingSignal();
+        Sleep(HALF_INTERVAL);
+        RuntimeProfiler::NotifyTick();
+        Sleep(HALF_INTERVAL);
+      } else {
+        if (sampler_->IsProfiling()) SendProfilingSignal();
+        if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+        Sleep(FULL_INTERVAL);
       }
-#endif
-      USE(result);
     }
   }
 
+  void SendProfilingSignal() {
+    // Glibc doesn't provide a wrapper for tgkill(2).
+    syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
+  }
+
+  void Sleep(SleepInterval full_or_half) {
+    // Convert ms to us and subtract 100 us to compensate for delays
+    // occurring during signal delivery.
+    useconds_t interval = sampler_->interval_ * 1000 - 100;
+    if (full_or_half == HALF_INTERVAL) interval /= 2;
+    int result = usleep(interval);
+#ifdef DEBUG
+    if (result != 0 && errno != EINTR) {
+      fprintf(stderr,
+              "SignalSender usleep error; interval = %u, errno = %d\n",
+              interval,
+              errno);
+      ASSERT(result == 0 || errno == EINTR);
+    }
+#endif
+    USE(result);
+  }
+
   Sampler* sampler_;
   bool signal_handler_installed_;
   struct sigaction old_signal_handler_;
   int vm_tgid_;
-  int vm_tid_;
   bool signal_sender_launched_;
   pthread_t signal_sender_thread_;
+  RuntimeProfilerRateLimiter rate_limiter_;
 };
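Worked numbers for the sleep computation, assuming a 1 ms sampling interval (interval_ is supplied by the embedder, so the value is illustrative):

    //   FULL_INTERVAL: 1 * 1000 - 100 = 900 us of sleep per cycle
    //   HALF_INTERVAL: 900 / 2        = 450 us
    // With CPU profiling and the runtime profiler both enabled, one cycle is:
    //   SendProfilingSignal(); usleep(450); NotifyTick(); usleep(450);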
 
 
@@ -860,10 +883,9 @@
 }
 
 
-Sampler::Sampler(int interval, bool profiling)
+Sampler::Sampler(int interval)
     : interval_(interval),
-      profiling_(profiling),
-      synchronous_(profiling),
+      profiling_(false),
       active_(false),
       samples_taken_(0) {
   data_ = new PlatformData(this);
@@ -879,7 +901,8 @@
 void Sampler::Start() {
   // There can only be one active sampler at a time on POSIX
   // platforms.
-  if (active_sampler_ != NULL) return;
+  ASSERT(!IsActive());
+  vm_tid_ = GetThreadID();
 
   // Request profiling signals.
   struct sigaction sa;
@@ -892,7 +915,7 @@
   // Start a thread that sends SIGPROF signal to VM thread.
   // Sending the signal ourselves instead of relying on itimer provides
   // much better accuracy.
-  active_ = true;
+  SetActive(true);
   if (pthread_create(
           &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
     data_->signal_sender_launched_ = true;
@@ -904,11 +927,12 @@
 
 
 void Sampler::Stop() {
-  active_ = false;
+  SetActive(false);
 
   // Wait for signal sender termination (it will exit after setting
   // active_ to false).
   if (data_->signal_sender_launched_) {
+    Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
     pthread_join(data_->signal_sender_thread_, NULL);
     data_->signal_sender_launched_ = false;
   }
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index c3f21dc..85c7088 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -57,6 +57,7 @@
 #include "v8.h"
 
 #include "platform.h"
+#include "vm-state-inl.h"
 
 // Manually define these here as weak imports, rather than including execinfo.h.
 // This lets us launch on 10.4 which does not have these calls.
@@ -483,11 +484,20 @@
     pthread_mutex_init(&mutex_, &attr);
   }
 
-  ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
+  virtual ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
 
-  int Lock() { return pthread_mutex_lock(&mutex_); }
+  virtual int Lock() { return pthread_mutex_lock(&mutex_); }
+  virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
 
-  int Unlock() { return pthread_mutex_unlock(&mutex_); }
+  virtual bool TryLock() {
+    int result = pthread_mutex_trylock(&mutex_);
+    // Return false if the lock is busy and locking failed.
+    if (result == EBUSY) {
+      return false;
+    }
+    ASSERT(result == 0);  // Verify no other errors.
+    return true;
+  }
 
  private:
   pthread_mutex_t mutex_;
@@ -554,40 +564,38 @@
   mach_port_t task_self_;
   thread_act_t profiled_thread_;
   pthread_t sampler_thread_;
+  RuntimeProfilerRateLimiter rate_limiter_;
 
   // Sampler thread handler.
   void Runner() {
-    // Loop until the sampler is disengaged, keeping the specified
-    // sampling frequency.
-    for ( ; sampler_->IsActive(); OS::Sleep(sampler_->interval_)) {
+    while (sampler_->IsActive()) {
+      if (rate_limiter_.SuspendIfNecessary()) continue;
+      Sample();
+      OS::Sleep(sampler_->interval_);
+    }
+  }
+
+  void Sample() {
+    if (sampler_->IsProfiling()) {
       TickSample sample_obj;
       TickSample* sample = CpuProfiler::TickSampleEvent();
       if (sample == NULL) sample = &sample_obj;
 
-      // If the sampler runs in sync with the JS thread, we try to
-      // suspend it. If we fail, we skip the current sample.
-      if (sampler_->IsSynchronous()) {
-        if (KERN_SUCCESS != thread_suspend(profiled_thread_)) continue;
-      }
+      if (KERN_SUCCESS != thread_suspend(profiled_thread_)) return;
 
-      // We always sample the VM state.
-      sample->state = VMState::current_state();
-
-      // If profiling, we record the pc and sp of the profiled thread.
-      if (sampler_->IsProfiling()) {
 #if V8_HOST_ARCH_X64
-        thread_state_flavor_t flavor = x86_THREAD_STATE64;
-        x86_thread_state64_t state;
-        mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
+      thread_state_flavor_t flavor = x86_THREAD_STATE64;
+      x86_thread_state64_t state;
+      mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 #if __DARWIN_UNIX03
 #define REGISTER_FIELD(name) __r ## name
 #else
 #define REGISTER_FIELD(name) r ## name
 #endif  // __DARWIN_UNIX03
 #elif V8_HOST_ARCH_IA32
-        thread_state_flavor_t flavor = i386_THREAD_STATE;
-        i386_thread_state_t state;
-        mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
+      thread_state_flavor_t flavor = i386_THREAD_STATE;
+      i386_thread_state_t state;
+      mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
 #if __DARWIN_UNIX03
 #define REGISTER_FIELD(name) __e ## name
 #else
@@ -597,24 +605,20 @@
 #error Unsupported Mac OS X host architecture.
 #endif  // V8_HOST_ARCH
 
-        if (thread_get_state(profiled_thread_,
-                             flavor,
-                             reinterpret_cast<natural_t*>(&state),
-                             &count) == KERN_SUCCESS) {
-          sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
-          sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
-          sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
-          sampler_->SampleStack(sample);
-        }
+      if (thread_get_state(profiled_thread_,
+                           flavor,
+                           reinterpret_cast<natural_t*>(&state),
+                           &count) == KERN_SUCCESS) {
+        sample->state = Top::current_vm_state();
+        sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
+        sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
+        sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
+        sampler_->SampleStack(sample);
+        sampler_->Tick(sample);
       }
-
-      // Invoke tick handler with program counter and stack pointer.
-      sampler_->Tick(sample);
-
-      // If the sampler runs in sync with the JS thread, we have to
-      // remember to resume it.
-      if (sampler_->IsSynchronous()) thread_resume(profiled_thread_);
+      thread_resume(profiled_thread_);
     }
+    if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
   }
 };
 
@@ -630,10 +634,9 @@
 }
 
 
-Sampler::Sampler(int interval, bool profiling)
+Sampler::Sampler(int interval)
     : interval_(interval),
-      profiling_(profiling),
-      synchronous_(profiling),
+      profiling_(false),
       active_(false),
       samples_taken_(0) {
   data_ = new PlatformData(this);
@@ -646,11 +649,9 @@
 
 
 void Sampler::Start() {
-  // If we are starting a synchronous sampler, we need to be able to
-  // access the calling thread.
-  if (IsSynchronous()) {
-    data_->profiled_thread_ = mach_thread_self();
-  }
+  // Do not start multiple threads for the same sampler.
+  ASSERT(!IsActive());
+  data_->profiled_thread_ = mach_thread_self();
 
   // Create sampler thread with high priority.
   // According to POSIX spec, when SCHED_FIFO policy is used, a thread
@@ -663,7 +664,7 @@
   fifo_param.sched_priority = sched_get_priority_max(SCHED_FIFO);
   pthread_attr_setschedparam(&sched_attr, &fifo_param);
 
-  active_ = true;
+  SetActive(true);
   pthread_create(&data_->sampler_thread_, &sched_attr, SamplerEntry, data_);
 }
 
@@ -671,15 +672,14 @@
 void Sampler::Stop() {
   // Setting active to false triggers termination of the sampler
   // thread.
-  active_ = false;
+  SetActive(false);
 
   // Wait for sampler thread to terminate.
+  Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
   pthread_join(data_->sampler_thread_, NULL);
 
   // Deallocate Mach port for thread.
-  if (IsSynchronous()) {
-    mach_port_deallocate(data_->task_self_, data_->profiled_thread_);
-  }
+  mach_port_deallocate(data_->task_self_, data_->profiled_thread_);
 }
 
 #endif  // ENABLE_LOGGING_AND_PROFILING
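
Note: the rewritten macOS Runner()/Sample() pair above follows a strict suspend, read registers, resume sequence, and resumes the target thread even when reading its state fails. The same discipline in isolation, as a sketch for x86-64 only (SampleThreadOnce is a hypothetical helper; error handling trimmed):

    #include <mach/mach.h>

    // Fills *state with the suspended thread's registers and always resumes
    // the thread before returning, mirroring Sample() above.
    kern_return_t SampleThreadOnce(thread_act_t thread,
                                   x86_thread_state64_t* state) {
      if (thread_suspend(thread) != KERN_SUCCESS) return KERN_FAILURE;
      mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
      kern_return_t result = thread_get_state(
          thread, x86_THREAD_STATE64,
          reinterpret_cast<thread_state_t>(state), &count);
      thread_resume(thread);
      return result;
    }
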
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index b5caa5e..72ea0e5 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -35,6 +35,7 @@
 #include "v8.h"
 
 #include "platform.h"
+#include "vm-state-inl.h"
 
 
 namespace v8 {
@@ -127,6 +128,19 @@
 }
 
 
+void OS::FPrint(FILE* out, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VFPrint(out, format, args);
+  va_end(args);
+}
+
+
+void OS::VFPrint(FILE* out, const char* format, va_list args) {
+  vfprintf(out, format, args);
+}
+
+
 // Print error message to console.
 void OS::PrintError(const char* format, ...) {
   // Minimalistic implementation for bootstrapping.
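
Note: the OS::FPrint/OS::VFPrint additions here and in the following platform files use the standard varargs forwarding idiom: the '...' entry point only packages its arguments and delegates, so all formatting logic lives in the va_list variant. A generic sketch (LogTo/LogToV are hypothetical names):

    #include <cstdarg>
    #include <cstdio>

    void LogToV(FILE* out, const char* format, va_list args) {
      vfprintf(out, format, args);
    }

    void LogTo(FILE* out, const char* format, ...) {
      va_list args;
      va_start(args, format);
      LogToV(out, format, args);  // Forward; never format here directly.
      va_end(args);
    }
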
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 0751fc7..b698d16 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -52,6 +52,7 @@
 #include "v8.h"
 
 #include "platform.h"
+#include "vm-state-inl.h"
 
 
 namespace v8 {
@@ -571,10 +572,9 @@
 };
 
 
-Sampler::Sampler(int interval, bool profiling)
+Sampler::Sampler(int interval)
     : interval_(interval),
-      profiling_(profiling),
-      synchronous_(profiling),
+      profiling_(false),
       active_(false),
       samples_taken_(0) {
   data_ = new PlatformData();
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index c50d396..ab5c0a3 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -142,6 +142,23 @@
 }
 
 
+void OS::FPrint(FILE* out, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VFPrint(out, format, args);
+  va_end(args);
+}
+
+
+void OS::VFPrint(FILE* out, const char* format, va_list args) {
+#if defined(ANDROID)
+  LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
+#else
+  vfprintf(out, format, args);
+#endif
+}
+
+
 void OS::PrintError(const char* format, ...) {
   va_list args;
   va_start(args, format);
@@ -173,7 +190,9 @@
                   va_list args) {
   int n = vsnprintf(str.start(), str.length(), format, args);
   if (n < 0 || n >= str.length()) {
-    str[str.length() - 1] = '\0';
+    // With a zero-length buffer there is no room for the terminating '\0'.
+    if (str.length() > 0)
+      str[str.length() - 1] = '\0';
     return -1;
   } else {
     return n;
@@ -204,6 +223,14 @@
   explicit POSIXSocket() {
     // Create the socket.
     socket_ = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+    if (IsValid()) {
+      // Allow rapid reuse.
+      static const int kOn = 1;
+      int ret = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
+                           &kOn, sizeof(kOn));
+      ASSERT(ret == 0);
+      USE(ret);
+    }
   }
   explicit POSIXSocket(int socket): socket_(socket) { }
   virtual ~POSIXSocket() { Shutdown(); }
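
Note: the POSIXSocket constructor change above enables SO_REUSEADDR so a debugger port can be rebound immediately after a restart instead of waiting out TIME_WAIT. The same call in a standalone sketch (hypothetical helper; standard BSD socket API):

    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    // Returns a TCP socket with rapid address reuse enabled, or -1 on error.
    int CreateReusableSocket() {
      int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
      if (fd < 0) return -1;
      static const int kOn = 1;
      if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &kOn, sizeof(kOn)) != 0) {
        close(fd);
        return -1;
      }
      return fd;
    }
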
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index ff5d83b..f84e80d 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -52,6 +52,7 @@
 #include "v8.h"
 
 #include "platform.h"
+#include "vm-state-inl.h"
 
 
 // It seems there is a bug in some Solaris distributions (experienced in
@@ -601,10 +602,9 @@
 };
 
 
-Sampler::Sampler(int interval, bool profiling)
+Sampler::Sampler(int interval)
     : interval_(interval),
-      profiling_(profiling),
-      synchronous_(profiling),
+      profiling_(false),
       active_(false),
       samples_taken_(0) {
   data_ = new PlatformData();
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index c50424e..4438045 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -26,70 +26,14 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Platform specific code for Win32.
-#ifndef WIN32_LEAN_AND_MEAN
-// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
-#define WIN32_LEAN_AND_MEAN
-#endif
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-#ifndef NOKERNEL
-#define NOKERNEL
-#endif
-#ifndef NOUSER
-#define NOUSER
-#endif
-#ifndef NOSERVICE
-#define NOSERVICE
-#endif
-#ifndef NOSOUND
-#define NOSOUND
-#endif
-#ifndef NOMCX
-#define NOMCX
-#endif
-// Require Windows XP or higher (this is required for the RtlCaptureContext
-// function to be present).
-#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x501
-#endif
 
-#include <windows.h>
-
-#include <time.h>  // For LocalOffset() implementation.
-#include <mmsystem.h>  // For timeGetTime().
-#ifdef __MINGW32__
-// Require Windows XP or higher when compiling with MinGW. This is for MinGW
-// header files to expose getaddrinfo.
-#undef _WIN32_WINNT
-#define _WIN32_WINNT 0x501
-#endif  // __MINGW32__
-#ifndef __MINGW32__
-#include <dbghelp.h>  // For SymLoadModule64 and al.
-#endif  // __MINGW32__
-#include <limits.h>  // For INT_MAX and al.
-#include <tlhelp32.h>  // For Module32First and al.
-
-// These additional WIN32 includes have to be right here as the #undef's below
-// makes it impossible to have them elsewhere.
-#include <winsock2.h>
-#include <ws2tcpip.h>
-#include <process.h>  // for _beginthreadex()
-#include <stdlib.h>
-
-#undef VOID
-#undef DELETE
-#undef IN
-#undef THIS
-#undef CONST
-#undef NAN
-#undef GetObject
-#undef CreateMutex
-#undef CreateSemaphore
+#define V8_WIN32_HEADERS_FULL
+#include "win32-headers.h"
 
 #include "v8.h"
 
 #include "platform.h"
+#include "vm-state-inl.h"
 
 // Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
 // refer to The Open Group Base Specification for specification of the correct
@@ -207,6 +151,12 @@
   return 0;
 }
 
+
+inline void MemoryBarrier() {
+  int barrier = 0;
+  __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
+}
+
 #endif  // __MINGW32__
 
 // Generate a pseudo-random number in the range 0-2^31-1. Usually
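
Note: the MinGW MemoryBarrier() shim above uses a volatile xchg, which at minimum stops the compiler from reordering across it; on x86, xchg with a memory operand also carries an implicit lock and acts as a full hardware fence. A hedged alternative for GCC-family compilers is the builtin fence (available since GCC 4.1):

    // Full compiler and hardware memory fence; on x86 this typically emits
    // mfence or an equivalent locked instruction.
    inline void FullMemoryBarrier() {
      __sync_synchronize();
    }
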
@@ -738,6 +688,19 @@
 }
 
 
+void OS::FPrint(FILE* out, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VFPrint(out, format, args);
+  va_end(args);
+}
+
+
+void OS::VFPrint(FILE* out, const char* format, va_list args) {
+  VPrintHelper(out, format, args);
+}
+
+
 // Print error message to console.
 void OS::PrintError(const char* format, ...) {
   va_list args;
@@ -766,7 +729,8 @@
   // Make sure to zero-terminate the string if the output was
   // truncated or if there was an error.
   if (n < 0 || n >= str.length()) {
-    str[str.length() - 1] = '\0';
+    if (str.length() > 0)
+      str[str.length() - 1] = '\0';
     return -1;
   } else {
     return n;
@@ -858,13 +822,14 @@
 
   // VirtualAlloc rounds allocated size to page size automatically.
   size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
-  intptr_t address = NULL;
+  intptr_t address = 0;
 
   // Windows XP SP2 allows Data Execution Prevention (DEP).
   int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
 
   // For executable pages, try to randomize the allocation address.
-  if (prot == PAGE_EXECUTE_READWRITE && msize >= Page::kPageSize) {
+  if (prot == PAGE_EXECUTE_READWRITE &&
+      msize >= static_cast<size_t>(Page::kPageSize)) {
     address = (V8::RandomPrivate() << kPageSizeBits)
       | kAllocationRandomAddressMin;
     address &= kAllocationRandomAddressMax;
@@ -874,7 +839,7 @@
                               msize,
                               MEM_COMMIT | MEM_RESERVE,
                               prot);
-  if (mbase == NULL && address != NULL)
+  if (mbase == NULL && address != 0)
     mbase = VirtualAlloc(NULL, msize, MEM_COMMIT | MEM_RESERVE, prot);
 
   if (mbase == NULL) {
@@ -1155,7 +1120,7 @@
   // Initialize the symbol engine.
   ok = _SymInitialize(process_handle,  // hProcess
                       NULL,            // UserSearchPath
-                      FALSE);          // fInvadeProcess
+                      false);          // fInvadeProcess
   if (!ok) return false;
 
   DWORD options = _SymGetOptions();
@@ -1347,6 +1312,7 @@
 
 #else  // __MINGW32__
 void OS::LogSharedLibraryAddresses() { }
+void OS::SignalCodeMovingGC() { }
 int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; }
 #endif  // __MINGW32__
 
@@ -1414,7 +1380,7 @@
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
   ASSERT(IsReserved());
-  return VirtualFree(address, size, MEM_DECOMMIT) != FALSE;
+  return VirtualFree(address, size, MEM_DECOMMIT) != false;
 }
 
 
@@ -1574,18 +1540,24 @@
 
   Win32Mutex() { InitializeCriticalSection(&cs_); }
 
-  ~Win32Mutex() { DeleteCriticalSection(&cs_); }
+  virtual ~Win32Mutex() { DeleteCriticalSection(&cs_); }
 
-  int Lock() {
+  virtual int Lock() {
     EnterCriticalSection(&cs_);
     return 0;
   }
 
-  int Unlock() {
+  virtual int Unlock() {
     LeaveCriticalSection(&cs_);
     return 0;
   }
 
+
+  virtual bool TryLock() {
+    // Returns true if the critical section was entered successfully.
+    return TryEnterCriticalSection(&cs_);
+  }
+
  private:
   CRITICAL_SECTION cs_;  // Critical section used for mutex
 };
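
Note: TryLock() (added to the macOS mutex earlier in this patch and to Win32Mutex here) lets callers skip optional work under contention instead of blocking. A hypothetical usage sketch against the Mutex interface declared later in platform.h:

    // Attempt optional bookkeeping; if another thread holds the lock,
    // give up immediately and retry on a later tick.
    void MaybeFlushCounters(v8::internal::Mutex* mutex) {
      if (!mutex->TryLock()) return;  // Lock busy; TryLock() returned false.
      // ... do the work while holding the lock ...
      mutex->Unlock();
    }
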
@@ -1768,7 +1740,7 @@
 
 
 bool Win32Socket::SetReuseAddress(bool reuse_address) {
-  BOOL on = reuse_address ? TRUE : FALSE;
+  BOOL on = reuse_address ? true : false;
   int status = setsockopt(socket_, SOL_SOCKET, SO_REUSEADDR,
                           reinterpret_cast<char*>(&on), sizeof(on));
   return status == SOCKET_ERROR;
@@ -1838,53 +1810,48 @@
   Sampler* sampler_;
   HANDLE sampler_thread_;
   HANDLE profiled_thread_;
+  RuntimeProfilerRateLimiter rate_limiter_;
 
   // Sampler thread handler.
   void Runner() {
-    // Context used for sampling the register state of the profiled thread.
-    CONTEXT context;
-    memset(&context, 0, sizeof(context));
-    // Loop until the sampler is disengaged, keeping the specified
-    // sampling frequency.
-    for ( ; sampler_->IsActive(); Sleep(sampler_->interval_)) {
+    while (sampler_->IsActive()) {
+      if (rate_limiter_.SuspendIfNecessary()) continue;
+      Sample();
+      Sleep(sampler_->interval_);
+    }
+  }
+
+  void Sample() {
+    if (sampler_->IsProfiling()) {
+      // Context used for sampling the register state of the profiled thread.
+      CONTEXT context;
+      memset(&context, 0, sizeof(context));
+
       TickSample sample_obj;
       TickSample* sample = CpuProfiler::TickSampleEvent();
       if (sample == NULL) sample = &sample_obj;
 
-      // If the sampler runs in sync with the JS thread, we try to
-      // suspend it. If we fail, we skip the current sample.
-      if (sampler_->IsSynchronous()) {
-        static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
-        if (SuspendThread(profiled_thread_) == kSuspendFailed) continue;
-      }
+      static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+      if (SuspendThread(profiled_thread_) == kSuspendFailed) return;
+      sample->state = Top::current_vm_state();
 
-      // We always sample the VM state.
-      sample->state = VMState::current_state();
-
-      // If profiling, we record the pc and sp of the profiled thread.
-      if (sampler_->IsProfiling()) {
-        context.ContextFlags = CONTEXT_FULL;
-        if (GetThreadContext(profiled_thread_, &context) != 0) {
+      context.ContextFlags = CONTEXT_FULL;
+      if (GetThreadContext(profiled_thread_, &context) != 0) {
 #if V8_HOST_ARCH_X64
-          sample->pc = reinterpret_cast<Address>(context.Rip);
-          sample->sp = reinterpret_cast<Address>(context.Rsp);
-          sample->fp = reinterpret_cast<Address>(context.Rbp);
+        sample->pc = reinterpret_cast<Address>(context.Rip);
+        sample->sp = reinterpret_cast<Address>(context.Rsp);
+        sample->fp = reinterpret_cast<Address>(context.Rbp);
 #else
-          sample->pc = reinterpret_cast<Address>(context.Eip);
-          sample->sp = reinterpret_cast<Address>(context.Esp);
-          sample->fp = reinterpret_cast<Address>(context.Ebp);
+        sample->pc = reinterpret_cast<Address>(context.Eip);
+        sample->sp = reinterpret_cast<Address>(context.Esp);
+        sample->fp = reinterpret_cast<Address>(context.Ebp);
 #endif
-          sampler_->SampleStack(sample);
-        }
+        sampler_->SampleStack(sample);
+        sampler_->Tick(sample);
       }
-
-      // Invoke tick handler with program counter and stack pointer.
-      sampler_->Tick(sample);
-
-      // If the sampler runs in sync with the JS thread, we have to
-      // remember to resume it.
-      if (sampler_->IsSynchronous()) ResumeThread(profiled_thread_);
+      ResumeThread(profiled_thread_);
     }
+    if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
   }
 };
 
@@ -1899,10 +1866,9 @@
 
 
 // Initialize a profile sampler.
-Sampler::Sampler(int interval, bool profiling)
+Sampler::Sampler(int interval)
     : interval_(interval),
-      profiling_(profiling),
-      synchronous_(profiling),
+      profiling_(false),
       active_(false),
       samples_taken_(0) {
   data_ = new PlatformData(this);
@@ -1916,26 +1882,25 @@
 
 // Start profiling.
 void Sampler::Start() {
-  // If we are starting a synchronous sampler, we need to be able to
-  // access the calling thread.
-  if (IsSynchronous()) {
-    // Get a handle to the calling thread. This is the thread that we are
-    // going to profile. We need to make a copy of the handle because we are
-    // going to use it in the sampler thread. Using GetThreadHandle() will
-    // not work in this case. We're using OpenThread because DuplicateHandle
-    // for some reason doesn't work in Chrome's sandbox.
-    data_->profiled_thread_ = OpenThread(THREAD_GET_CONTEXT |
-                                         THREAD_SUSPEND_RESUME |
-                                         THREAD_QUERY_INFORMATION,
-                                         FALSE,
-                                         GetCurrentThreadId());
-    BOOL ok = data_->profiled_thread_ != NULL;
-    if (!ok) return;
-  }
+  // Do not start multiple threads for the same sampler.
+  ASSERT(!IsActive());
+
+  // Get a handle to the calling thread. This is the thread that we are
+  // going to profile. We need to make a copy of the handle because we are
+  // going to use it in the sampler thread. Using GetThreadHandle() will
+  // not work in this case. We're using OpenThread because DuplicateHandle
+  // for some reason doesn't work in Chrome's sandbox.
+  data_->profiled_thread_ = OpenThread(THREAD_GET_CONTEXT |
+                                       THREAD_SUSPEND_RESUME |
+                                       THREAD_QUERY_INFORMATION,
+                                       false,
+                                       GetCurrentThreadId());
+  BOOL ok = data_->profiled_thread_ != NULL;
+  if (!ok) return;
 
   // Start sampler thread.
   unsigned int tid;
-  active_ = true;
+  SetActive(true);
   data_->sampler_thread_ = reinterpret_cast<HANDLE>(
       _beginthreadex(NULL, 0, SamplerEntry, data_, 0, &tid));
   // Set thread to high priority to increase sampling accuracy.
@@ -1947,9 +1912,10 @@
 void Sampler::Stop() {
   // Setting active to false triggers termination of the sampler
   // thread.
-  active_ = false;
+  SetActive(false);
 
   // Wait for sampler thread to terminate.
+  Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
   WaitForSingleObject(data_->sampler_thread_, INFINITE);
 
   // Release the thread handles
diff --git a/src/platform.h b/src/platform.h
index 49efc3c..5a3e4a3 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -113,6 +113,10 @@
 
 #endif  // __GNUC__
 
+#include "atomicops.h"
+#include "utils.h"
+#include "v8globals.h"
+
 namespace v8 {
 namespace internal {
 
@@ -121,6 +125,7 @@
 typedef intptr_t AtomicWord;
 
 class Semaphore;
+class Mutex;
 
 double ceiling(double x);
 double modulo(double x, double y);
@@ -179,6 +184,10 @@
   static void Print(const char* format, ...);
   static void VPrint(const char* format, va_list args);
 
+  // Print output to a file. This is mostly used for debugging output.
+  static void FPrint(FILE* out, const char* format, ...);
+  static void VFPrint(FILE* out, const char* format, va_list args);
+
   // Print error output to console. This is mostly used for error message
   // output. On platforms that have standard terminal output, the output
   // should go to stderr.
@@ -433,6 +442,10 @@
   // Unlocks the given mutex. The mutex is assumed to be locked and owned by
   // the calling thread on entrance.
   virtual int Unlock() = 0;
+
+  // Tries to lock the given mutex. Returns whether the mutex was
+  // successfully locked.
+  virtual bool TryLock() = 0;
 };
 
 
@@ -554,7 +567,7 @@
 class Sampler {
  public:
   // Initialize sampler.
-  Sampler(int interval, bool profiling);
+  explicit Sampler(int interval);
   virtual ~Sampler();
 
   // Performs stack sampling.
@@ -572,16 +585,12 @@
   void Stop();
 
   // Is the sampler used for profiling?
-  bool IsProfiling() const { return profiling_; }
-
-  // Is the sampler running in sync with the JS thread? On platforms
-  // where the sampler is implemented with a thread that wakes up
-  // every now and then, having a synchronous sampler implies
-  // suspending/resuming the JS thread.
-  bool IsSynchronous() const { return synchronous_; }
+  bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
+  void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
+  void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
 
   // Whether the sampler is running (that is, consumes resources).
-  bool IsActive() const { return active_; }
+  bool IsActive() const { return NoBarrier_Load(&active_); }
 
   // Used in tests to make sure that stack sampling is performed.
   int samples_taken() const { return samples_taken_; }
@@ -593,12 +602,12 @@
   virtual void DoSampleStack(TickSample* sample) = 0;
 
  private:
+  void SetActive(bool value) { NoBarrier_Store(&active_, value); }
   void IncSamplesTaken() { if (++samples_taken_ < 0) samples_taken_ = 0; }
 
   const int interval_;
-  const bool profiling_;
-  const bool synchronous_;
-  bool active_;
+  Atomic32 profiling_;
+  Atomic32 active_;
   PlatformData* data_;  // Platform specific data.
   int samples_taken_;  // Counts stack samples taken.
   DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
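
Note: the profiling_ member above changes from a const bool to an atomic depth counter so several profiler clients can overlap; IsProfiling() stays true until every IncreaseProfilingDepth() has been balanced. A sketch of the same idea, with std::atomic standing in for V8's Atomic32 (the class name is illustrative):

    #include <atomic>

    class ProfilingGate {
     public:
      void IncreaseDepth() { depth_.fetch_add(1, std::memory_order_relaxed); }
      void DecreaseDepth() { depth_.fetch_sub(1, std::memory_order_relaxed); }
      // True while at least one profiling client is still active.
      bool IsProfiling() const {
        return depth_.load(std::memory_order_relaxed) > 0;
      }

     private:
      std::atomic<int> depth_{0};
    };
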
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
new file mode 100644
index 0000000..cbec9b7
--- /dev/null
+++ b/src/preparser-api.cc
@@ -0,0 +1,209 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "../include/v8-preparser.h"
+#include "globals.h"
+#include "checks.h"
+#include "allocation.h"
+#include "utils.h"
+#include "list.h"
+#include "scanner-base.h"
+#include "preparse-data.h"
+#include "preparser.h"
+
+namespace v8 {
+namespace internal {
+
+// UTF16Buffer based on a v8::UnicodeInputStream.
+class InputStreamUTF16Buffer : public UC16CharacterStream {
+ public:
+  /* The InputStreamUTF16Buffer maintains an internal buffer
+   * that is filled in chunks from the UC16CharacterStream.
+   * It also maintains unlimited pushback capability, optimized
+   * for small pushbacks.
+   * The pushback_buffer_ pointer points to the limit of pushbacks
+   * in the current buffer. There is room for a few pushed-back chars before
+   * the buffer containing the most recently read chunk. If this is overflowed,
+   * an external buffer is allocated/reused to hold further pushbacks, and
+   * pushback_buffer_ and buffer_cursor_/buffer_end_ then point to the
+   * new buffer. When this buffer is read to the end again, the cursor is
+   * switched back to the internal buffer.
+   */
+  explicit InputStreamUTF16Buffer(v8::UnicodeInputStream* stream)
+      : UC16CharacterStream(),
+        stream_(stream),
+        pushback_buffer_(buffer_),
+        pushback_buffer_end_cache_(NULL),
+        pushback_buffer_backing_(NULL),
+        pushback_buffer_backing_size_(0) {
+    buffer_cursor_ = buffer_end_ = buffer_ + kPushBackSize;
+  }
+
+  virtual ~InputStreamUTF16Buffer() {
+    if (pushback_buffer_backing_ != NULL) {
+      DeleteArray(pushback_buffer_backing_);
+    }
+  }
+
+  virtual void PushBack(uc16 ch) {
+    ASSERT(pos_ > 0);
+    if (buffer_cursor_ <= pushback_buffer_) {
+      // No more room in the current buffer to do pushbacks.
+      if (pushback_buffer_end_cache_ == NULL) {
+        // We have overflowed the pushback space at the beginning of buffer_.
+        // Switch to using a separate allocated pushback buffer.
+        if (pushback_buffer_backing_ == NULL) {
+          // Allocate a buffer the first time we need it.
+          pushback_buffer_backing_ = NewArray<uc16>(kPushBackSize);
+          pushback_buffer_backing_size_ = kPushBackSize;
+        }
+        pushback_buffer_ = pushback_buffer_backing_;
+        pushback_buffer_end_cache_ = buffer_end_;
+        buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
+        buffer_cursor_ = buffer_end_ - 1;
+      } else {
+        // Hit the bottom of the allocated pushback buffer.
+        // Double the buffer and continue.
+        uc16* new_buffer = NewArray<uc16>(pushback_buffer_backing_size_ * 2);
+        memcpy(new_buffer + pushback_buffer_backing_size_,
+               pushback_buffer_backing_,
+               pushback_buffer_backing_size_);
+        DeleteArray(pushback_buffer_backing_);
+        buffer_cursor_ = new_buffer + pushback_buffer_backing_size_;
+        pushback_buffer_backing_ = pushback_buffer_ = new_buffer;
+        buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_;
+      }
+    }
+    pushback_buffer_[buffer_cursor_ - pushback_buffer_ - 1] = ch;
+    pos_--;
+  }
+
+ protected:
+  virtual bool ReadBlock() {
+    if (pushback_buffer_end_cache_ != NULL) {
+      buffer_cursor_ = buffer_;
+      buffer_end_ = pushback_buffer_end_cache_;
+      pushback_buffer_end_cache_ = NULL;
+      return buffer_end_ > buffer_cursor_;
+    }
+    // Copy the top of the buffer into the pushback area.
+    int32_t value;
+    uc16* buffer_start = buffer_ + kPushBackSize;
+    buffer_cursor_ = buffer_end_ = buffer_start;
+    while ((value = stream_->Next()) >= 0) {
+      if (value > static_cast<int32_t>(unibrow::Utf8::kMaxThreeByteChar)) {
+        value = unibrow::Utf8::kBadChar;
+      }
+      // buffer_end_ is a const pointer, but buffer_ is writable.
+      buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
+      if (buffer_end_ == buffer_ + kPushBackSize + kBufferSize) break;
+    }
+    return buffer_end_ > buffer_start;
+  }
+
+  virtual unsigned SlowSeekForward(unsigned pos) {
+    // Seeking in the input is not used by preparsing.
+    // It's only used by the real parser based on preparser data.
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+ private:
+  static const unsigned kBufferSize = 512;
+  static const unsigned kPushBackSize = 16;
+  v8::UnicodeInputStream* const stream_;
+  // Buffer holding first kPushBackSize characters of pushback buffer,
+  // then kBufferSize chars of read-ahead.
+  // The pushback buffer is only used if pushing back characters past
+  // the start of a block.
+  uc16 buffer_[kPushBackSize + kBufferSize];
+  // Limit of pushbacks before new allocation is necessary.
+  uc16* pushback_buffer_;
+  // The following is only used if the pushback area at the start of buffer_
+  // is insufficient.
+  const uc16* pushback_buffer_end_cache_;
+  uc16* pushback_buffer_backing_;
+  unsigned pushback_buffer_backing_size_;
+};
+
+
+class StandAloneJavaScriptScanner : public JavaScriptScanner {
+ public:
+  void Initialize(UC16CharacterStream* source) {
+    source_ = source;
+    literal_flags_ = kLiteralString | kLiteralIdentifier;
+    Init();
+    // Skip initial whitespace, allowing HTML comment ends just as after a
+    // newline, then scan the first token.
+    has_line_terminator_before_next_ = true;
+    SkipWhiteSpace();
+    Scan();
+  }
+};
+
+
+// Functions declared by allocation.h
+
+void FatalProcessOutOfMemory(const char* reason) {
+  V8_Fatal(__FILE__, __LINE__, reason);
+}
+
+bool EnableSlowAsserts() { return true; }
+
+}  // namespace internal.
+
+
+UnicodeInputStream::~UnicodeInputStream() { }
+
+
+PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
+  internal::InputStreamUTF16Buffer buffer(input);
+  uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
+  internal::StandAloneJavaScriptScanner scanner;
+  scanner.Initialize(&buffer);
+  internal::CompleteParserRecorder recorder;
+  preparser::PreParser::PreParseResult result =
+      preparser::PreParser::PreParseProgram(&scanner,
+                                            &recorder,
+                                            true,
+                                            stack_limit);
+  if (result == preparser::PreParser::kPreParseStackOverflow) {
+    return PreParserData::StackOverflow();
+  }
+  internal::Vector<unsigned> pre_data = recorder.ExtractData();
+  size_t size = pre_data.length() * sizeof(pre_data[0]);
+  unsigned char* data = reinterpret_cast<unsigned char*>(pre_data.start());
+  return PreParserData(size, data);
+}
+
+}  // namespace v8.
+
+
+// Used by ASSERT macros and other immediate exits.
+extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
+  exit(EXIT_FAILURE);
+}
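
Note: the new preparser-api.cc translation unit makes the preparser callable without a full V8 build. A hedged usage sketch, assuming v8::UnicodeInputStream's only pure virtual member is Next() (returning one code point per call and a negative value at end of input, as ReadBlock() above relies on) and that PreParserData exposes a stack_overflow() accessor:

    #include <stdint.h>
    #include "include/v8-preparser.h"

    // Feeds ASCII characters one at a time; Next() returns -1 at end of input.
    class AsciiInputStream : public v8::UnicodeInputStream {
     public:
      explicit AsciiInputStream(const char* data) : data_(data) { }
      virtual int32_t Next() { return *data_ ? *data_++ : -1; }
     private:
      const char* data_;
    };

    int main() {
      AsciiInputStream input("function f() { return 42; }");
      // Give the preparser a 64KB stack budget below the current frame.
      v8::PreParserData data = v8::Preparse(&input, 64 * 1024);
      return data.stack_overflow() ? 1 : 0;
    }
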
diff --git a/src/preparser.cc b/src/preparser.cc
index 9061731..7cce685 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -65,7 +65,7 @@
   // We don't report stack overflows here, to avoid increasing the
   // stack depth even further.  Instead we report it after parsing is
   // over, in ParseProgram.
-  if (token == i::Token::ILLEGAL && scanner_->stack_overflow()) {
+  if (token == i::Token::ILLEGAL && stack_overflow_) {
     return;
   }
   i::JavaScriptScanner::Location source_location = scanner_->location();
@@ -92,8 +92,8 @@
 }
 
 
-SourceElements PreParser::ParseSourceElements(int end_token,
-                                                            bool* ok) {
+PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
+                                                         bool* ok) {
   // SourceElements ::
   //   (Statement)* <end_token>
 
@@ -104,7 +104,7 @@
 }
 
 
-Statement PreParser::ParseStatement(bool* ok) {
+PreParser::Statement PreParser::ParseStatement(bool* ok) {
   // Statement ::
   //   Block
   //   VariableStatement
@@ -190,7 +190,7 @@
 }
 
 
-Statement PreParser::ParseFunctionDeclaration(bool* ok) {
+PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
   // FunctionDeclaration ::
   //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
   Expect(i::Token::FUNCTION, CHECK_OK);
@@ -204,7 +204,7 @@
 // through the API's extension mechanism.  A native function
 // declaration is resolved by looking up the function through a
 // callback provided by the extension.
-Statement PreParser::ParseNativeDeclaration(bool* ok) {
+PreParser::Statement PreParser::ParseNativeDeclaration(bool* ok) {
   Expect(i::Token::NATIVE, CHECK_OK);
   Expect(i::Token::FUNCTION, CHECK_OK);
   ParseIdentifier(CHECK_OK);
@@ -223,7 +223,7 @@
 }
 
 
-Statement PreParser::ParseBlock(bool* ok) {
+PreParser::Statement PreParser::ParseBlock(bool* ok) {
   // Block ::
   //   '{' Statement* '}'
 
@@ -239,7 +239,7 @@
 }
 
 
-Statement PreParser::ParseVariableStatement(bool* ok) {
+PreParser::Statement PreParser::ParseVariableStatement(bool* ok) {
   // VariableStatement ::
   //   VariableDeclarations ';'
 
@@ -254,9 +254,9 @@
 // *var is untouched; in particular, it is the caller's responsibility
 // to initialize it properly. This mechanism is also used for the parsing
 // of 'for-in' loops.
-Statement PreParser::ParseVariableDeclarations(bool accept_IN,
-                                                  int* num_decl,
-                                                  bool* ok) {
+PreParser::Statement PreParser::ParseVariableDeclarations(bool accept_IN,
+                                                          int* num_decl,
+                                                          bool* ok) {
   // VariableDeclarations ::
   //   ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
 
@@ -288,7 +288,7 @@
 }
 
 
-Statement PreParser::ParseExpressionOrLabelledStatement(
+PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(
     bool* ok) {
   // ExpressionStatement | LabelledStatement ::
   //   Expression ';'
@@ -305,7 +305,7 @@
 }
 
 
-Statement PreParser::ParseIfStatement(bool* ok) {
+PreParser::Statement PreParser::ParseIfStatement(bool* ok) {
   // IfStatement ::
   //   'if' '(' Expression ')' Statement ('else' Statement)?
 
@@ -322,7 +322,7 @@
 }
 
 
-Statement PreParser::ParseContinueStatement(bool* ok) {
+PreParser::Statement PreParser::ParseContinueStatement(bool* ok) {
   // ContinueStatement ::
   //   'continue' [no line terminator] Identifier? ';'
 
@@ -339,7 +339,7 @@
 }
 
 
-Statement PreParser::ParseBreakStatement(bool* ok) {
+PreParser::Statement PreParser::ParseBreakStatement(bool* ok) {
   // BreakStatement ::
   //   'break' [no line terminator] Identifier? ';'
 
@@ -356,7 +356,7 @@
 }
 
 
-Statement PreParser::ParseReturnStatement(bool* ok) {
+PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
   // ReturnStatement ::
   //   'return' [no line terminator] Expression? ';'
 
@@ -382,7 +382,7 @@
 }
 
 
-Statement PreParser::ParseWithStatement(bool* ok) {
+PreParser::Statement PreParser::ParseWithStatement(bool* ok) {
   // WithStatement ::
   //   'with' '(' Expression ')' Statement
   Expect(i::Token::WITH, CHECK_OK);
@@ -397,7 +397,7 @@
 }
 
 
-Statement PreParser::ParseSwitchStatement(bool* ok) {
+PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
   // SwitchStatement ::
   //   'switch' '(' Expression ')' '{' CaseClause* '}'
 
@@ -427,7 +427,7 @@
 }
 
 
-Statement PreParser::ParseDoWhileStatement(bool* ok) {
+PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) {
   // DoStatement ::
   //   'do' Statement 'while' '(' Expression ')' ';'
 
@@ -441,7 +441,7 @@
 }
 
 
-Statement PreParser::ParseWhileStatement(bool* ok) {
+PreParser::Statement PreParser::ParseWhileStatement(bool* ok) {
   // WhileStatement ::
   //   'while' '(' Expression ')' Statement
 
@@ -454,7 +454,7 @@
 }
 
 
-Statement PreParser::ParseForStatement(bool* ok) {
+PreParser::Statement PreParser::ParseForStatement(bool* ok) {
   // ForStatement ::
   //   'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
 
@@ -503,7 +503,7 @@
 }
 
 
-Statement PreParser::ParseThrowStatement(bool* ok) {
+PreParser::Statement PreParser::ParseThrowStatement(bool* ok) {
   // ThrowStatement ::
   //   'throw' [no line terminator] Expression ';'
 
@@ -522,7 +522,7 @@
 }
 
 
-Statement PreParser::ParseTryStatement(bool* ok) {
+PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
   // TryStatement ::
   //   'try' Block Catch
   //   'try' Block Finally
@@ -565,7 +565,7 @@
 }
 
 
-Statement PreParser::ParseDebuggerStatement(bool* ok) {
+PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) {
   // In ECMA-262 'debugger' is defined as a reserved keyword. In some browser
   // contexts this is used as a statement which invokes the debugger as if a
   // break point is present.
@@ -579,7 +579,7 @@
 
 
 // Precedence = 1
-Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
+PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) {
   // Expression ::
   //   AssignmentExpression
   //   Expression ',' AssignmentExpression
@@ -595,8 +595,8 @@
 
 
 // Precedence = 2
-Expression PreParser::ParseAssignmentExpression(bool accept_IN,
-                                                              bool* ok) {
+PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN,
+                                                           bool* ok) {
   // AssignmentExpression ::
   //   ConditionalExpression
   //   LeftHandSideExpression AssignmentOperator AssignmentExpression
@@ -620,8 +620,8 @@
 
 
 // Precedence = 3
-Expression PreParser::ParseConditionalExpression(bool accept_IN,
-                                                               bool* ok) {
+PreParser::Expression PreParser::ParseConditionalExpression(bool accept_IN,
+                                                            bool* ok) {
   // ConditionalExpression ::
   //   LogicalOrExpression
   //   LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression
@@ -649,9 +649,9 @@
 
 
 // Precedence >= 4
-Expression PreParser::ParseBinaryExpression(int prec,
-                                                          bool accept_IN,
-                                                          bool* ok) {
+PreParser::Expression PreParser::ParseBinaryExpression(int prec,
+                                                       bool accept_IN,
+                                                       bool* ok) {
   Expression result = ParseUnaryExpression(CHECK_OK);
   for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
     // prec1 >= 4
@@ -665,7 +665,7 @@
 }
 
 
-Expression PreParser::ParseUnaryExpression(bool* ok) {
+PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) {
   // UnaryExpression ::
   //   PostfixExpression
   //   'delete' UnaryExpression
@@ -689,7 +689,7 @@
 }
 
 
-Expression PreParser::ParsePostfixExpression(bool* ok) {
+PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) {
   // PostfixExpression ::
   //   LeftHandSideExpression ('++' | '--')?
 
@@ -703,7 +703,7 @@
 }
 
 
-Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
+PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) {
   // LeftHandSideExpression ::
   //   (NewExpression | MemberExpression) ...
 
@@ -752,7 +752,7 @@
 }
 
 
-Expression PreParser::ParseNewExpression(bool* ok) {
+PreParser::Expression PreParser::ParseNewExpression(bool* ok) {
   // NewExpression ::
   //   ('new')+ MemberExpression
 
@@ -774,12 +774,12 @@
 }
 
 
-Expression PreParser::ParseMemberExpression(bool* ok) {
+PreParser::Expression PreParser::ParseMemberExpression(bool* ok) {
   return ParseMemberWithNewPrefixesExpression(0, ok);
 }
 
 
-Expression PreParser::ParseMemberWithNewPrefixesExpression(
+PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression(
     unsigned new_count, bool* ok) {
   // MemberExpression ::
   //   (PrimaryExpression | FunctionLiteral)
@@ -835,7 +835,7 @@
 }
 
 
-Expression PreParser::ParsePrimaryExpression(bool* ok) {
+PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) {
   // PrimaryExpression ::
   //   'this'
   //   'null'
@@ -914,7 +914,7 @@
 }
 
 
-Expression PreParser::ParseArrayLiteral(bool* ok) {
+PreParser::Expression PreParser::ParseArrayLiteral(bool* ok) {
   // ArrayLiteral ::
   //   '[' Expression? (',' Expression?)* ']'
   Expect(i::Token::LBRACK, CHECK_OK);
@@ -933,7 +933,7 @@
 }
 
 
-Expression PreParser::ParseObjectLiteral(bool* ok) {
+PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
   // ObjectLiteral ::
   //   '{' (
   //       ((IdentifierName | String | Number) ':' AssignmentExpression)
@@ -995,8 +995,8 @@
 }
 
 
-Expression PreParser::ParseRegExpLiteral(bool seen_equal,
-                                         bool* ok) {
+PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal,
+                                                    bool* ok) {
   if (!scanner_->ScanRegExpPattern(seen_equal)) {
     Next();
     i::JavaScriptScanner::Location location = scanner_->location();
@@ -1021,7 +1021,7 @@
 }
 
 
-Arguments PreParser::ParseArguments(bool* ok) {
+PreParser::Arguments PreParser::ParseArguments(bool* ok) {
   // Arguments ::
   //   '(' (AssignmentExpression)*[','] ')'
 
@@ -1039,7 +1039,7 @@
 }
 
 
-Expression PreParser::ParseFunctionLiteral(bool* ok) {
+PreParser::Expression PreParser::ParseFunctionLiteral(bool* ok) {
   // Function ::
   //   '(' FormalParameterList? ')' '{' FunctionBody '}'
 
@@ -1078,6 +1078,7 @@
 
     Expect(i::Token::RBRACE, CHECK_OK);
 
+    // Position right after terminal '}'.
     int end_pos = scanner_->location().end_pos;
     log_->LogFunction(function_block_pos, end_pos,
                       function_scope.materialized_literal_count(),
@@ -1090,7 +1091,7 @@
 }
 
 
-Expression PreParser::ParseV8Intrinsic(bool* ok) {
+PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
   // CallRuntime ::
   //   '%' Identifier Arguments
 
@@ -1119,7 +1120,7 @@
 }
 
 
-Identifier PreParser::GetIdentifierSymbol() {
+PreParser::Identifier PreParser::GetIdentifierSymbol() {
   const char* literal_chars = scanner_->literal_string();
   int literal_length = scanner_->literal_length();
   int identifier_pos = scanner_->location().beg_pos;
@@ -1130,7 +1131,7 @@
 }
 
 
-Expression PreParser::GetStringSymbol() {
+PreParser::Expression PreParser::GetStringSymbol() {
   const char* literal_chars = scanner_->literal_string();
   int literal_length = scanner_->literal_length();
 
@@ -1141,14 +1142,14 @@
 }
 
 
-Identifier PreParser::ParseIdentifier(bool* ok) {
+PreParser::Identifier PreParser::ParseIdentifier(bool* ok) {
   Expect(i::Token::IDENTIFIER, ok);
   if (!*ok) return kUnknownIdentifier;
   return GetIdentifierSymbol();
 }
 
 
-Identifier PreParser::ParseIdentifierName(bool* ok) {
+PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) {
   i::Token::Value next = Next();
   if (i::Token::IsKeyword(next)) {
     int pos = scanner_->location().beg_pos;
@@ -1168,9 +1169,9 @@
 // is 'get' or 'set'.  The reason for not using ParseIdentifier and
 // checking on the output is that this involves heap allocation which
 // we can't do during preparsing.
-Identifier PreParser::ParseIdentifierOrGetOrSet(bool* is_get,
-                                                bool* is_set,
-                                                bool* ok) {
+PreParser::Identifier PreParser::ParseIdentifierOrGetOrSet(bool* is_get,
+                                                           bool* is_set,
+                                                           bool* ok) {
   Expect(i::Token::IDENTIFIER, CHECK_OK);
   if (scanner_->literal_length() == 3) {
     const char* token = scanner_->literal_string();
diff --git a/src/preparser.h b/src/preparser.h
index b783d65..893b575 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -46,56 +46,24 @@
 
 namespace i = v8::internal;
 
-enum StatementType {
-  kUnknownStatement
-};
-
-enum ExpressionType {
-  kUnknownExpression,
-  kIdentifierExpression,  // Used to detect labels.
-  kThisExpression,
-  kThisPropertyExpression
-};
-
-enum IdentifierType {
-  kUnknownIdentifier
-};
-
-enum SourceElementTypes {
-  kUnknownSourceElements
-};
-
-
-typedef int SourceElements;
-typedef int Expression;
-typedef int Statement;
-typedef int Identifier;
-typedef int Arguments;
-
-
 class PreParser {
  public:
-  PreParser() : scope_(NULL), allow_lazy_(true) { }
+  enum PreParseResult {
+    kPreParseStackOverflow,
+    kPreParseSuccess
+  };
+
   ~PreParser() { }
 
   // Pre-parse the program from the character stream; returns
   // kPreParseSuccess on success (even if parsing failed, the pre-parse
   // data successfully captured the syntax error), and
   // kPreParseStackOverflow if a stack overflow happened during parsing.
-  bool PreParseProgram(i::JavaScriptScanner* scanner,
-                       i::ParserRecorder* log,
-                       bool allow_lazy) {
-    allow_lazy_ = allow_lazy;
-    scanner_ = scanner;
-    log_ = log;
-    Scope top_scope(&scope_, kTopLevelScope);
-    bool ok = true;
-    ParseSourceElements(i::Token::EOS, &ok);
-    bool stack_overflow = scanner_->stack_overflow();
-    if (!ok && !stack_overflow) {
-      ReportUnexpectedToken(scanner_->current_token());
-    }
-    return !stack_overflow;
+  static PreParseResult PreParseProgram(i::JavaScriptScanner* scanner,
+                                        i::ParserRecorder* log,
+                                        bool allow_lazy,
+                                        uintptr_t stack_limit) {
+    return PreParser(scanner, log, stack_limit, allow_lazy).PreParse();
   }
 
  private:
@@ -104,6 +72,38 @@
     kFunctionScope
   };
 
+  // Types that allow us to recognize simple this-property assignments.
+  // A simple this-property assignment is a statement on the form
+  // "this.propertyName = {primitive constant or function parameter name);"
+  // where propertyName isn't "__proto__".
+  // The result is only relevant if the function body contains only
+  // simple this-property assignments.
+
+  enum StatementType {
+    kUnknownStatement
+  };
+
+  enum ExpressionType {
+    kUnknownExpression,
+    kIdentifierExpression,  // Used to detect labels.
+    kThisExpression,
+    kThisPropertyExpression
+  };
+
+  enum IdentifierType {
+    kUnknownIdentifier
+  };
+
+  enum SourceElementTypes {
+    kUnknownSourceElements
+  };
+
+  typedef int SourceElements;
+  typedef int Expression;
+  typedef int Statement;
+  typedef int Identifier;
+  typedef int Arguments;
+
   class Scope {
    public:
     Scope(Scope** variable, ScopeType type)
@@ -134,12 +134,30 @@
     int with_nesting_count_;
   };
 
-  // Types that allow us to recognize simple this-property assignments.
-  // A simple this-property assignment is a statement on the form
-  // "this.propertyName = {primitive constant or function parameter name);"
-  // where propertyName isn't "__proto__".
-  // The result is only relevant if the function body contains only
-  // simple this-property assignments.
+  // Private constructor only used in PreParseProgram.
+  PreParser(i::JavaScriptScanner* scanner,
+            i::ParserRecorder* log,
+            uintptr_t stack_limit,
+            bool allow_lazy)
+      : scanner_(scanner),
+        log_(log),
+        scope_(NULL),
+        stack_limit_(stack_limit),
+        stack_overflow_(false),
+        allow_lazy_(allow_lazy) { }
+
+  // Preparse the program. Only called in PreParseProgram after creating
+  // the instance.
+  PreParseResult PreParse() {
+    Scope top_scope(&scope_, kTopLevelScope);
+    bool ok = true;
+    ParseSourceElements(i::Token::EOS, &ok);
+    if (stack_overflow_) return kPreParseStackOverflow;
+    if (!ok) {
+      ReportUnexpectedToken(scanner_->current_token());
+    }
+    return kPreParseSuccess;
+  }
 
   // Report syntax error
   void ReportUnexpectedToken(i::Token::Value token);
@@ -202,16 +220,26 @@
   unsigned int HexDigitValue(char digit);
   Expression GetStringSymbol();
 
+  i::Token::Value peek() {
+    if (stack_overflow_) return i::Token::ILLEGAL;
+    return scanner_->peek();
+  }
 
-  i::Token::Value peek() { return scanner_->peek(); }
   i::Token::Value Next() {
-    i::Token::Value next = scanner_->Next();
-    return next;
+    if (stack_overflow_) return i::Token::ILLEGAL;
+    {
+      int marker;
+      if (reinterpret_cast<uintptr_t>(&marker) < stack_limit_) {
+        // Further calls to peek/Next will return illegal token.
+        // The current one will still be returned. It might already
+        // have been seen using peek.
+        stack_overflow_ = true;
+      }
+    }
+    return scanner_->Next();
   }
 
-  void Consume(i::Token::Value token) {
-    Next();
-  }
+  void Consume(i::Token::Value token) { Next(); }
 
   void Expect(i::Token::Value token, bool* ok) {
     if (Next() != token) {
@@ -234,6 +262,8 @@
   i::JavaScriptScanner* scanner_;
   i::ParserRecorder* log_;
   Scope* scope_;
+  uintptr_t stack_limit_;
+  bool stack_overflow_;
   bool allow_lazy_;
 };
 } }  // v8::preparser
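
Note: the stack_overflow_ machinery added to Next() above estimates the current stack pointer by taking the address of a local variable, which works because these stacks grow downward; once the limit is crossed, peek()/Next() keep returning Token::ILLEGAL and parsing unwinds without recursing further. The check in isolation (a sketch, not V8's exact helper):

    #include <stdint.h>

    // True once the stack has grown down past stack_limit.
    bool StackLimitExceeded(uintptr_t stack_limit) {
      int marker;
      return reinterpret_cast<uintptr_t>(&marker) < stack_limit;
    }
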
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index dfff49a..c83de34 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -51,6 +51,7 @@
   // Print a node to stdout.
   static void PrintOut(AstNode* node);
 
+  virtual void VisitSlot(Slot* node);
   // Individual nodes
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
@@ -85,9 +86,11 @@
   const char* PrintProgram(FunctionLiteral* program);
 
   // Individual nodes
+  virtual void VisitSlot(Slot* node);
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
+
  private:
   friend class IndentedScope;
   void PrintIndented(const char* txt);
@@ -160,6 +163,7 @@
   void AddAttribute(const char* name, bool value);
 
   // AST node visit functions.
+  virtual void VisitSlot(Slot* node);
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 8b5c1e2..3df6af0 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -122,7 +122,7 @@
 }
 
 
-inline uint64_t HeapEntry::id() {
+uint64_t HeapEntry::id() {
   union {
     Id stored_id;
     uint64_t returned_id;
@@ -146,6 +146,18 @@
   }
 }
 
+
+bool HeapSnapshotGenerator::ReportProgress(bool force) {
+  const int kProgressReportGranularity = 10000;
+  if (control_ != NULL &&
+      (force || progress_counter_ % kProgressReportGranularity == 0)) {
+    return control_->ReportProgressValue(progress_counter_,
+                                         progress_total_) ==
+           v8::ActivityControl::kContinue;
+  }
+  return true;
+}
+
 } }  // namespace v8::internal
 
 #endif  // ENABLE_LOGGING_AND_PROFILING
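
Note: ReportProgress() above consults the embedder only every kProgressReportGranularity objects, so snapshotting millions of heap objects does not pay one callback per object. The throttling pattern in isolation (hypothetical names):

    // Invokes the callback every kGranularity work units, or when forced;
    // returns false when the callback asks to cancel the long-running job.
    bool MaybeReport(int done, int total, bool force,
                     bool (*report)(int, int)) {
      const int kGranularity = 10000;  // Mirrors kProgressReportGranularity.
      if (!force && done % kGranularity != 0) return true;  // No report due.
      return report(done, total);
    }
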
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 640f13c..34d1877 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -603,8 +603,8 @@
   }
   List<CpuProfile*>* list = GetProfilesList(security_token_id);
   if (list->at(index) == NULL) {
-      list->at(index) =
-          unabridged_list->at(index)->FilteredClone(security_token_id);
+    (*list)[index] =
+        unabridged_list->at(index)->FilteredClone(security_token_id);
   }
   return list->at(index);
 }
@@ -653,7 +653,7 @@
   const int current_count = unabridged_list->length();
   for (int i = 0; i < current_count; ++i) {
     if (list->at(i) == NULL) {
-      list->at(i) = unabridged_list->at(i)->FilteredClone(security_token_id);
+      (*list)[i] = unabridged_list->at(i)->FilteredClone(security_token_id);
     }
   }
   return list;
@@ -1382,86 +1382,6 @@
 }
 
 
-void HeapSnapshot::FillReversePostorderIndexes(Vector<HeapEntry*>* entries) {
-  ClearPaint();
-  int current_entry = 0;
-  List<HeapEntry*> nodes_to_visit;
-  nodes_to_visit.Add(root());
-  root()->paint_reachable();
-  while (!nodes_to_visit.is_empty()) {
-    HeapEntry* entry = nodes_to_visit.last();
-    Vector<HeapGraphEdge> children = entry->children();
-    bool has_new_edges = false;
-    for (int i = 0; i < children.length(); ++i) {
-      if (children[i].type() == HeapGraphEdge::kShortcut) continue;
-      HeapEntry* child = children[i].to();
-      if (!child->painted_reachable()) {
-        nodes_to_visit.Add(child);
-        child->paint_reachable();
-        has_new_edges = true;
-      }
-    }
-    if (!has_new_edges) {
-      entry->set_ordered_index(current_entry);
-      entries->at(current_entry++) = entry;
-      nodes_to_visit.RemoveLast();
-    }
-  }
-  entries->Truncate(current_entry);
-}
-
-
-static int Intersect(int i1, int i2, const Vector<HeapEntry*>& dominators) {
-  int finger1 = i1, finger2 = i2;
-  while (finger1 != finger2) {
-    while (finger1 < finger2) finger1 = dominators[finger1]->ordered_index();
-    while (finger2 < finger1) finger2 = dominators[finger2]->ordered_index();
-  }
-  return finger1;
-}
-
-// The algorithm is based on the article:
-// K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm"
-// Softw. Pract. Exper. 4 (2001), pp. 1–10.
-void HeapSnapshot::BuildDominatorTree(const Vector<HeapEntry*>& entries,
-                                      Vector<HeapEntry*>* dominators) {
-  if (entries.length() == 0) return;
-  const int root_index = entries.length() - 1;
-  for (int i = 0; i < root_index; ++i) dominators->at(i) = NULL;
-  dominators->at(root_index) = entries[root_index];
-  bool changed = true;
-  while (changed) {
-    changed = false;
-    for (int i = root_index - 1; i >= 0; --i) {
-      HeapEntry* new_idom = NULL;
-      Vector<HeapGraphEdge*> rets = entries[i]->retainers();
-      int j = 0;
-      for (; j < rets.length(); ++j) {
-        if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
-        HeapEntry* ret = rets[j]->From();
-        if (dominators->at(ret->ordered_index()) != NULL) {
-          new_idom = ret;
-          break;
-        }
-      }
-      for (++j; j < rets.length(); ++j) {
-        if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
-        HeapEntry* ret = rets[j]->From();
-        if (dominators->at(ret->ordered_index()) != NULL) {
-          new_idom = entries[Intersect(ret->ordered_index(),
-                                       new_idom->ordered_index(),
-                                       *dominators)];
-        }
-      }
-      if (new_idom != NULL && dominators->at(i) != new_idom) {
-        dominators->at(i) = new_idom;
-        changed = true;
-      }
-    }
-  }
-}
-
-
 void HeapSnapshot::SetDominatorsToSelf() {
   for (int i = 0; i < entries_.length(); ++i) {
     HeapEntry* entry = entries_[i];
@@ -1470,61 +1390,6 @@
 }
 
 
-void HeapSnapshot::SetEntriesDominators() {
-  // This array is used for maintaining reverse postorder of nodes.
-  ScopedVector<HeapEntry*> ordered_entries(entries_.length());
-  FillReversePostorderIndexes(&ordered_entries);
-  ScopedVector<HeapEntry*> dominators(ordered_entries.length());
-  BuildDominatorTree(ordered_entries, &dominators);
-  for (int i = 0; i < ordered_entries.length(); ++i) {
-    ASSERT(dominators[i] != NULL);
-    ordered_entries[i]->set_dominator(dominators[i]);
-  }
-  // For nodes unreachable from root, set dominator to itself.
-  SetDominatorsToSelf();
-}
-
-
-void HeapSnapshot::ApproximateRetainedSizes() {
-  SetEntriesDominators();
-  // As for the dominators tree we only know parent nodes, not
-  // children, to sum up total sizes we traverse the tree level by
-  // level upwards, starting from leaves.
-  for (int i = 0; i < entries_.length(); ++i) {
-    HeapEntry* entry = entries_[i];
-    entry->set_retained_size(entry->self_size());
-    entry->set_leaf();
-  }
-  while (true) {
-    bool onlyLeaves = true;
-    for (int i = 0; i < entries_.length(); ++i) {
-      HeapEntry *entry = entries_[i], *dominator = entry->dominator();
-      if (!entry->is_processed() && dominator != entry) {
-        dominator->set_non_leaf();
-        onlyLeaves = false;
-      }
-    }
-    if (onlyLeaves) break;
-
-    for (int i = 0; i < entries_.length(); ++i) {
-      HeapEntry *entry = entries_[i], *dominator = entry->dominator();
-      if (entry->is_leaf() && dominator != entry) {
-        dominator->add_retained_size(entry->retained_size());
-      }
-    }
-
-    // Mark all current leaves as processed, reset non-leaves back to leaves.
-    for (int i = 0; i < entries_.length(); ++i) {
-      HeapEntry* entry = entries_[i];
-      if (entry->is_leaf())
-        entry->set_processed();
-      else if (entry->is_non_leaf())
-        entry->set_leaf();
-    }
-  }
-}
-
-
 HeapEntry* HeapSnapshot::GetNextEntryToInit() {
   if (entries_.length() > 0) {
     HeapEntry* last_entry = entries_.last();
@@ -1544,6 +1409,29 @@
 }
 
 
+HeapEntry* HeapSnapshot::GetEntryById(uint64_t id) {
+  // GetSortedEntriesList is used in diff algorithm and sorts
+  // entries by their id.
+  List<HeapEntry*>* entries_by_id = GetSortedEntriesList();
+
+  // Perform a binary search by id.
+  int low = 0;
+  int high = entries_by_id->length() - 1;
+  while (low <= high) {
+    int mid =
+        (static_cast<unsigned int>(low) + static_cast<unsigned int>(high)) >> 1;
+    uint64_t mid_id = entries_by_id->at(mid)->id();
+    if (mid_id > id)
+      high = mid - 1;
+    else if (mid_id < id)
+      low = mid + 1;
+    else
+      return entries_by_id->at(mid);
+  }
+  return NULL;
+}
+
+
 List<HeapGraphPath*>* HeapSnapshot::GetRetainingPaths(HeapEntry* entry) {
   HashMap::Entry* p =
       retaining_paths_.Lookup(entry, HeapEntry::Hash(entry), true);
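
Note: GetEntryById() above computes its midpoint as ((unsigned)low + (unsigned)high) >> 1 so the addition cannot overflow a signed int on very large snapshots; low + (high - low) / 2 is the equivalent overflow-safe form. The same search over a plain sorted array (a sketch with hypothetical names):

    #include <stdint.h>

    // Returns the index of id in the ascending array ids[0..length), or -1.
    int BinarySearchById(const uint64_t* ids, int length, uint64_t id) {
      int low = 0;
      int high = length - 1;
      while (low <= high) {
        int mid = low + (high - low) / 2;  // Overflow-safe midpoint.
        if (ids[mid] > id) {
          high = mid - 1;
        } else if (ids[mid] < id) {
          low = mid + 1;
        } else {
          return mid;
        }
      }
      return -1;
    }
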
@@ -1693,15 +1581,22 @@
                                                    const char* name,
                                                    unsigned uid) {
   is_tracking_objects_ = true;  // Start watching for heap objects moves.
-  HeapSnapshot* snapshot = new HeapSnapshot(this, type, name, uid);
-  snapshots_.Add(snapshot);
-  HashMap::Entry* entry =
-      snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
-                             static_cast<uint32_t>(snapshot->uid()),
-                             true);
-  ASSERT(entry->value == NULL);
-  entry->value = snapshot;
-  return snapshot;
+  return new HeapSnapshot(this, type, name, uid);
+}
+
+
+void HeapSnapshotsCollection::SnapshotGenerationFinished(
+    HeapSnapshot* snapshot) {
+  ids_.SnapshotGenerationFinished();
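+  // Generation may have been aborted, in which case the snapshot is
+  // NULL; only completed snapshots are registered and mapped by uid.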
+  if (snapshot != NULL) {
+    snapshots_.Add(snapshot);
+    HashMap::Entry* entry =
+        snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
+                               static_cast<uint32_t>(snapshot->uid()),
+                               true);
+    ASSERT(entry->value == NULL);
+    entry->value = snapshot;
+  }
 }
 
 
@@ -1809,8 +1704,10 @@
 }
 
 
-HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot)
+HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
+                                             v8::ActivityControl* control)
     : snapshot_(snapshot),
+      control_(control),
       collection_(snapshot->collection()),
       filler_(NULL) {
 }
@@ -1967,21 +1864,13 @@
 };
 
 
-void HeapSnapshotGenerator::GenerateSnapshot() {
+bool HeapSnapshotGenerator::GenerateSnapshot() {
   AssertNoAllocation no_alloc;
 
+  SetProgressTotal(4);  // 2 passes + dominators + sizes.
+
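+  // Each phase below reports progress through the embedder-supplied
+  // ActivityControl (if any) and bails out early if an abort is
+  // requested.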
   // Pass 1. Iterate heap contents to count entries and references.
-  SnapshotCounter counter(&entries_);
-  filler_ = &counter;
-  filler_->AddEntry(HeapSnapshot::kInternalRootObject);
-  filler_->AddEntry(HeapSnapshot::kGcRootsObject);
-  HeapIterator iterator(HeapIterator::kPreciseFiltering);
-  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
-    ExtractReferences(obj);
-  }
-  SetRootGcRootsReference();
-  RootsReferencesExtractor extractor(this);
-  Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
+  if (!CountEntriesAndReferences()) return false;
 
   // Allocate and fill entries in the snapshot, allocate references.
   snapshot_->AllocateEntries(entries_.entries_count(),
@@ -1991,16 +1880,14 @@
   entries_.UpdateEntries(&allocator);
 
   // Pass 2. Fill references.
-  SnapshotFiller filler(snapshot_, &entries_);
-  filler_ = &filler;
-  iterator.reset();
-  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
-    ExtractReferences(obj);
-  }
-  SetRootGcRootsReference();
-  Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
+  if (!FillReferences()) return false;
 
-  snapshot_->ApproximateRetainedSizes();
+  if (!SetEntriesDominators()) return false;
+  if (!ApproximateRetainedSizes()) return false;
+
+  progress_counter_ = progress_total_;
+  if (!ReportProgress(true)) return false;
+  return true;
 }
 
 
@@ -2328,6 +2215,181 @@
 }
 
 
+void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
+  if (control_ == NULL) return;
+
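+  // Walk the heap once to count objects; the expected total amount of
+  // work is one visit per object per iteration.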
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+  int objects_count = 0;
+  for (HeapObject* obj = iterator.next();
+       obj != NULL;
+       obj = iterator.next(), ++objects_count) {}
+  progress_total_ = objects_count * iterations_count;
+  progress_counter_ = 0;
+}
+
+
+bool HeapSnapshotGenerator::CountEntriesAndReferences() {
+  SnapshotCounter counter(&entries_);
+  filler_ = &counter;
+  filler_->AddEntry(HeapSnapshot::kInternalRootObject);
+  filler_->AddEntry(HeapSnapshot::kGcRootsObject);
+  return IterateAndExtractReferences();
+}
+
+
+bool HeapSnapshotGenerator::FillReferences() {
+  SnapshotFiller filler(snapshot_, &entries_);
+  filler_ = &filler;
+  return IterateAndExtractReferences();
+}
+
+
+void HeapSnapshotGenerator::FillReversePostorderIndexes(
+    Vector<HeapEntry*>* entries) {
+  snapshot_->ClearPaint();
+  int current_entry = 0;
+  List<HeapEntry*> nodes_to_visit;
+  nodes_to_visit.Add(snapshot_->root());
+  snapshot_->root()->paint_reachable();
+  while (!nodes_to_visit.is_empty()) {
+    HeapEntry* entry = nodes_to_visit.last();
+    Vector<HeapGraphEdge> children = entry->children();
+    bool has_new_edges = false;
+    for (int i = 0; i < children.length(); ++i) {
+      if (children[i].type() == HeapGraphEdge::kShortcut) continue;
+      HeapEntry* child = children[i].to();
+      if (!child->painted_reachable()) {
+        nodes_to_visit.Add(child);
+        child->paint_reachable();
+        has_new_edges = true;
+      }
+    }
+    if (!has_new_edges) {
+      entry->set_ordered_index(current_entry);
+      (*entries)[current_entry++] = entry;
+      nodes_to_visit.RemoveLast();
+    }
+  }
+  entries->Truncate(current_entry);
+}
+
+
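+// Finds the common dominator of two nodes, given by their ordered
+// indexes, walking both "fingers" up the partially built dominator tree
+// until they meet (the "intersect" step of the algorithm cited below).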
+static int Intersect(int i1, int i2, const Vector<HeapEntry*>& dominators) {
+  int finger1 = i1, finger2 = i2;
+  while (finger1 != finger2) {
+    while (finger1 < finger2) finger1 = dominators[finger1]->ordered_index();
+    while (finger2 < finger1) finger2 = dominators[finger2]->ordered_index();
+  }
+  return finger1;
+}
+
+// The algorithm is based on the article:
+// K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm"
+// Softw. Pract. Exper. 4 (2001), pp. 1–10.
+bool HeapSnapshotGenerator::BuildDominatorTree(
+    const Vector<HeapEntry*>& entries,
+    Vector<HeapEntry*>* dominators) {
+  if (entries.length() == 0) return true;
+  const int entries_length = entries.length(), root_index = entries_length - 1;
+  for (int i = 0; i < root_index; ++i) (*dominators)[i] = NULL;
+  (*dominators)[root_index] = entries[root_index];
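+  // The root is the last entry in the ordering: it dominates itself,
+  // and every other immediate dominator starts out unknown (NULL).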
+  int changed = 1;
+  const int base_progress_counter = progress_counter_;
+  while (changed != 0) {
+    changed = 0;
+    for (int i = root_index - 1; i >= 0; --i) {
+      HeapEntry* new_idom = NULL;
+      Vector<HeapGraphEdge*> rets = entries[i]->retainers();
+      int j = 0;
+      for (; j < rets.length(); ++j) {
+        if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
+        HeapEntry* ret = rets[j]->From();
+        if (dominators->at(ret->ordered_index()) != NULL) {
+          new_idom = ret;
+          break;
+        }
+      }
+      for (++j; j < rets.length(); ++j) {
+        if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
+        HeapEntry* ret = rets[j]->From();
+        if (dominators->at(ret->ordered_index()) != NULL) {
+          new_idom = entries[Intersect(ret->ordered_index(),
+                                       new_idom->ordered_index(),
+                                       *dominators)];
+        }
+      }
+      if (new_idom != NULL && dominators->at(i) != new_idom) {
+        (*dominators)[i] = new_idom;
+        ++changed;
+      }
+    }
+    int remaining = entries_length - changed;
+    if (remaining < 0) remaining = 0;
+    progress_counter_ = base_progress_counter + remaining;
+    if (!ReportProgress(true)) return false;
+  }
+  return true;
+}
+
+
+bool HeapSnapshotGenerator::SetEntriesDominators() {
+  // This array is used for maintaining reverse postorder of nodes.
+  ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries()->length());
+  FillReversePostorderIndexes(&ordered_entries);
+  ScopedVector<HeapEntry*> dominators(ordered_entries.length());
+  if (!BuildDominatorTree(ordered_entries, &dominators)) return false;
+  for (int i = 0; i < ordered_entries.length(); ++i) {
+    ASSERT(dominators[i] != NULL);
+    ordered_entries[i]->set_dominator(dominators[i]);
+  }
+  return true;
+}
+
+
+bool HeapSnapshotGenerator::ApproximateRetainedSizes() {
+  // Since the dominator tree only records parent nodes, not children,
+  // we sum up total sizes by "bubbling" each node's self size up the
+  // tree, adding it to all of its dominators.
+  for (int i = 0; i < snapshot_->entries()->length(); ++i) {
+    HeapEntry* entry = snapshot_->entries()->at(i);
+    entry->set_retained_size(entry->self_size());
+  }
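+  // Walk each entry's dominator chain up to the root, adding the
+  // entry's self size to every dominator on the way.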
+  for (int i = 0;
+       i < snapshot_->entries()->length();
+       ++i, IncProgressCounter()) {
+    HeapEntry* entry = snapshot_->entries()->at(i);
+    int entry_size = entry->self_size();
+    for (HeapEntry* dominator = entry->dominator();
+         dominator != entry;
+         entry = dominator, dominator = entry->dominator()) {
+      dominator->add_retained_size(entry_size);
+    }
+    if (!ReportProgress()) return false;
+  }
+  return true;
+}
+
+
+bool HeapSnapshotGenerator::IterateAndExtractReferences() {
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+  bool interrupted = false;
+  // Heap iteration with filtering must run to completion in any case,
+  // so on interruption we keep iterating but skip the extraction work.
+  for (HeapObject* obj = iterator.next();
+       obj != NULL;
+       obj = iterator.next(), IncProgressCounter()) {
+    if (!interrupted) {
+      ExtractReferences(obj);
+      if (!ReportProgress()) interrupted = true;
+    }
+  }
+  if (interrupted) return false;
+  SetRootGcRootsReference();
+  RootsReferencesExtractor extractor(this);
+  Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG);
+  return ReportProgress();
+}
+
+
 void HeapSnapshotsDiff::CreateRoots(int additions_count, int deletions_count) {
   raw_additions_root_ =
       NewArray<char>(HeapEntry::EntriesSize(1, additions_count, 0));
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 55c57fd..cacd27e 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -526,7 +526,7 @@
   HeapSnapshot* snapshot() { return snapshot_; }
   Type type() { return static_cast<Type>(type_); }
   const char* name() { return name_; }
-  uint64_t id();
+  inline uint64_t id();
   int self_size() { return self_size_; }
   int retained_size() { return retained_size_; }
   void add_retained_size(int size) { retained_size_ += size; }
@@ -558,13 +558,6 @@
   void ApplyAndPaintAllReachable(Visitor* visitor);
   void PaintAllReachable();
 
-  bool is_leaf() { return painted_ == kLeaf; }
-  void set_leaf() { painted_ = kLeaf; }
-  bool is_non_leaf() { return painted_ == kNonLeaf; }
-  void set_non_leaf() { painted_ = kNonLeaf; }
-  bool is_processed() { return painted_ == kProcessed; }
-  void set_processed() { painted_ = kProcessed; }
-
   void SetIndexedReference(HeapGraphEdge::Type type,
                            int child_index,
                            int index,
@@ -625,10 +618,6 @@
   static const unsigned kUnpainted = 0;
   static const unsigned kPainted = 1;
   static const unsigned kPaintedReachableFromOthers = 2;
-  // Paints used for approximate retained sizes calculation.
-  static const unsigned kLeaf = 0;
-  static const unsigned kNonLeaf = 1;
-  static const unsigned kProcessed = 2;
 
   static const int kExactRetainedSizeTag = 1;
 
@@ -682,6 +671,7 @@
   unsigned uid() { return uid_; }
   HeapEntry* root() { return root_entry_; }
   HeapEntry* gc_roots() { return gc_roots_entry_; }
+  List<HeapEntry*>* entries() { return &entries_; }
 
   void AllocateEntries(
       int entries_count, int children_count, int retainers_count);
@@ -693,9 +683,9 @@
                       int size,
                       int children_count,
                       int retainers_count);
-  void ApproximateRetainedSizes();
   void ClearPaint();
   HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
+  HeapEntry* GetEntryById(uint64_t id);
   List<HeapGraphPath*>* GetRetainingPaths(HeapEntry* entry);
   List<HeapEntry*>* GetSortedEntriesList();
   template<class Visitor>
@@ -715,10 +705,6 @@
                       int children_count,
                       int retainers_count);
   HeapEntry* GetNextEntryToInit();
-  void BuildDominatorTree(const Vector<HeapEntry*>& entries,
-                          Vector<HeapEntry*>* dominators);
-  void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
-  void SetEntriesDominators();
 
   HeapSnapshotsCollection* collection_;
   Type type_;
@@ -844,7 +830,7 @@
 
   HeapSnapshot* NewSnapshot(
       HeapSnapshot::Type type, const char* name, unsigned uid);
-  void SnapshotGenerationFinished() { ids_.SnapshotGenerationFinished(); }
+  void SnapshotGenerationFinished(HeapSnapshot* snapshot);
   List<HeapSnapshot*>* snapshots() { return &snapshots_; }
   HeapSnapshot* GetSnapshot(unsigned uid);
 
@@ -967,16 +953,27 @@
                                         HeapEntry* child_entry) = 0;
   };
 
-  explicit HeapSnapshotGenerator(HeapSnapshot* snapshot);
-  void GenerateSnapshot();
+  HeapSnapshotGenerator(HeapSnapshot* snapshot,
+                        v8::ActivityControl* control);
+  bool GenerateSnapshot();
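+  // Minimal usage sketch (MyActivityControl stands for an
+  // embedder-defined v8::ActivityControl implementation; the name is
+  // illustrative only):
+  //
+  //   MyActivityControl control;
+  //   HeapSnapshotGenerator generator(snapshot, &control);
+  //   if (!generator.GenerateSnapshot()) { /* aborted by embedder */ }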
 
  private:
+  bool ApproximateRetainedSizes();
+  bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
+                          Vector<HeapEntry*>* dominators);
+  bool CountEntriesAndReferences();
   HeapEntry* GetEntry(Object* obj);
+  void IncProgressCounter() { ++progress_counter_; }
   void ExtractReferences(HeapObject* obj);
   void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
   void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
   void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
   void ExtractInternalReferences(JSObject* js_obj, HeapEntry* entry);
+  bool FillReferences();
+  void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
+  bool IterateAndExtractReferences();
+  inline bool ReportProgress(bool force = false);
+  bool SetEntriesDominators();
   void SetClosureReference(HeapObject* parent_obj,
                            HeapEntry* parent,
                            String* reference_name,
@@ -1008,8 +1005,10 @@
   void SetRootShortcutReference(Object* child);
   void SetRootGcRootsReference();
   void SetGcRootsReference(Object* child);
+  void SetProgressTotal(int iterations_count);
 
   HeapSnapshot* snapshot_;
+  v8::ActivityControl* control_;
   HeapSnapshotsCollection* collection_;
   // Mapping from HeapObject* pointers to HeapEntry* pointers.
   HeapEntriesMap entries_;
@@ -1017,6 +1016,9 @@
   // Used during references extraction to mark heap objects that
   // are references via non-hidden properties.
   HeapObjectsSet known_references_;
+  // Used during snapshot generation.
+  int progress_counter_;
+  int progress_total_;
 
   friend class IndexedReferencesExtractor;
   friend class RootsReferencesExtractor;
diff --git a/src/property.cc b/src/property.cc
index b579b68..9677433 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -31,62 +31,62 @@
 namespace internal {
 
 
-#ifdef DEBUG
-void LookupResult::Print() {
+#ifdef OBJECT_PRINT
+void LookupResult::Print(FILE* out) {
   if (!IsFound()) {
-    PrintF("Not Found\n");
+    PrintF(out, "Not Found\n");
     return;
   }
 
-  PrintF("LookupResult:\n");
-  PrintF(" -cacheable = %s\n", IsCacheable() ? "true" : "false");
-  PrintF(" -attributes = %x\n", GetAttributes());
+  PrintF(out, "LookupResult:\n");
+  PrintF(out, " -cacheable = %s\n", IsCacheable() ? "true" : "false");
+  PrintF(out, " -attributes = %x\n", GetAttributes());
   switch (type()) {
     case NORMAL:
-      PrintF(" -type = normal\n");
-      PrintF(" -entry = %d", GetDictionaryEntry());
+      PrintF(out, " -type = normal\n");
+      PrintF(out, " -entry = %d", GetDictionaryEntry());
       break;
     case MAP_TRANSITION:
-      PrintF(" -type = map transition\n");
-      PrintF(" -map:\n");
-      GetTransitionMap()->Print();
-      PrintF("\n");
+      PrintF(out, " -type = map transition\n");
+      PrintF(out, " -map:\n");
+      GetTransitionMap()->Print(out);
+      PrintF(out, "\n");
       break;
     case CONSTANT_FUNCTION:
-      PrintF(" -type = constant function\n");
-      PrintF(" -function:\n");
-      GetConstantFunction()->Print();
-      PrintF("\n");
+      PrintF(out, " -type = constant function\n");
+      PrintF(out, " -function:\n");
+      GetConstantFunction()->Print(out);
+      PrintF(out, "\n");
       break;
     case FIELD:
-      PrintF(" -type = field\n");
-      PrintF(" -index = %d", GetFieldIndex());
-      PrintF("\n");
+      PrintF(out, " -type = field\n");
+      PrintF(out, " -index = %d", GetFieldIndex());
+      PrintF(out, "\n");
       break;
     case CALLBACKS:
-      PrintF(" -type = call backs\n");
-      PrintF(" -callback object:\n");
-      GetCallbackObject()->Print();
+      PrintF(out, " -type = call backs\n");
+      PrintF(out, " -callback object:\n");
+      GetCallbackObject()->Print(out);
       break;
     case INTERCEPTOR:
-      PrintF(" -type = lookup interceptor\n");
+      PrintF(out, " -type = lookup interceptor\n");
       break;
     case CONSTANT_TRANSITION:
-      PrintF(" -type = constant property transition\n");
+      PrintF(out, " -type = constant property transition\n");
       break;
     case NULL_DESCRIPTOR:
-      PrintF(" =type = null descriptor\n");
+      PrintF(out, " =type = null descriptor\n");
       break;
   }
 }
 
 
-void Descriptor::Print() {
-  PrintF("Descriptor ");
-  GetKey()->ShortPrint();
-  PrintF(" @ ");
-  GetValue()->ShortPrint();
-  PrintF(" %d\n", GetDetails().index());
+void Descriptor::Print(FILE* out) {
+  PrintF(out, "Descriptor ");
+  GetKey()->ShortPrint(out);
+  PrintF(out, " @ ");
+  GetValue()->ShortPrint(out);
+  PrintF(out, " %d\n", GetDetails().index());
 }
 
 
diff --git a/src/property.h b/src/property.h
index 4715a72..c39fe41 100644
--- a/src/property.h
+++ b/src/property.h
@@ -60,8 +60,8 @@
   Object* GetValue() { return value_; }
   PropertyDetails GetDetails() { return details_; }
 
-#ifdef DEBUG
-  void Print();
+#ifdef OBJECT_PRINT
+  void Print(FILE* out);
 #endif
 
   void SetEnumerationIndex(int index) {
@@ -266,12 +266,26 @@
     return Map::cast(GetValue());
   }
 
+  Map* GetTransitionMapFromMap(Map* map) {
+    ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+    ASSERT(type() == MAP_TRANSITION);
+    return Map::cast(map->instance_descriptors()->GetValue(number_));
+  }
+
   int GetFieldIndex() {
     ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
     ASSERT(type() == FIELD);
     return Descriptor::IndexFromValue(GetValue());
   }
 
+  int GetLocalFieldIndexFromMap(Map* map) {
+    ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+    ASSERT(type() == FIELD);
+    return Descriptor::IndexFromValue(
+        map->instance_descriptors()->GetValue(number_)) -
+        map->inobject_properties();
+  }
+
   int GetDictionaryEntry() {
     ASSERT(lookup_type_ == DICTIONARY_TYPE);
     return number_;
@@ -282,6 +296,12 @@
     return JSFunction::cast(GetValue());
   }
 
+  JSFunction* GetConstantFunctionFromMap(Map* map) {
+    ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
+    ASSERT(type() == CONSTANT_FUNCTION);
+    return JSFunction::cast(map->instance_descriptors()->GetValue(number_));
+  }
+
   Object* GetCallbackObject() {
     if (lookup_type_ == CONSTANT_TYPE) {
       // For now we only have the __proto__ as constant type.
@@ -290,8 +310,8 @@
     return GetValue();
   }
 
-#ifdef DEBUG
-  void Print();
+#ifdef OBJECT_PRINT
+  void Print(FILE* out);
 #endif
 
   Object* GetValue() {
diff --git a/src/regexp.js b/src/regexp.js
index d01d04f..0de66c6 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -32,7 +32,7 @@
 
 // A recursive descent parser for Patterns according to the grammar of
 // ECMA-262 15.10.1, with deviations noted below.
-function DoConstructRegExp(object, pattern, flags, isConstructorCall) {
+function DoConstructRegExp(object, pattern, flags) {
   // RegExp : Called as constructor; see ECMA-262, section 15.10.4.
   if (IS_REGEXP(pattern)) {
     if (!IS_UNDEFINED(flags)) {
@@ -80,7 +80,7 @@
 
 function RegExpConstructor(pattern, flags) {
   if (%_IsConstructCall()) {
-    DoConstructRegExp(this, pattern, flags, true);
+    DoConstructRegExp(this, pattern, flags);
   } else {
     // RegExp : Called as function; see ECMA-262, section 15.10.3.1.
     if (IS_REGEXP(pattern) && IS_UNDEFINED(flags)) {
@@ -104,9 +104,9 @@
   // the empty string.  For compatibility with JSC, we match their
   // behavior.
   if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
-    DoConstructRegExp(this, 'undefined', flags, false);
+    DoConstructRegExp(this, 'undefined', flags);
   } else {
-    DoConstructRegExp(this, pattern, flags, false);
+    DoConstructRegExp(this, pattern, flags);
   }
 }
 
@@ -120,22 +120,28 @@
 
 function BuildResultFromMatchInfo(lastMatchInfo, s) {
   var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1;
-  var result = %_RegExpConstructResult(numResults, lastMatchInfo[CAPTURE0], s);
-  if (numResults === 1) {
-    var matchStart = lastMatchInfo[CAPTURE(0)];
-    var matchEnd = lastMatchInfo[CAPTURE(1)];
-    result[0] = SubString(s, matchStart, matchEnd);
+  var start = lastMatchInfo[CAPTURE0];
+  var end = lastMatchInfo[CAPTURE1];
+  var result = %_RegExpConstructResult(numResults, start, s);
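+  // A one-character match takes the %_StringCharAt fast path instead of
+  // allocating a substring.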
+  if (start + 1 == end) {
+    result[0] = %_StringCharAt(s, start);
   } else {
-    for (var i = 0; i < numResults; i++) {
-      var matchStart = lastMatchInfo[CAPTURE(i << 1)];
-      var matchEnd = lastMatchInfo[CAPTURE((i << 1) + 1)];
-      if (matchStart != -1 && matchEnd != -1) {
-        result[i] = SubString(s, matchStart, matchEnd);
+    result[0] = %_SubString(s, start, end);
+  }
+  var j = REGEXP_FIRST_CAPTURE + 2;
+  for (var i = 1; i < numResults; i++) {
+    start = lastMatchInfo[j++];
+    end = lastMatchInfo[j++];
+    if (end != -1) {
+      if (start + 1 == end) {
+        result[i] = %_StringCharAt(s, start);
       } else {
-        // Make sure the element is present. Avoid reading the undefined
-        // property from the global object since this may change.
-        result[i] = void 0;
+        result[i] = %_SubString(s, start, end);
       }
+    } else {
+      // Make sure the element is present. Avoid reading the undefined
+      // property from the global object since this may change.
+      result[i] = void 0;
     }
   }
   return result;
@@ -144,12 +150,12 @@
 
 function RegExpExecNoTests(regexp, string, start) {
   // Must be called with RegExp, string and positive integer as arguments.
-  var matchInfo = DoRegExpExec(regexp, string, start);
-  var result = null;
+  var matchInfo = %_RegExpExec(regexp, string, start, lastMatchInfo);
   if (matchInfo !== null) {
-    result = BuildResultFromMatchInfo(matchInfo, string);
+    lastMatchInfoOverride = null;
+    return BuildResultFromMatchInfo(matchInfo, string);
   }
-  return result;
+  return null;
 }
 
 
@@ -166,12 +172,7 @@
     }
     string = regExpInput;
   }
-  var s;
-  if (IS_STRING(string)) {
-    s = string;
-  } else {
-    s = ToString(string);
-  }
+  string = TO_STRING_INLINE(string);
   var lastIndex = this.lastIndex;
 
   // Conversion is required by the ES5 specification (RegExp.prototype.exec
@@ -180,7 +181,7 @@
 
   var global = this.global;
   if (global) {
-    if (i < 0 || i > s.length) {
+    if (i < 0 || i > string.length) {
       this.lastIndex = 0;
       return null;
     }
@@ -188,9 +189,9 @@
     i = 0;
   }
 
-  %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
+  %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
   // matchIndices is either null or the lastMatchInfo array.
-  var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
+  var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
 
   if (matchIndices === null) {
     if (global) this.lastIndex = 0;
@@ -202,7 +203,7 @@
   if (global) {
     this.lastIndex = lastMatchInfo[CAPTURE1];
   }
-  return BuildResultFromMatchInfo(matchIndices, s);
+  return BuildResultFromMatchInfo(matchIndices, string);
 }
 
 
@@ -227,12 +228,7 @@
     string = regExpInput;
   }
 
-  var s;
-  if (IS_STRING(string)) {
-    s = string;
-  } else {
-    s = ToString(string);
-  }
+  string = TO_STRING_INLINE(string);
 
   var lastIndex = this.lastIndex;
 
@@ -241,13 +237,13 @@
   var i = TO_INTEGER(lastIndex);
   
   if (this.global) {
-    if (i < 0 || i > s.length) {
+    if (i < 0 || i > string.length) {
       this.lastIndex = 0;
       return false;
     }
-    %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
+    %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
     // matchIndices is either null or the lastMatchInfo array.
-    var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
+    var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
     if (matchIndices === null) {
       this.lastIndex = 0;
       return false;
@@ -265,15 +261,18 @@
         %_StringCharCodeAt(this.source, 2) != 63) {  // '?'
       if (!%_ObjectEquals(regexp_key, this)) {
         regexp_key = this;
-        regexp_val = new $RegExp(this.source.substring(2, this.source.length),
-                                 (this.ignoreCase ? 'i' : '')
-                                 + (this.multiline ? 'm' : ''));
+        regexp_val = new $RegExp(SubString(this.source, 2, this.source.length),
+                                 (!this.ignoreCase
+                                  ? !this.multiline ? "" : "m"
+                                  : !this.multiline ? "i" : "im"));
       }
-      if (!regexp_val.test(s)) return false;
+      if (%_RegExpExec(regexp_val, string, 0, lastMatchInfo) === null) {
+        return false;
+      }
     }    
-    %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
+    %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
     // matchIndices is either null or the lastMatchInfo array.
-    var matchIndices = %_RegExpExec(this, s, 0, lastMatchInfo);
+    var matchIndices = %_RegExpExec(this, string, 0, lastMatchInfo);
     if (matchIndices === null) return false;
     lastMatchInfoOverride = null;
     return true;
diff --git a/src/rewriter.cc b/src/rewriter.cc
index b6f8240..3d737a4 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -222,11 +222,6 @@
 }
 
 
-void AstOptimizer::VisitSlot(Slot* node) {
-  USE(node);
-}
-
-
 void AstOptimizer::VisitVariableProxy(VariableProxy* node) {
   Variable* var = node->AsVariable();
   if (var != NULL) {
@@ -686,7 +681,7 @@
 
 class Processor: public AstVisitor {
  public:
-  explicit Processor(VariableProxy* result)
+  explicit Processor(Variable* result)
       : result_(result),
         result_assigned_(false),
         is_set_(false),
@@ -697,7 +692,7 @@
   bool result_assigned() const { return result_assigned_; }
 
  private:
-  VariableProxy* result_;
+  Variable* result_;
 
   // We are not tracking result usage via the result_'s use
   // counts (we leave the accurate computation to the
@@ -714,7 +709,8 @@
 
   Expression* SetResult(Expression* value) {
     result_assigned_ = true;
-    return new Assignment(Token::ASSIGN, result_, value,
+    VariableProxy* result_proxy = new VariableProxy(result_);
+    return new Assignment(Token::ASSIGN, result_proxy, value,
                           RelocInfo::kNoPosition);
   }
 
@@ -869,12 +865,6 @@
 }
 
 
-void Processor::VisitSlot(Slot* node) {
-  USE(node);
-  UNREACHABLE();
-}
-
-
 void Processor::VisitVariableProxy(VariableProxy* node) {
   USE(node);
   UNREACHABLE();
@@ -999,12 +989,15 @@
 
   ZoneList<Statement*>* body = function->body();
   if (!body->is_empty()) {
-    VariableProxy* result = scope->NewTemporary(Factory::result_symbol());
+    Variable* result = scope->NewTemporary(Factory::result_symbol());
     Processor processor(result);
     processor.Process(body);
     if (processor.HasStackOverflow()) return false;
 
-    if (processor.result_assigned()) body->Add(new ReturnStatement(result));
+    if (processor.result_assigned()) {
+      VariableProxy* result_proxy = new VariableProxy(result);
+      body->Add(new ReturnStatement(result_proxy));
+    }
   }
 
   return true;
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
new file mode 100644
index 0000000..c53ddd2
--- /dev/null
+++ b/src/runtime-profiler.cc
@@ -0,0 +1,458 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "runtime-profiler.h"
+
+#include "assembler.h"
+#include "code-stubs.h"
+#include "compilation-cache.h"
+#include "deoptimizer.h"
+#include "execution.h"
+#include "global-handles.h"
+#include "scopeinfo.h"
+#include "top.h"
+
+namespace v8 {
+namespace internal {
+
+
+class PendingListNode : public Malloced {
+ public:
+  explicit PendingListNode(JSFunction* function);
+  ~PendingListNode() { Destroy(); }
+
+  PendingListNode* next() const { return next_; }
+  void set_next(PendingListNode* node) { next_ = node; }
+  Handle<JSFunction> function() { return Handle<JSFunction>::cast(function_); }
+
+  // If the function is garbage collected before we've had the chance
+  // to optimize it, the weak handle will be null.
+  bool IsValid() { return !function_.is_null(); }
+
+  // Returns the number of microseconds this node has been pending.
+  int Delay() const { return static_cast<int>(OS::Ticks() - start_); }
+
+ private:
+  void Destroy();
+  static void WeakCallback(v8::Persistent<v8::Value> object, void* data);
+
+  PendingListNode* next_;
+  Handle<Object> function_;  // Weak handle.
+  int64_t start_;
+};
+
+
+enum SamplerState {
+  IN_NON_JS_STATE = 0,
+  IN_JS_STATE = 1
+};
+
+
+// Optimization sampler constants.
+static const int kSamplerFrameCount = 2;
+static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
+static const int kSamplerWindowSize = 16;
+
+static const int kSamplerTicksBetweenThresholdAdjustment = 32;
+
+static const int kSamplerThresholdInit = 3;
+static const int kSamplerThresholdMin = 1;
+static const int kSamplerThresholdDelta = 1;
+
+static const int kSamplerThresholdSizeFactorInit = 3;
+static const int kSamplerThresholdSizeFactorMin = 1;
+static const int kSamplerThresholdSizeFactorDelta = 1;
+
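+// Functions whose source is larger than kSizeLimit get their
+// optimization threshold scaled up by sampler_threshold_size_factor
+// (see OptimizeNow).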
+static const int kSizeLimit = 1500;
+
+static int sampler_threshold = kSamplerThresholdInit;
+static int sampler_threshold_size_factor = kSamplerThresholdSizeFactorInit;
+
+static int sampler_ticks_until_threshold_adjustment =
+    kSamplerTicksBetweenThresholdAdjustment;
+
+// The percentage of sampled ticks spent executing JS code.
+static Atomic32 js_ratio;
+
+// The JSFunctions in the sampler window are not GC safe. Old-space
+// pointers are not cleared during mark-sweep collection and therefore
+// the window might contain stale pointers. The window is updated on
+// scavenges and (parts of it) cleared on mark-sweep and
+// mark-sweep-compact.
+static Object* sampler_window[kSamplerWindowSize] = { NULL, };
+static int sampler_window_position = 0;
+static int sampler_window_weight[kSamplerWindowSize] = { 0, };
+
+
+// Support for pending 'optimize soon' requests.
+static PendingListNode* optimize_soon_list = NULL;
+
+
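+// A pending node refers to its function through a weak global handle,
+// so an 'optimize soon' request does not keep the function alive.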
+PendingListNode::PendingListNode(JSFunction* function) : next_(NULL) {
+  function_ = GlobalHandles::Create(function);
+  start_ = OS::Ticks();
+  GlobalHandles::MakeWeak(function_.location(), this, &WeakCallback);
+}
+
+
+void PendingListNode::Destroy() {
+  if (!IsValid()) return;
+  GlobalHandles::Destroy(function_.location());
+  function_ = Handle<Object>::null();
+}
+
+
+void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) {
+  reinterpret_cast<PendingListNode*>(data)->Destroy();
+}
+
+
+static bool IsOptimizable(JSFunction* function) {
+  Code* code = function->code();
+  return code->kind() == Code::FUNCTION && code->optimizable();
+}
+
+
+static void Optimize(JSFunction* function, bool eager, int delay) {
+  ASSERT(IsOptimizable(function));
+  if (FLAG_trace_opt) {
+    PrintF("[marking (%s) ", eager ? "eagerly" : "lazily");
+    function->PrintName();
+    PrintF(" for recompilation");
+    if (delay > 0) {
+      PrintF(" (delayed %0.3f ms)", static_cast<double>(delay) / 1000);
+    }
+    PrintF("]\n");
+  }
+
+  // The next call to the function will trigger optimization.
+  function->MarkForLazyRecompilation();
+}
+
+
+static void AttemptOnStackReplacement(JSFunction* function) {
+  // See AlwaysFullCompiler (in compiler.cc) comment on why we need
+  // Debug::has_break_points().
+  ASSERT(function->IsMarkedForLazyRecompilation());
+  if (!FLAG_use_osr || Debug::has_break_points() || function->IsBuiltin()) {
+    return;
+  }
+
+  SharedFunctionInfo* shared = function->shared();
+  // If the code is not optimizable, don't try OSR.
+  if (!shared->code()->optimizable()) return;
+
+  // We are not prepared to do OSR for a function that already has an
+  // allocated arguments object.  The optimized code would bypass it for
+  // arguments accesses, which is unsound.  Don't try OSR.
+  if (shared->scope_info()->HasArgumentsShadow()) return;
+
+  // We're using on-stack replacement: patch the unoptimized code so that
+  // any back edge in any unoptimized frame will trigger on-stack
+  // replacement for that frame.
+  if (FLAG_trace_osr) {
+    PrintF("[patching stack checks in ");
+    function->PrintName();
+    PrintF(" for on-stack replacement]\n");
+  }
+
+  // Get the stack check stub code object to match against.  We aren't
+  // prepared to generate it, but we don't expect to have to.
+  StackCheckStub check_stub;
+  Object* check_code;
+  MaybeObject* maybe_check_code = check_stub.TryGetCode();
+  if (maybe_check_code->ToObject(&check_code)) {
+    Code* replacement_code = Builtins::builtin(Builtins::OnStackReplacement);
+    Code* unoptimized_code = shared->code();
+    // Iterate the unoptimized code and patch every stack check except at
+    // the function entry.  This code assumes the function entry stack
+    // check appears first, i.e., is not deferred or otherwise reordered.
+    bool first = true;
+    for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
+         !it.done();
+         it.next()) {
+      RelocInfo* rinfo = it.rinfo();
+      if (rinfo->target_address() == Code::cast(check_code)->entry()) {
+        if (first) {
+          first = false;
+        } else {
+          Deoptimizer::PatchStackCheckCode(rinfo, replacement_code);
+        }
+      }
+    }
+  }
+}
+
+
+static void ClearSampleBuffer() {
+  for (int i = 0; i < kSamplerWindowSize; i++) {
+    sampler_window[i] = NULL;
+    sampler_window_weight[i] = 0;
+  }
+}
+
+
+static void ClearSampleBufferNewSpaceEntries() {
+  for (int i = 0; i < kSamplerWindowSize; i++) {
+    if (Heap::InNewSpace(sampler_window[i])) {
+      sampler_window[i] = NULL;
+      sampler_window_weight[i] = 0;
+    }
+  }
+}
+
+
+static int LookupSample(JSFunction* function) {
+  int weight = 0;
+  for (int i = 0; i < kSamplerWindowSize; i++) {
+    Object* sample = sampler_window[i];
+    if (sample != NULL) {
+      if (function == sample) {
+        weight += sampler_window_weight[i];
+      }
+    }
+  }
+  return weight;
+}
+
+
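+// Records a sample in the circular window; the power-of-two window size
+// lets the write position wrap with a mask instead of a modulo.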
+static void AddSample(JSFunction* function, int weight) {
+  ASSERT(IsPowerOf2(kSamplerWindowSize));
+  sampler_window[sampler_window_position] = function;
+  sampler_window_weight[sampler_window_position] = weight;
+  sampler_window_position = (sampler_window_position + 1) &
+      (kSamplerWindowSize - 1);
+}
+
+
+void RuntimeProfiler::OptimizeNow() {
+  HandleScope scope;
+  PendingListNode* current = optimize_soon_list;
+  while (current != NULL) {
+    PendingListNode* next = current->next();
+    if (current->IsValid()) {
+      Handle<JSFunction> function = current->function();
+      int delay = current->Delay();
+      if (IsOptimizable(*function)) {
+        Optimize(*function, true, delay);
+      }
+    }
+    delete current;
+    current = next;
+  }
+  optimize_soon_list = NULL;
+
+  // Run through the JavaScript frames and collect them. If we already
+  // have a sample of the function, we mark it for optimization
+  // (eagerly or lazily).
+  JSFunction* samples[kSamplerFrameCount];
+  int sample_count = 0;
+  int frame_count = 0;
+  for (JavaScriptFrameIterator it;
+       frame_count++ < kSamplerFrameCount && !it.done();
+       it.Advance()) {
+    JavaScriptFrame* frame = it.frame();
+    JSFunction* function = JSFunction::cast(frame->function());
+
+    // Adjust threshold each time we have processed
+    // a certain number of ticks.
+    if (sampler_ticks_until_threshold_adjustment > 0) {
+      sampler_ticks_until_threshold_adjustment--;
+      if (sampler_ticks_until_threshold_adjustment <= 0) {
+        // If the threshold is not already at the minimum, lower it and
+        // reset the ticks until the next adjustment.
+        if (sampler_threshold > kSamplerThresholdMin) {
+          sampler_threshold -= kSamplerThresholdDelta;
+          sampler_ticks_until_threshold_adjustment =
+              kSamplerTicksBetweenThresholdAdjustment;
+        }
+      }
+    }
+
+    if (function->IsMarkedForLazyRecompilation()) {
+      Code* unoptimized = function->shared()->code();
+      int nesting = unoptimized->allow_osr_at_loop_nesting_level();
+      if (nesting == 0) AttemptOnStackReplacement(function);
+      int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
+      unoptimized->set_allow_osr_at_loop_nesting_level(new_nesting);
+    }
+
+    // Do not record non-optimizable functions.
+    if (!IsOptimizable(function)) continue;
+    samples[sample_count++] = function;
+
+    int function_size = function->shared()->SourceSize();
+    int threshold_size_factor = (function_size > kSizeLimit)
+        ? sampler_threshold_size_factor
+        : 1;
+
+    int threshold = sampler_threshold * threshold_size_factor;
+    int current_js_ratio = NoBarrier_Load(&js_ratio);
+
+    // Adjust threshold depending on the ratio of time spent
+    // in JS code.
+    if (current_js_ratio < 20) {
+      // If we spend less than 20% of the time in JS code,
+      // do not optimize.
+      continue;
+    } else if (current_js_ratio < 75) {
+      // Below 75% of time spent in JS code, only optimize very
+      // frequently used functions.
+      threshold *= 3;
+    }
+
+    if (LookupSample(function) >= threshold) {
+      Optimize(function, false, 0);
+      CompilationCache::MarkForEagerOptimizing(Handle<JSFunction>(function));
+    }
+  }
+
+  // Add the collected functions as samples. It's important not to do
+  // this as part of collecting them because this will interfere with
+  // the sample lookup in case of recursive functions.
+  for (int i = 0; i < sample_count; i++) {
+    AddSample(samples[i], kSamplerFrameWeight[i]);
+  }
+}
+
+
+void RuntimeProfiler::OptimizeSoon(JSFunction* function) {
+  if (!IsOptimizable(function)) return;
+  PendingListNode* node = new PendingListNode(function);
+  node->set_next(optimize_soon_list);
+  optimize_soon_list = node;
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
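+// Keeps a sliding window of the last kStateWindowSize sampler states
+// and publishes the percentage of ticks spent in JS code to js_ratio.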
+static void UpdateStateRatio(SamplerState current_state) {
+  static const int kStateWindowSize = 128;
+  static SamplerState state_window[kStateWindowSize];
+  static int state_window_position = 0;
+  static int state_counts[2] = { kStateWindowSize, 0 };
+
+  SamplerState old_state = state_window[state_window_position];
+  state_counts[old_state]--;
+  state_window[state_window_position] = current_state;
+  state_counts[current_state]++;
+  ASSERT(IsPowerOf2(kStateWindowSize));
+  state_window_position = (state_window_position + 1) &
+      (kStateWindowSize - 1);
+  NoBarrier_Store(&js_ratio, state_counts[IN_JS_STATE] * 100 /
+                  kStateWindowSize);
+}
+#endif
+
+
+void RuntimeProfiler::NotifyTick() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // Record state sample.
+  SamplerState state = Top::IsInJSState()
+      ? IN_JS_STATE
+      : IN_NON_JS_STATE;
+  UpdateStateRatio(state);
+  StackGuard::RequestRuntimeProfilerTick();
+#endif
+}
+
+
+void RuntimeProfiler::MarkCompactPrologue(bool is_compacting) {
+  if (is_compacting) {
+    // Clear all samples before mark-sweep-compact because every
+    // function might move.
+    ClearSampleBuffer();
+  } else {
+    // Clear only new space entries on mark-sweep since none of the
+    // old-space functions will move.
+    ClearSampleBufferNewSpaceEntries();
+  }
+}
+
+
+bool IsEqual(void* first, void* second) {
+  return first == second;
+}
+
+
+void RuntimeProfiler::Setup() {
+  ClearSampleBuffer();
+  // If the ticker hasn't already started, make sure to do so to get
+  // the ticks for the runtime profiler.
+  if (IsEnabled()) Logger::EnsureTickerStarted();
+}
+
+
+void RuntimeProfiler::Reset() {
+  sampler_threshold = kSamplerThresholdInit;
+  sampler_ticks_until_threshold_adjustment =
+      kSamplerTicksBetweenThresholdAdjustment;
+  sampler_threshold_size_factor = kSamplerThresholdSizeFactorInit;
+}
+
+
+void RuntimeProfiler::TearDown() {
+  // Nothing to do.
+}
+
+
+Object** RuntimeProfiler::SamplerWindowAddress() {
+  return sampler_window;
+}
+
+
+int RuntimeProfiler::SamplerWindowSize() {
+  return kSamplerWindowSize;
+}
+
+
+bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  static const int kNonJSTicksThreshold = 100;
+  // We suspend the runtime profiler thread when not running
+  // JavaScript. If the CPU profiler is active we must not do this
+  // because it samples both JavaScript and C++ code.
+  if (RuntimeProfiler::IsEnabled() &&
+      !CpuProfiler::is_profiling() &&
+      !(FLAG_prof && FLAG_prof_auto)) {
+    if (Top::IsInJSState()) {
+      non_js_ticks_ = 0;
+    } else {
+      if (non_js_ticks_ < kNonJSTicksThreshold) {
+        ++non_js_ticks_;
+      } else {
+        if (Top::WaitForJSState()) return true;
+      }
+    }
+  }
+#endif
+  return false;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
new file mode 100644
index 0000000..e041c05
--- /dev/null
+++ b/src/runtime-profiler.h
@@ -0,0 +1,76 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_RUNTIME_PROFILER_H_
+#define V8_RUNTIME_PROFILER_H_
+
+#include "v8.h"
+#include "allocation.h"
+
+namespace v8 {
+namespace internal {
+
+class RuntimeProfiler : public AllStatic {
+ public:
+  static bool IsEnabled() { return V8::UseCrankshaft() && FLAG_opt; }
+
+  static void OptimizeNow();
+  static void OptimizeSoon(JSFunction* function);
+
+  static void NotifyTick();
+
+  static void Setup();
+  static void Reset();
+  static void TearDown();
+
+  static void MarkCompactPrologue(bool is_compacting);
+  static Object** SamplerWindowAddress();
+  static int SamplerWindowSize();
+};
+
+
+// Rate limiter intended to be used in the profiler thread.
+class RuntimeProfilerRateLimiter BASE_EMBEDDED {
+ public:
+  RuntimeProfilerRateLimiter() : non_js_ticks_(0) { }
+
+  // Suspends the current thread when not executing JavaScript to
+  // minimize CPU usage. Returns whether this thread was suspended
+  // (and so might have to check whether profiling is still active).
+  //
+  // Does nothing when runtime profiling is not enabled.
+  bool SuspendIfNecessary();
+
+ private:
+  int non_js_ticks_;
+
+  DISALLOW_COPY_AND_ASSIGN(RuntimeProfilerRateLimiter);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_RUNTIME_PROFILER_H_
diff --git a/src/runtime.cc b/src/runtime.cc
index 2324e62..724a436 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,16 +33,19 @@
 #include "api.h"
 #include "arguments.h"
 #include "codegen.h"
+#include "compilation-cache.h"
 #include "compiler.h"
 #include "cpu.h"
 #include "dateparser-inl.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "execution.h"
 #include "jsregexp.h"
 #include "liveedit.h"
 #include "parser.h"
 #include "platform.h"
 #include "runtime.h"
+#include "runtime-profiler.h"
 #include "scopeinfo.h"
 #include "smart-pointer.h"
 #include "stub-cache.h"
@@ -635,90 +638,6 @@
 }
 
 
-static bool CheckAccessException(LookupResult* result,
-                                 v8::AccessType access_type) {
-  if (result->type() == CALLBACKS) {
-    Object* callback = result->GetCallbackObject();
-    if (callback->IsAccessorInfo()) {
-      AccessorInfo* info = AccessorInfo::cast(callback);
-      bool can_access =
-          (access_type == v8::ACCESS_HAS &&
-              (info->all_can_read() || info->all_can_write())) ||
-          (access_type == v8::ACCESS_GET && info->all_can_read()) ||
-          (access_type == v8::ACCESS_SET && info->all_can_write());
-      return can_access;
-    }
-  }
-
-  return false;
-}
-
-
-static bool CheckAccess(JSObject* obj,
-                        String* name,
-                        LookupResult* result,
-                        v8::AccessType access_type) {
-  ASSERT(result->IsProperty());
-
-  JSObject* holder = result->holder();
-  JSObject* current = obj;
-  while (true) {
-    if (current->IsAccessCheckNeeded() &&
-        !Top::MayNamedAccess(current, name, access_type)) {
-      // Access check callback denied the access, but some properties
-      // can have a special permissions which override callbacks descision
-      // (currently see v8::AccessControl).
-      break;
-    }
-
-    if (current == holder) {
-      return true;
-    }
-
-    current = JSObject::cast(current->GetPrototype());
-  }
-
-  // API callbacks can have per callback access exceptions.
-  switch (result->type()) {
-    case CALLBACKS: {
-      if (CheckAccessException(result, access_type)) {
-        return true;
-      }
-      break;
-    }
-    case INTERCEPTOR: {
-      // If the object has an interceptor, try real named properties.
-      // Overwrite the result to fetch the correct property later.
-      holder->LookupRealNamedProperty(name, result);
-      if (result->IsProperty()) {
-        if (CheckAccessException(result, access_type)) {
-          return true;
-        }
-      }
-      break;
-    }
-    default:
-      break;
-  }
-
-  Top::ReportFailedAccessCheck(current, access_type);
-  return false;
-}
-
-
-// TODO(1095): we should traverse hidden prototype hierachy as well.
-static bool CheckElementAccess(JSObject* obj,
-                               uint32_t index,
-                               v8::AccessType access_type) {
-  if (obj->IsAccessCheckNeeded() &&
-      !Top::MayIndexedAccess(obj, index, access_type)) {
-    return false;
-  }
-
-  return true;
-}
-
-
 // Enumerator used as indices into the array returned from GetOwnProperty
 enum PropertyDescriptorIndices {
   IS_ACCESSOR_INDEX,
@@ -761,7 +680,7 @@
         // subsequent cases.
         Handle<JSValue> js_value = Handle<JSValue>::cast(obj);
         Handle<String> str(String::cast(js_value->value()));
-        Handle<String> substr = SubString(str, index, index + 1, NOT_TENURED);
+        Handle<String> substr = SubString(str, index, index+1, NOT_TENURED);
 
         elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
         elms->set(VALUE_INDEX, *substr);
@@ -774,7 +693,8 @@
       case JSObject::INTERCEPTED_ELEMENT:
       case JSObject::FAST_ELEMENT: {
         elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
-        elms->set(VALUE_INDEX, *GetElement(obj, index));
+        Handle<Object> element = GetElement(Handle<Object>(obj), index);
+        elms->set(VALUE_INDEX, *element);
         elms->set(WRITABLE_INDEX, Heap::true_value());
         elms->set(ENUMERABLE_INDEX,  Heap::true_value());
         elms->set(CONFIGURABLE_INDEX, Heap::true_value());
@@ -782,14 +702,7 @@
       }
 
       case JSObject::DICTIONARY_ELEMENT: {
-        Handle<JSObject> holder = obj;
-        if (obj->IsJSGlobalProxy()) {
-          Object* proto = obj->GetPrototype();
-          if (proto->IsNull()) return Heap::undefined_value();
-          ASSERT(proto->IsJSGlobalObject());
-          holder = Handle<JSObject>(JSObject::cast(proto));
-        }
-        NumberDictionary* dictionary = holder->element_dictionary();
+        NumberDictionary* dictionary = obj->element_dictionary();
         int entry = dictionary->FindEntry(index);
         ASSERT(entry != NumberDictionary::kNotFound);
         PropertyDetails details = dictionary->DetailsAt(entry);
@@ -799,18 +712,14 @@
             FixedArray* callbacks =
                 FixedArray::cast(dictionary->ValueAt(entry));
             elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
-            if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
-              elms->set(GETTER_INDEX, callbacks->get(0));
-            }
-            if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
-              elms->set(SETTER_INDEX, callbacks->get(1));
-            }
+            elms->set(GETTER_INDEX, callbacks->get(0));
+            elms->set(SETTER_INDEX, callbacks->get(1));
             break;
           }
           case NORMAL:
             // This is a data property.
             elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
-            elms->set(VALUE_INDEX, *GetElement(obj, index));
+            elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
             elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
             break;
           default:
@@ -830,41 +739,35 @@
   if (!result.IsProperty()) {
     return Heap::undefined_value();
   }
-
-  if (!CheckAccess(*obj, *name, &result, v8::ACCESS_HAS)) {
-    return Heap::false_value();
+  if (result.type() == CALLBACKS) {
+    Object* structure = result.GetCallbackObject();
+    if (structure->IsProxy() || structure->IsAccessorInfo()) {
+      // Property that is internally implemented as a callback or
+      // an API defined callback.
+      Object* value;
+      { MaybeObject* maybe_value = obj->GetPropertyWithCallback(
+            *obj, structure, *name, result.holder());
+        if (!maybe_value->ToObject(&value)) return maybe_value;
+      }
+      elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+      elms->set(VALUE_INDEX, value);
+      elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
+    } else if (structure->IsFixedArray()) {
+      // __defineGetter__/__defineSetter__ callback.
+      elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
+      elms->set(GETTER_INDEX, FixedArray::cast(structure)->get(0));
+      elms->set(SETTER_INDEX, FixedArray::cast(structure)->get(1));
+    } else {
+      return Heap::undefined_value();
+    }
+  } else {
+    elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+    elms->set(VALUE_INDEX, result.GetLazyValue());
+    elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
   }
 
   elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!result.IsDontEnum()));
   elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!result.IsDontDelete()));
-
-  bool is_js_accessor = (result.type() == CALLBACKS) &&
-                        (result.GetCallbackObject()->IsFixedArray());
-
-  if (is_js_accessor) {
-    // __defineGetter__/__defineSetter__ callback.
-    elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
-
-    FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
-    if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
-      elms->set(GETTER_INDEX, structure->get(0));
-    }
-    if (CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
-      elms->set(SETTER_INDEX, structure->get(1));
-    }
-  } else {
-    elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
-    elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
-
-    PropertyAttributes attrs;
-    Object* value;
-    // GetProperty will check access and report any violations.
-    { MaybeObject* maybe_value = obj->GetProperty(*obj, &result, *name, &attrs);
-      if (!maybe_value->ToObject(&value)) return maybe_value;
-    }
-    elms->set(VALUE_INDEX, value);
-  }
-
   return *desc;
 }
 
@@ -1740,14 +1643,13 @@
 static MaybeObject* Runtime_FunctionGetPositionForOffset(Arguments args) {
   ASSERT(args.length() == 2);
 
-  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_CHECKED(Code, code, args[0]);
   CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
 
-  Code* code = fun->code();
   RUNTIME_ASSERT(0 <= offset && offset < code->Size());
 
   Address pc = code->address() + offset;
-  return Smi::FromInt(fun->code()->SourcePosition(pc));
+  return Smi::FromInt(code->SourcePosition(pc));
 }
 
 
@@ -1824,10 +1726,14 @@
     if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
       return Failure::Exception();
     }
+    // Since we don't store the source for this we should never
+    // optimize this.
+    shared->code()->set_optimizable(false);
+
     // Set the code, scope info, formal parameter count,
     // and the length of the target function.
     target->shared()->set_code(shared->code());
-    target->set_code(shared->code());
+    target->ReplaceCode(shared->code());
     target->shared()->set_scope_info(shared->scope_info());
     target->shared()->set_length(shared->length());
     target->shared()->set_formal_parameter_count(
@@ -1857,6 +1763,7 @@
     // It's okay to skip the write barrier here because the literals
     // are guaranteed to be in old space.
     target->set_literals(*literals, SKIP_WRITE_BARRIER);
+    target->set_next_function_link(Heap::undefined_value());
   }
 
   target->set_context(*context);
@@ -2119,10 +2026,7 @@
   }
 
   Handle<JSArray> GetParts() {
-    Handle<JSArray> result =
-        Factory::NewJSArrayWithElements(array_builder_.array());
-    result->set_length(Smi::FromInt(array_builder_.length()));
-    return result;
+    return array_builder_.ToJSArray();
   }
 
  private:
@@ -2697,7 +2601,7 @@
 
 // Perform string match of pattern on subject, starting at start index.
 // Caller must ensure that 0 <= start_index <= sub->length(),
-// and should check that pat->length() + start_index <= sub->length()
+// and should check that pat->length() + start_index <= sub->length().
 int Runtime::StringMatch(Handle<String> sub,
                          Handle<String> pat,
                          int start_index) {
@@ -3296,7 +3200,7 @@
   if (regexp->TypeTag() == JSRegExp::ATOM) {
     Handle<String> pattern(
         String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex)));
-    if (!pattern->IsFlat()) FlattenString(pattern);
+    ASSERT(pattern->IsFlat());
     if (SearchStringMultiple(subject, pattern, last_match_info, &builder)) {
       return *builder.ToJSArray(result_array);
     }
@@ -3596,7 +3500,8 @@
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
   CONVERT_CHECKED(String, name, args[1]);
   CONVERT_CHECKED(Smi, flag_setter, args[2]);
-  CONVERT_CHECKED(JSFunction, fun, args[3]);
+  Object* fun = args[3];
+  RUNTIME_ASSERT(fun->IsJSFunction() || fun->IsUndefined());
   CONVERT_CHECKED(Smi, flag_attr, args[4]);
   int unchecked = flag_attr->value();
   RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
@@ -3652,7 +3557,7 @@
   }
 
   LookupResult result;
-  js_object->LocalLookupRealNamedProperty(*name, &result);
+  js_object->LookupRealNamedProperty(*name, &result);
 
   // Take special care when attributes are different and there is already
   // a property. For simplicity we normalize the property which enables us
@@ -3660,7 +3565,8 @@
   // map. The current version of SetObjectProperty does not handle attributes
   // correctly in the case where a property is a field and is reset with
   // new attributes.
-  if (result.IsProperty() && attr != result.GetAttributes()) {
+  if (result.IsProperty() &&
+      (attr != result.GetAttributes() || result.type() == CALLBACKS)) {
     // New attributes - normalize to avoid writing to instance descriptor
     NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
     // Use IgnoreAttributes version since a readonly property may be
@@ -4624,6 +4530,222 @@
 }
 
 
+static const unsigned int kQuoteTableLength = 128u;
+
+static const int kJsonQuotesCharactersPerEntry = 8;
+static const char* const JsonQuotes =
+    "\\u0000  \\u0001  \\u0002  \\u0003  "
+    "\\u0004  \\u0005  \\u0006  \\u0007  "
+    "\\b      \\t      \\n      \\u000b  "
+    "\\f      \\r      \\u000e  \\u000f  "
+    "\\u0010  \\u0011  \\u0012  \\u0013  "
+    "\\u0014  \\u0015  \\u0016  \\u0017  "
+    "\\u0018  \\u0019  \\u001a  \\u001b  "
+    "\\u001c  \\u001d  \\u001e  \\u001f  "
+    "        !       \\\"      #       "
+    "$       %       &       '       "
+    "(       )       *       +       "
+    ",       -       .       /       "
+    "0       1       2       3       "
+    "4       5       6       7       "
+    "8       9       :       ;       "
+    "<       =       >       ?       "
+    "@       A       B       C       "
+    "D       E       F       G       "
+    "H       I       J       K       "
+    "L       M       N       O       "
+    "P       Q       R       S       "
+    "T       U       V       W       "
+    "X       Y       Z       [       "
+    "\\\\      ]       ^       _       "
+    "`       a       b       c       "
+    "d       e       f       g       "
+    "h       i       j       k       "
+    "l       m       n       o       "
+    "p       q       r       s       "
+    "t       u       v       w       "
+    "x       y       z       {       "
+    "|       }       ~       \177       ";
+
+
+// For a string that is less than 32k characters, it should always be
+// possible to allocate it in new space.
+static const int kMaxGuaranteedNewSpaceString = 32 * 1024;
+
+
+// Doing JSON quoting cannot make the string more than this many times larger.
+static const int kJsonQuoteWorstCaseBlowup = 6;
+
+
+// Covers the entire ASCII range (all other characters are unchanged by JSON
+// quoting).
+static const byte JsonQuoteLengths[kQuoteTableLength] = {
+    6, 6, 6, 6, 6, 6, 6, 6,
+    2, 2, 2, 6, 2, 2, 6, 6,
+    6, 6, 6, 6, 6, 6, 6, 6,
+    6, 6, 6, 6, 6, 6, 6, 6,
+    1, 1, 2, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 2, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+    1, 1, 1, 1, 1, 1, 1, 1,
+};
+
+
+template <typename StringType>
+MaybeObject* AllocateRawString(int length);
+
+
+template <>
+MaybeObject* AllocateRawString<SeqTwoByteString>(int length) {
+  return Heap::AllocateRawTwoByteString(length);
+}
+
+
+template <>
+MaybeObject* AllocateRawString<SeqAsciiString>(int length) {
+  return Heap::AllocateRawAsciiString(length);
+}
+
+
+template <typename Char, typename StringType>
+static MaybeObject* SlowQuoteJsonString(Vector<const Char> characters) {
+  int length = characters.length();
+  const Char* read_cursor = characters.start();
+  const Char* end = read_cursor + length;
+  const int kSpaceForQuotes = 2;
+  int quoted_length = kSpaceForQuotes;
+  while (read_cursor < end) {
+    Char c = *(read_cursor++);
+    if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
+      quoted_length++;
+    } else {
+      quoted_length += JsonQuoteLengths[static_cast<unsigned>(c)];
+    }
+  }
+  MaybeObject* new_alloc = AllocateRawString<StringType>(quoted_length);
+  Object* new_object;
+  if (!new_alloc->ToObject(&new_object)) {
+    return new_alloc;
+  }
+  StringType* new_string = StringType::cast(new_object);
+
+  Char* write_cursor = reinterpret_cast<Char*>(
+      new_string->address() + SeqAsciiString::kHeaderSize);
+  *(write_cursor++) = '"';
+
+  read_cursor = characters.start();
+  while (read_cursor < end) {
+    Char c = *(read_cursor++);
+    if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
+      *(write_cursor++) = c;
+    } else {
+      int len = JsonQuoteLengths[static_cast<unsigned>(c)];
+      const char* replacement = JsonQuotes +
+          static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
+      for (int i = 0; i < len; i++) {
+        *write_cursor++ = *replacement++;
+      }
+    }
+  }
+  *(write_cursor++) = '"';
+  return new_string;
+}
+
+
+template <typename Char, typename StringType>
+static MaybeObject* QuoteJsonString(Vector<const Char> characters) {
+  int length = characters.length();
+  Counters::quote_json_char_count.Increment(length);
+  const int kSpaceForQuotes = 2;
+  int worst_case_length = length * kJsonQuoteWorstCaseBlowup + kSpaceForQuotes;
+  if (worst_case_length > kMaxGuaranteedNewSpaceString) {
+    return SlowQuoteJsonString<Char, StringType>(characters);
+  }
+
+  MaybeObject* new_alloc = AllocateRawString<StringType>(worst_case_length);
+  Object* new_object;
+  if (!new_alloc->ToObject(&new_object)) {
+    return new_alloc;
+  }
+  if (!Heap::new_space()->Contains(new_object)) {
+    // Even if our string is small enough to fit in new space, we still have
+    // to handle it being allocated in old space, as may happen on the third
+    // attempt.  See CALL_AND_RETRY in heap-inl.h and similar code in
+    // CEntryStub::GenerateCore.
+    return SlowQuoteJsonString<Char, StringType>(characters);
+  }
+  StringType* new_string = StringType::cast(new_object);
+  ASSERT(Heap::new_space()->Contains(new_string));
+
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  Char* write_cursor = reinterpret_cast<Char*>(
+      new_string->address() + SeqAsciiString::kHeaderSize);
+  *(write_cursor++) = '"';
+
+  const Char* read_cursor = characters.start();
+  const Char* end = read_cursor + length;
+  while (read_cursor < end) {
+    Char c = *(read_cursor++);
+    if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
+      *(write_cursor++) = c;
+    } else {
+      int len = JsonQuoteLengths[static_cast<unsigned>(c)];
+      const char* replacement = JsonQuotes +
+          static_cast<unsigned>(c) * kJsonQuotesCharactersPerEntry;
+      write_cursor[0] = replacement[0];
+      if (len > 1) {
+        write_cursor[1] = replacement[1];
+        if (len > 2) {
+          ASSERT(len == 6);
+          write_cursor[2] = replacement[2];
+          write_cursor[3] = replacement[3];
+          write_cursor[4] = replacement[4];
+          write_cursor[5] = replacement[5];
+        }
+      }
+      write_cursor += len;
+    }
+  }
+  *(write_cursor++) = '"';
+
+  int final_length = static_cast<int>(
+      write_cursor - reinterpret_cast<Char*>(
+          new_string->address() + SeqAsciiString::kHeaderSize));
+  Heap::new_space()->ShrinkStringAtAllocationBoundary<StringType>(new_string,
+                                                                  final_length);
+  return new_string;
+}
+
+
+static MaybeObject* Runtime_QuoteJSONString(Arguments args) {
+  NoHandleAllocation ha;
+  CONVERT_CHECKED(String, str, args[0]);
+  if (!str->IsFlat()) {
+    MaybeObject* try_flatten = str->TryFlatten();
+    Object* flat;
+    if (!try_flatten->ToObject(&flat)) {
+      return try_flatten;
+    }
+    str = String::cast(flat);
+    ASSERT(str->IsFlat());
+  }
+  if (str->IsTwoByteRepresentation()) {
+    return QuoteJsonString<uc16, SeqTwoByteString>(str->ToUC16Vector());
+  } else {
+    return QuoteJsonString<char, SeqAsciiString>(str->ToAsciiVector());
+  }
+}
+
+
+
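The two QuoteJsonString paths above trade allocation strategy for passes: the fast path allocates for the worst case (every character blowing up to a six-character \uXXXX escape, plus two quotes) and shrinks afterwards, so with the 32k new-space guarantee only inputs of up to (32768 - 2) / 6 = 5461 characters take it; the slow path measures first and allocates exactly. A standalone sketch of the same table-driven idea -- not V8 code, with the length table collapsed into a helper:

#include <cstdio>
#include <string>

// Escape length per ASCII byte, mirroring the role of JsonQuoteLengths:
// 2 for the short escapes (\b \t \n \f \r \" \\), 6 for other control
// characters (\u00XX), 1 for everything that passes through unchanged.
static int EscapedLength(unsigned char c) {
  if (c < 0x20) {
    return (c == '\b' || c == '\t' || c == '\n' || c == '\f' || c == '\r')
        ? 2 : 6;
  }
  return (c == '"' || c == '\\') ? 2 : 1;
}

static std::string QuoteJson(const std::string& in) {
  // First pass: compute the exact output length, as SlowQuoteJsonString does.
  size_t quoted_length = 2;  // space for the surrounding quotes
  for (size_t i = 0; i < in.size(); i++) {
    quoted_length += EscapedLength(static_cast<unsigned char>(in[i]));
  }
  std::string out;
  out.reserve(quoted_length);
  // Second pass: emit the replacements.
  out.push_back('"');
  for (size_t i = 0; i < in.size(); i++) {
    unsigned char c = static_cast<unsigned char>(in[i]);
    int len = EscapedLength(c);
    if (len == 1) {
      out.push_back(static_cast<char>(c));
    } else if (len == 2) {
      out.push_back('\\');
      out.push_back(c == '\b' ? 'b' : c == '\t' ? 't' : c == '\n' ? 'n' :
                    c == '\f' ? 'f' : c == '\r' ? 'r' : static_cast<char>(c));
    } else {
      char buf[8];
      std::snprintf(buf, sizeof(buf), "\\u%04x", static_cast<unsigned>(c));
      out.append(buf, 6);
    }
  }
  out.push_back('"');
  return out;
}

For example, QuoteJson("a\tb") produces the five-character result "a\tb" including the surrounding quotes.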
 static MaybeObject* Runtime_StringParseInt(Arguments args) {
   NoHandleAllocation ha;
 
@@ -5278,6 +5400,13 @@
 }
 
 
+static MaybeObject* Runtime_AllocateHeapNumber(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 0);
+  return Heap::AllocateHeapNumber(0);
+}
+
+
 static MaybeObject* Runtime_NumberAdd(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
@@ -5886,37 +6015,6 @@
 }
 
 
-// Helper function to compute x^y, where y is known to be an
-// integer. Uses binary decomposition to limit the number of
-// multiplications; see the discussion in "Hacker's Delight" by Henry
-// S. Warren, Jr., figure 11-6, page 213.
-static double powi(double x, int y) {
-  ASSERT(y != kMinInt);
-  unsigned n = (y < 0) ? -y : y;
-  double m = x;
-  double p = 1;
-  while (true) {
-    if ((n & 1) != 0) p *= m;
-    n >>= 1;
-    if (n == 0) {
-      if (y < 0) {
-        // Unfortunately, we have to be careful when p has reached
-        // infinity in the computation, because sometimes the higher
-        // internal precision in the pow() implementation would have
-        // given us a finite p. This happens very rarely.
-        double result = 1.0 / p;
-        return (result == 0 && isinf(p))
-            ? pow(x, static_cast<double>(y))  // Avoid pow(double, int).
-            : result;
-      } else {
-        return p;
-      }
-    }
-    m *= m;
-  }
-}
-
-
 static MaybeObject* Runtime_Math_pow(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
@@ -5928,31 +6026,11 @@
   // custom powi() function than the generic pow().
   if (args[1]->IsSmi()) {
     int y = Smi::cast(args[1])->value();
-    return Heap::NumberFromDouble(powi(x, y));
+    return Heap::NumberFromDouble(power_double_int(x, y));
   }
 
   CONVERT_DOUBLE_CHECKED(y, args[1]);
-
-  if (!isinf(x)) {
-    if (y == 0.5) {
-      // It's not uncommon to use Math.pow(x, 0.5) to compute the
-      // square root of a number. To speed up such computations, we
-      // explictly check for this case and use the sqrt() function
-      // which is faster than pow().
-      return Heap::AllocateHeapNumber(sqrt(x));
-    } else if (y == -0.5) {
-      // Optimized using Math.pow(x, -0.5) == 1 / Math.pow(x, 0.5).
-      return Heap::AllocateHeapNumber(1.0 / sqrt(x));
-    }
-  }
-
-  if (y == 0) {
-    return Smi::FromInt(1);
-  } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
-    return Heap::nan_value();
-  } else {
-    return Heap::AllocateHeapNumber(pow(x, y));
-  }
+  return Heap::AllocateHeapNumber(power_double_double(x, y));
 }
 
 // Fast version of Math.pow if we know that y is not an integer and
@@ -5963,11 +6041,11 @@
   CONVERT_DOUBLE_CHECKED(x, args[0]);
   CONVERT_DOUBLE_CHECKED(y, args[1]);
   if (y == 0) {
-      return Smi::FromInt(1);
+    return Smi::FromInt(1);
   } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
-      return Heap::nan_value();
+    return Heap::nan_value();
   } else {
-      return Heap::AllocateHeapNumber(pow(x, y));
+    return Heap::AllocateHeapNumber(pow(x, y));
   }
 }
 
@@ -6550,9 +6628,12 @@
     }
   }
 
-  // The function should be compiled for the optimization hints to be available.
+  // The function should be compiled for the optimization hints to be
+  // available. We cannot use EnsureCompiled because that forces a
+  // compilation through the shared function info, which makes it
+  // impossible for us to optimize.
   Handle<SharedFunctionInfo> shared(function->shared());
-  EnsureCompiled(shared, CLEAR_EXCEPTION);
+  if (!function->is_compiled()) CompileLazy(function, CLEAR_EXCEPTION);
 
   if (!function->has_initial_map() &&
       shared->IsInobjectSlackTrackingInProgress()) {
@@ -6596,7 +6677,7 @@
 #ifdef DEBUG
   if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
     PrintF("[lazy: ");
-    function->shared()->name()->Print();
+    function->PrintName();
     PrintF("]\n");
   }
 #endif
@@ -6613,10 +6694,241 @@
     return Failure::Exception();
   }
 
+  // All done. Return the compiled code.
+  ASSERT(function->is_compiled());
   return function->code();
 }
 
 
+static MaybeObject* Runtime_LazyRecompile(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  Handle<JSFunction> function = args.at<JSFunction>(0);
+  // If the function is not optimizable or the debugger is active,
+  // continue using the code from the full compiler.
+  if (!function->shared()->code()->optimizable() ||
+      Debug::has_break_points()) {
+    function->ReplaceCode(function->shared()->code());
+    return function->code();
+  }
+  if (CompileOptimized(function, AstNode::kNoNumber)) {
+    return function->code();
+  }
+  function->ReplaceCode(function->shared()->code());
+  return Failure::Exception();
+}
+
+
+static MaybeObject* Runtime_NotifyDeoptimized(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  RUNTIME_ASSERT(args[0]->IsSmi());
+  Deoptimizer::BailoutType type =
+      static_cast<Deoptimizer::BailoutType>(Smi::cast(args[0])->value());
+  Deoptimizer* deoptimizer = Deoptimizer::Grab();
+  ASSERT(Heap::IsAllocationAllowed());
+  int frames = deoptimizer->output_count();
+
+  JavaScriptFrameIterator it;
+  JavaScriptFrame* frame = NULL;
+  for (int i = 0; i < frames; i++) {
+    if (i != 0) it.Advance();
+    frame = it.frame();
+    deoptimizer->InsertHeapNumberValues(frames - i - 1, frame);
+  }
+  delete deoptimizer;
+
+  RUNTIME_ASSERT(frame->function()->IsJSFunction());
+  Handle<JSFunction> function(JSFunction::cast(frame->function()));
+  Handle<Object> arguments;
+  for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
+    if (frame->GetExpression(i) == Heap::the_hole_value()) {
+      if (arguments.is_null()) {
+        // FunctionGetArguments can't throw an exception, so cast away the
+        // doubt with an assert.
+        arguments = Handle<Object>(
+            Accessors::FunctionGetArguments(*function,
+                                            NULL)->ToObjectUnchecked());
+        ASSERT(*arguments != Heap::null_value());
+        ASSERT(*arguments != Heap::undefined_value());
+      }
+      frame->SetExpression(i, *arguments);
+    }
+  }
+
+  CompilationCache::MarkForLazyOptimizing(function);
+  if (type == Deoptimizer::EAGER) {
+    RUNTIME_ASSERT(function->IsOptimized());
+  } else {
+    RUNTIME_ASSERT(!function->IsOptimized());
+  }
+
+  // Avoid doing too much work when running with --always-opt and keep
+  // the optimized code around.
+  if (FLAG_always_opt || type == Deoptimizer::LAZY) {
+    return Heap::undefined_value();
+  }
+
+  // Count the number of optimized activations of the function.
+  int activations = 0;
+  while (!it.done()) {
+    JavaScriptFrame* frame = it.frame();
+    if (frame->is_optimized() && frame->function() == *function) {
+      activations++;
+    }
+    it.Advance();
+  }
+
+  // TODO(kasperl): For now, we cannot support removing the optimized
+  // code when we have recursive invocations of the same function.
+  if (activations == 0) {
+    if (FLAG_trace_deopt) {
+      PrintF("[removing optimized code for: ");
+      function->PrintName();
+      PrintF("]\n");
+    }
+    function->ReplaceCode(function->shared()->code());
+  }
+  return Heap::undefined_value();
+}
+
+
+static MaybeObject* Runtime_NotifyOSR(Arguments args) {
+  Deoptimizer* deoptimizer = Deoptimizer::Grab();
+  delete deoptimizer;
+  return Heap::undefined_value();
+}
+
+
+static MaybeObject* Runtime_DeoptimizeFunction(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  if (!function->IsOptimized()) return Heap::undefined_value();
+
+  Deoptimizer::DeoptimizeFunction(*function);
+
+  return Heap::undefined_value();
+}
+
+
+static MaybeObject* Runtime_CompileForOnStackReplacement(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+
+  // We're not prepared to handle a function with an arguments object.
+  ASSERT(!function->shared()->scope_info()->HasArgumentsShadow());
+
+  // We have hit a back edge in an unoptimized frame for a function that was
+  // selected for on-stack replacement.  Find the unoptimized code object.
+  Handle<Code> unoptimized(function->shared()->code());
+  // Keep track of whether we've succeeded in optimizing.
+  bool succeeded = unoptimized->optimizable();
+  if (succeeded) {
+    // If we are trying to do OSR when there are already optimized
+    // activations of the function, it means (a) the function is directly or
+    // indirectly recursive and (b) an optimized invocation has been
+    // deoptimized so that we are currently in an unoptimized activation.
+    // Check for optimized activations of this function.
+    JavaScriptFrameIterator it;
+    while (succeeded && !it.done()) {
+      JavaScriptFrame* frame = it.frame();
+      succeeded = !frame->is_optimized() || frame->function() != *function;
+      it.Advance();
+    }
+  }
+
+  int ast_id = AstNode::kNoNumber;
+  if (succeeded) {
+    // The top JS function is this one, the PC is somewhere in the
+    // unoptimized code.
+    JavaScriptFrameIterator it;
+    JavaScriptFrame* frame = it.frame();
+    ASSERT(frame->function() == *function);
+    ASSERT(frame->code() == *unoptimized);
+    ASSERT(unoptimized->contains(frame->pc()));
+
+    // Use linear search of the unoptimized code's stack check table to find
+    // the AST id matching the PC.
+    Address start = unoptimized->instruction_start();
+    unsigned target_pc_offset = static_cast<unsigned>(frame->pc() - start);
+    Address table_cursor = start + unoptimized->stack_check_table_start();
+    uint32_t table_length = Memory::uint32_at(table_cursor);
+    table_cursor += kIntSize;
+    for (unsigned i = 0; i < table_length; ++i) {
+      // Table entries are (AST id, pc offset) pairs.
+      uint32_t pc_offset = Memory::uint32_at(table_cursor + kIntSize);
+      if (pc_offset == target_pc_offset) {
+        ast_id = static_cast<int>(Memory::uint32_at(table_cursor));
+        break;
+      }
+      table_cursor += 2 * kIntSize;
+    }
+    ASSERT(ast_id != AstNode::kNoNumber);
+    if (FLAG_trace_osr) {
+      PrintF("[replacing on-stack at AST id %d in ", ast_id);
+      function->PrintName();
+      PrintF("]\n");
+    }
+
+    // Try to compile the optimized code.  A true return value from
+    // CompileOptimized means that compilation succeeded, not necessarily
+    // that optimization succeeded.
+    if (CompileOptimized(function, ast_id) && function->IsOptimized()) {
+      DeoptimizationInputData* data = DeoptimizationInputData::cast(
+          function->code()->deoptimization_data());
+      if (data->OsrPcOffset()->value() >= 0) {
+        if (FLAG_trace_osr) {
+          PrintF("[on-stack replacement offset %d in optimized code]\n",
+                 data->OsrPcOffset()->value());
+        }
+        ASSERT(data->OsrAstId()->value() == ast_id);
+      } else {
+        // We may never generate the desired OSR entry if we emit an
+        // early deoptimization.
+        succeeded = false;
+      }
+    } else {
+      succeeded = false;
+    }
+  }
+
+  // Revert to the original stack checks in the original unoptimized code.
+  if (FLAG_trace_osr) {
+    PrintF("[restoring original stack checks in ");
+    function->PrintName();
+    PrintF("]\n");
+  }
+  StackCheckStub check_stub;
+  Handle<Code> check_code = check_stub.GetCode();
+  Handle<Code> replacement_code(
+      Builtins::builtin(Builtins::OnStackReplacement));
+  // Iterate the unoptimized code and revert all the patched stack checks.
+  for (RelocIterator it(*unoptimized, RelocInfo::kCodeTargetMask);
+       !it.done();
+       it.next()) {
+    RelocInfo* rinfo = it.rinfo();
+    if (rinfo->target_address() == replacement_code->entry()) {
+      Deoptimizer::RevertStackCheckCode(rinfo, *check_code);
+    }
+  }
+
+  // Allow OSR only at nesting level zero again.
+  unoptimized->set_allow_osr_at_loop_nesting_level(0);
+
+  // If the optimization attempt succeeded, return the AST id tagged as a
+  // smi. This tells the builtin that we need to translate the unoptimized
+  // frame to an optimized one.
+  if (succeeded) {
+    ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
+    return Smi::FromInt(ast_id);
+  } else {
+    return Smi::FromInt(-1);
+  }
+}
+
+
 static MaybeObject* Runtime_GetFunctionDelegate(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 1);
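The stack check table that Runtime_CompileForOnStackReplacement walks above is a uint32 entry count followed by (AST id, pc offset) uint32 pairs. The same linear lookup over a plain in-memory array, as a sketch (the -1 sentinel stands in for AstNode::kNoNumber):

#include <stdint.h>

// Linear search of a stack check table laid out as:
//   [length][ast_id 0][pc_offset 0][ast_id 1][pc_offset 1]...
// Returns the AST id whose stack check sits at target_pc_offset,
// or -1 if no entry matches.
static int FindAstIdForPcOffset(const uint32_t* table,
                                uint32_t target_pc_offset) {
  uint32_t length = table[0];
  const uint32_t* cursor = table + 1;
  for (uint32_t i = 0; i < length; i++, cursor += 2) {
    if (cursor[1] == target_pc_offset) return static_cast<int>(cursor[0]);
  }
  return -1;
}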
@@ -7351,13 +7663,13 @@
 }
 
 
-// Push an array unto an array of arrays if it is not already in the
+// Push an object onto an array of objects if it is not already in the
 // array.  Returns true if the element was pushed on the stack and
 // false otherwise.
 static MaybeObject* Runtime_PushIfAbsent(Arguments args) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSArray, array, args[0]);
-  CONVERT_CHECKED(JSArray, element, args[1]);
+  CONVERT_CHECKED(JSObject, element, args[1]);
   RUNTIME_ASSERT(array->HasFastElements());
   int length = Smi::cast(array->length())->value();
   FixedArray* elements = FixedArray::cast(array->elements());
@@ -7894,7 +8206,7 @@
     int keys_length = keys->length();
     for (int i = 0; i < keys_length; i++) {
       Object* key = keys->get(i);
-      uint32_t index;
+      uint32_t index = 0;
       if (!key->ToArrayIndex(&index) || index >= length) {
         // Zap invalid keys.
         keys->set_undefined(i);
@@ -8021,6 +8333,7 @@
         MaybeObject* maybe_value = receiver->GetPropertyWithCallback(
             receiver, structure, name, result->holder());
         if (!maybe_value->ToObject(&value)) {
+          if (maybe_value->IsRetryAfterGC()) return maybe_value;
           ASSERT(maybe_value->IsException());
           maybe_value = Top::pending_exception();
           Top::clear_pending_exception();
@@ -8321,6 +8634,9 @@
   }
   if (it.done()) return Heap::undefined_value();
 
+  bool is_optimized_frame =
+      it.frame()->code()->kind() == Code::OPTIMIZED_FUNCTION;
+
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
   SaveContext* save = Top::save_context();
@@ -8352,18 +8668,28 @@
   // (e.g. .result)?  For users of the debugger, they will probably be
   // confusing.
   Handle<FixedArray> locals = Factory::NewFixedArray(info.NumberOfLocals() * 2);
-  for (int i = 0; i < info.NumberOfLocals(); i++) {
-    // Name of the local.
-    locals->set(i * 2, *info.LocalName(i));
 
-    // Fetch the value of the local - either from the stack or from a
-    // heap-allocated context.
-    if (i < info.number_of_stack_slots()) {
+  // Fill in the names of the locals.
+  for (int i = 0; i < info.NumberOfLocals(); i++) {
+    locals->set(i * 2, *info.LocalName(i));
+  }
+
+  // Fill in the values of the locals.
+  for (int i = 0; i < info.NumberOfLocals(); i++) {
+    if (is_optimized_frame) {
+      // If we are inspecting an optimized frame, use undefined as the
+      // value for all locals.
+      //
+      // TODO(3141533): We should be able to get the correct values
+      // for locals in optimized frames.
+      locals->set(i * 2 + 1, Heap::undefined_value());
+    } else if (i < info.number_of_stack_slots()) {
+      // Get the value from the stack.
       locals->set(i * 2 + 1, it.frame()->GetExpression(i));
     } else {
-      Handle<String> name = info.LocalName(i);
       // Traverse the context chain to the function context as all local
       // variables stored in the context will be on the function context.
+      Handle<String> name = info.LocalName(i);
       while (!context->is_function_context()) {
         context = Handle<Context>(context->previous());
       }
@@ -8373,8 +8699,12 @@
     }
   }
 
-  // Check whether this frame is positioned at return.
-  int at_return = (index == 0) ? Debug::IsBreakAtReturn(it.frame()) : false;
+  // Check whether this frame is positioned at return. If it is not the
+  // top frame or if the frame is optimized, it cannot be at a return.
+  bool at_return = false;
+  if (!is_optimized_frame && index == 0) {
+    at_return = Debug::IsBreakAtReturn(it.frame());
+  }
 
   // If positioned just before return find the value to be returned and add it
   // to the frame information.
@@ -8468,8 +8798,13 @@
       details->set(details_index++, Heap::undefined_value());
     }
 
-    // Parameter value.
-    if (i < it.frame()->GetProvidedParametersCount()) {
+    // Parameter value. If we are inspecting an optimized frame, use
+    // undefined as the value.
+    //
+    // TODO(3141533): We should be able to get the actual parameter
+    // value for optimized frames.
+    if (!is_optimized_frame &&
+        (i < it.frame()->GetProvidedParametersCount())) {
       details->set(details_index++, it.frame()->GetParameter(i));
     } else {
       details->set(details_index++, Heap::undefined_value());
@@ -9063,7 +9398,7 @@
   // Iterate the heap looking for SharedFunctionInfo generated from the
   // script. The inner most SharedFunctionInfo containing the source position
   // for the requested break point is found.
-  // NOTE: This might reqire several heap iterations. If the SharedFunctionInfo
+  // NOTE: This might require several heap iterations. If the SharedFunctionInfo
   // which is found is not compiled it is compiled and the heap is iterated
   // again as the compilation might create inner functions from the newly
   // compiled function and the actual requested break point might be in one of
@@ -9345,7 +9680,7 @@
 
   // Check the execution state and decode arguments frame and source to be
   // evaluated.
-  ASSERT(args.length() == 4);
+  ASSERT(args.length() == 5);
   Object* check_result;
   { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args);
     if (!maybe_check_result->ToObject(&check_result)) {
@@ -9355,6 +9690,7 @@
   CONVERT_CHECKED(Smi, wrapped_id, args[1]);
   CONVERT_ARG_CHECKED(String, source, 2);
   CONVERT_BOOLEAN_CHECKED(disable_break, args[3]);
+  Handle<Object> additional_context(args[4]);
 
   // Handle the processing of break.
   DisableBreak disable_break_save(disable_break);
@@ -9405,6 +9741,11 @@
   Handle<Context> function_context(frame_context->fcontext());
   context = CopyWithContextChain(frame_context, context);
 
+  if (additional_context->IsJSObject()) {
+    context = Factory::NewWithContext(context,
+        Handle<JSObject>::cast(additional_context), false);
+  }
+
   // Wrap the evaluation statement in a new function compiled in the newly
   // created context. The function has one parameter which has to be called
   // 'arguments'. This it to have access to what would have been 'arguments' in
@@ -9459,7 +9800,7 @@
 
   // Check the execution state and decode arguments frame and source to be
   // evaluated.
-  ASSERT(args.length() == 3);
+  ASSERT(args.length() == 4);
   Object* check_result;
   { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args);
     if (!maybe_check_result->ToObject(&check_result)) {
@@ -9468,6 +9809,7 @@
   }
   CONVERT_ARG_CHECKED(String, source, 1);
   CONVERT_BOOLEAN_CHECKED(disable_break, args[2]);
+  Handle<Object> additional_context(args[3]);
 
   // Handle the processing of break.
   DisableBreak disable_break_save(disable_break);
@@ -9486,11 +9828,24 @@
   // debugger was invoked.
   Handle<Context> context = Top::global_context();
 
+  bool is_global = true;
+
+  if (additional_context->IsJSObject()) {
+    // Create a function context first, than put 'with' context on top of it.
+    Handle<JSFunction> go_between = Factory::NewFunction(
+        Factory::empty_string(), Factory::undefined_value());
+    go_between->set_context(*context);
+    context =
+        Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, go_between);
+    context->set_extension(JSObject::cast(*additional_context));
+    is_global = false;
+  }
+
   // Compile the source to be evaluated.
   Handle<SharedFunctionInfo> shared =
       Compiler::CompileEval(source,
                             context,
-                            true);
+                            is_global);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
       Handle<JSFunction>(Factory::NewFunctionFromSharedFunctionInfo(shared,
@@ -9885,6 +10240,15 @@
   }
 }
 
+
+static MaybeObject* Runtime_LiveEditFunctionSourceUpdated(Arguments args) {
+  ASSERT(args.length() == 1);
+  HandleScope scope;
+  CONVERT_ARG_CHECKED(JSArray, shared_info, 0);
+  return LiveEdit::FunctionSourceUpdated(shared_info);
+}
+
+
 // Replaces code of SharedFunctionInfo with a new one.
 static MaybeObject* Runtime_LiveEditReplaceFunctionCode(Arguments args) {
   ASSERT(args.length() == 2);
@@ -9987,7 +10351,12 @@
 
   Handle<Code> code(function->code());
 
-  RelocIterator it(*code, 1 << RelocInfo::STATEMENT_POSITION);
+  if (code->kind() != Code::FUNCTION &&
+      code->kind() != Code::OPTIMIZED_FUNCTION) {
+    return Heap::undefined_value();
+  }
+
+  RelocIterator it(*code, RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION));
   int closest_pc = 0;
   int distance = kMaxInt;
   while (!it.done()) {
@@ -10141,9 +10510,9 @@
 }
 
 
-// Collect the raw data for a stack trace.  Returns an array of three
-// element segments each containing a receiver, function and native
-// code offset.
+// Collect the raw data for a stack trace.  Returns an array of
+// 4-element segments, each containing a receiver, function, code and
+// native code offset.
 static MaybeObject* Runtime_CollectStackTrace(Arguments args) {
   ASSERT_EQ(args.length(), 2);
   Handle<Object> caller = args.at<Object>(0);
@@ -10153,7 +10522,7 @@
 
   limit = Max(limit, 0);  // Ensure that limit is not negative.
   int initial_size = Min(limit, 10);
-  Handle<JSArray> result = Factory::NewJSArray(initial_size * 3);
+  Handle<JSArray> result = Factory::NewJSArray(initial_size * 4);
 
   StackFrameIterator iter;
   // If the caller parameter is a function we skip frames until we're
@@ -10166,23 +10535,25 @@
     if (ShowFrameInStackTrace(raw_frame, *caller, &seen_caller)) {
       frames_seen++;
       JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
-      Object* recv = frame->receiver();
-      Object* fun = frame->function();
-      Address pc = frame->pc();
-      Address start = frame->code()->address();
-      Smi* offset = Smi::FromInt(static_cast<int>(pc - start));
-      FixedArray* elements = FixedArray::cast(result->elements());
-      if (cursor + 2 < elements->length()) {
-        elements->set(cursor++, recv);
-        elements->set(cursor++, fun);
-        elements->set(cursor++, offset);
-      } else {
-        HandleScope scope;
-        Handle<Object> recv_handle(recv);
-        Handle<Object> fun_handle(fun);
-        SetElement(result, cursor++, recv_handle);
-        SetElement(result, cursor++, fun_handle);
-        SetElement(result, cursor++, Handle<Smi>(offset));
+      List<FrameSummary> frames(3);  // Max 2 levels of inlining.
+      frame->Summarize(&frames);
+      for (int i = frames.length() - 1; i >= 0; i--) {
+        Handle<Object> recv = frames[i].receiver();
+        Handle<JSFunction> fun = frames[i].function();
+        Handle<Code> code = frames[i].code();
+        Handle<Smi> offset(Smi::FromInt(frames[i].offset()));
+        FixedArray* elements = FixedArray::cast(result->elements());
+        if (cursor + 3 < elements->length()) {
+          elements->set(cursor++, *recv);
+          elements->set(cursor++, *fun);
+          elements->set(cursor++, *code);
+          elements->set(cursor++, *offset);
+        } else {
+          SetElement(result, cursor++, recv);
+          SetElement(result, cursor++, fun);
+          SetElement(result, cursor++, code);
+          SetElement(result, cursor++, offset);
+        }
       }
     }
     iter.Advance();
diff --git a/src/runtime.h b/src/runtime.h
index f9ebbc4..5ecae7e 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -79,6 +79,11 @@
   F(GetConstructorDelegate, 1, 1) \
   F(NewArgumentsFast, 3, 1) \
   F(LazyCompile, 1, 1) \
+  F(LazyRecompile, 1, 1) \
+  F(NotifyDeoptimized, 1, 1) \
+  F(NotifyOSR, 0, 1) \
+  F(DeoptimizeFunction, 1, 1) \
+  F(CompileForOnStackReplacement, 1, 1) \
   F(SetNewFunctionAttributes, 1, 1) \
   F(AllocateInNewSpace, 1, 1) \
   \
@@ -100,6 +105,7 @@
   F(CharFromCode, 1, 1) \
   F(URIEscape, 1, 1) \
   F(URIUnescape, 1, 1) \
+  F(QuoteJSONString, 1, 1) \
   \
   F(NumberToString, 1, 1) \
   F(NumberToStringSkipCache, 1, 1) \
@@ -108,6 +114,7 @@
   F(NumberToJSUint32, 1, 1) \
   F(NumberToJSInt32, 1, 1) \
   F(NumberToSmi, 1, 1) \
+  F(AllocateHeapNumber, 0, 1) \
   \
   /* Arithmetic operations */ \
   F(NumberAdd, 2, 1) \
@@ -335,8 +342,8 @@
   F(IsBreakOnException, 1, 1) \
   F(PrepareStep, 3, 1) \
   F(ClearStepping, 0, 1) \
-  F(DebugEvaluate, 4, 1) \
-  F(DebugEvaluateGlobal, 3, 1) \
+  F(DebugEvaluate, 5, 1) \
+  F(DebugEvaluateGlobal, 4, 1) \
   F(DebugGetLoadedScripts, 0, 1) \
   F(DebugReferencedBy, 3, 1) \
   F(DebugConstructedBy, 2, 1) \
@@ -349,6 +356,7 @@
   F(LiveEditGatherCompileInfo, 2, 1) \
   F(LiveEditReplaceScript, 3, 1) \
   F(LiveEditReplaceFunctionCode, 2, 1) \
+  F(LiveEditFunctionSourceUpdated, 1, 1) \
   F(LiveEditFunctionSetScript, 2, 1) \
   F(LiveEditReplaceRefToNestedFunction, 3, 1) \
   F(LiveEditPatchFunctionPositions, 2, 1) \
@@ -416,6 +424,7 @@
   F(MathSin, 1, 1)                                                           \
   F(MathCos, 1, 1)                                                           \
   F(MathSqrt, 1, 1)                                                          \
+  F(MathLog, 1, 1)                                                           \
   F(IsRegExpEquivalent, 2, 1)                                                \
   F(HasCachedArrayIndex, 1, 1)                                               \
   F(GetCachedArrayIndex, 1, 1)                                               \
diff --git a/src/runtime.js b/src/runtime.js
index f2c8d6b..28a38ca 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -594,13 +594,15 @@
 
 // ECMA-262, section 8.6.2.6, page 28.
 function DefaultNumber(x) {
-  if (IS_FUNCTION(x.valueOf)) {
-    var v = x.valueOf();
+  var valueOf = x.valueOf;
+  if (IS_FUNCTION(valueOf)) {
+    var v = %_CallFunction(x, valueOf);
     if (%IsPrimitive(v)) return v;
   }
 
-  if (IS_FUNCTION(x.toString)) {
-    var s = x.toString();
+  var toString = x.toString;
+  if (IS_FUNCTION(toString)) {
+    var s = %_CallFunction(x, toString);
     if (%IsPrimitive(s)) return s;
   }
 
@@ -610,13 +612,15 @@
 
 // ECMA-262, section 8.6.2.6, page 28.
 function DefaultString(x) {
-  if (IS_FUNCTION(x.toString)) {
-    var s = x.toString();
+  var toString = x.toString;
+  if (IS_FUNCTION(toString)) {
+    var s = %_CallFunction(x, toString);
     if (%IsPrimitive(s)) return s;
   }
 
-  if (IS_FUNCTION(x.valueOf)) {
-    var v = x.valueOf();
+  var valueOf = x.valueOf;
+  if (IS_FUNCTION(valueOf)) {
+    var v = %_CallFunction(x, valueOf);
     if (%IsPrimitive(v)) return v;
   }
 
diff --git a/src/safepoint-table.cc b/src/safepoint-table.cc
new file mode 100644
index 0000000..b9468a5
--- /dev/null
+++ b/src/safepoint-table.cc
@@ -0,0 +1,210 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "safepoint-table.h"
+#include "disasm.h"
+
+namespace v8 {
+namespace internal {
+
+SafepointTable::SafepointTable(Code* code) {
+  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+  code_ = code;
+  Address header = code->instruction_start() + code->safepoint_table_start();
+  length_ = Memory::uint32_at(header + kLengthOffset);
+  entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
+  pc_and_deoptimization_indexes_ = header + kHeaderSize;
+  entries_ = pc_and_deoptimization_indexes_ +
+            (length_ * kPcAndDeoptimizationIndexSize);
+  ASSERT(entry_size_ > 0);
+  ASSERT_EQ(DeoptimizationIndexField::max(), Safepoint::kNoDeoptimizationIndex);
+}
+
+
+bool SafepointTable::HasRegisters(uint8_t* entry) {
+  ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
+  const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
+  for (int i = 0; i < num_reg_bytes; i++) {
+    if (entry[i] != kNoRegisters) return true;
+  }
+  return false;
+}
+
+
+bool SafepointTable::HasRegisterAt(uint8_t* entry, int reg_index) {
+  ASSERT(reg_index >= 0 && reg_index < kNumSafepointRegisters);
+  int byte_index = reg_index >> kBitsPerByteLog2;
+  int bit_index = reg_index & (kBitsPerByte - 1);
+  return (entry[byte_index] & (1 << bit_index)) != 0;
+}
+
+
+void SafepointTable::PrintEntry(unsigned index) const {
+  disasm::NameConverter converter;
+  uint8_t* entry = GetEntry(index);
+
+  // Print the stack slot bits.
+  if (entry_size_ > 0) {
+    ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
+    const int first = kNumSafepointRegisters >> kBitsPerByteLog2;
+    int last = entry_size_ - 1;
+    for (int i = first; i < last; i++) PrintBits(entry[i], kBitsPerByte);
+    int last_bits = code_->stack_slots() - ((last - first) * kBitsPerByte);
+    PrintBits(entry[last], last_bits);
+
+    // Print the registers (if any).
+    if (!HasRegisters(entry)) return;
+    for (int j = 0; j < kNumSafepointRegisters; j++) {
+      if (HasRegisterAt(entry, j)) {
+        PrintF(" | %s", converter.NameOfCPURegister(j));
+      }
+    }
+  }
+}
+
+
+void SafepointTable::PrintBits(uint8_t byte, int digits) {
+  ASSERT(digits >= 0 && digits <= kBitsPerByte);
+  for (int i = 0; i < digits; i++) {
+    PrintF("%c", ((byte & (1 << i)) == 0) ? '0' : '1');
+  }
+}
+
+
+Safepoint SafepointTableBuilder::DefineSafepoint(Assembler* assembler,
+                                                 int deoptimization_index) {
+  ASSERT(deoptimization_index != -1);
+  DeoptimizationInfo pc_and_deoptimization_index;
+  pc_and_deoptimization_index.pc = assembler->pc_offset();
+  pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
+  pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
+  deoptimization_info_.Add(pc_and_deoptimization_index);
+  indexes_.Add(new ZoneList<int>(8));
+  registers_.Add(NULL);
+  return Safepoint(indexes_.last(), registers_.last());
+}
+
+
+Safepoint SafepointTableBuilder::DefineSafepointWithRegisters(
+    Assembler* assembler, int arguments, int deoptimization_index) {
+  ASSERT(deoptimization_index != -1);
+  ASSERT(arguments == 0);  // Only case that works for now.
+  DeoptimizationInfo pc_and_deoptimization_index;
+  pc_and_deoptimization_index.pc = assembler->pc_offset();
+  pc_and_deoptimization_index.deoptimization_index = deoptimization_index;
+  pc_and_deoptimization_index.pc_after_gap = assembler->pc_offset();
+  deoptimization_info_.Add(pc_and_deoptimization_index);
+  indexes_.Add(new ZoneList<int>(8));
+  registers_.Add(new ZoneList<int>(4));
+  return Safepoint(indexes_.last(), registers_.last());
+}
+
+
+unsigned SafepointTableBuilder::GetCodeOffset() const {
+  ASSERT(emitted_);
+  return offset_;
+}
+
+
+void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
+  // Make sure the safepoint table is properly aligned. Pad with nops.
+  assembler->Align(kIntSize);
+  assembler->RecordComment(";;; Safepoint table.");
+  offset_ = assembler->pc_offset();
+
+  // Take the register bits into account.
+  bits_per_entry += kNumSafepointRegisters;
+
+  // Compute the number of bytes per safepoint entry.
+  int bytes_per_entry =
+      RoundUp(bits_per_entry, kBitsPerByte) >> kBitsPerByteLog2;
+
+  // Emit the table header.
+  int length = deoptimization_info_.length();
+  assembler->dd(length);
+  assembler->dd(bytes_per_entry);
+
+  // Emit sorted table of pc offsets together with deoptimization indexes and
+  // pc after gap information.
+  for (int i = 0; i < length; i++) {
+    assembler->dd(deoptimization_info_[i].pc);
+    assembler->dd(EncodeDeoptimizationIndexAndGap(deoptimization_info_[i]));
+  }
+
+  // Emit table of bitmaps.
+  ZoneList<uint8_t> bits(bytes_per_entry);
+  for (int i = 0; i < length; i++) {
+    ZoneList<int>* indexes = indexes_[i];
+    ZoneList<int>* registers = registers_[i];
+    bits.Clear();
+    bits.AddBlock(0, bytes_per_entry);
+
+    // Run through the registers (if any).
+    ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
+    if (registers == NULL) {
+      const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
+      for (int j = 0; j < num_reg_bytes; j++) {
+        bits[j] = SafepointTable::kNoRegisters;
+      }
+    } else {
+      for (int j = 0; j < registers->length(); j++) {
+        int index = registers->at(j);
+        ASSERT(index >= 0 && index < kNumSafepointRegisters);
+        int byte_index = index >> kBitsPerByteLog2;
+        int bit_index = index & (kBitsPerByte - 1);
+        bits[byte_index] |= (1 << bit_index);
+      }
+    }
+
+    // Run through the indexes and build a bitmap.
+    for (int j = 0; j < indexes->length(); j++) {
+      int index = bits_per_entry - 1 - indexes->at(j);
+      int byte_index = index >> kBitsPerByteLog2;
+      int bit_index = index & (kBitsPerByte - 1);
+      bits[byte_index] |= (1U << bit_index);
+    }
+
+    // Emit the bitmap for the current entry.
+    for (int k = 0; k < bytes_per_entry; k++) {
+      assembler->db(bits[k]);
+    }
+  }
+  emitted_ = true;
+}
+
+
+uint32_t SafepointTableBuilder::EncodeDeoptimizationIndexAndGap(
+    DeoptimizationInfo info) {
+  unsigned index = info.deoptimization_index;
+  unsigned gap_size = info.pc_after_gap - info.pc;
+  uint32_t encoding = SafepointTable::DeoptimizationIndexField::encode(index);
+  encoding |= SafepointTable::GapCodeSizeField::encode(gap_size);
+  return encoding;
+}
+
+
+} }  // namespace v8::internal
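EncodeDeoptimizationIndexAndGap above packs two values into one uint32: the gap code size in the low 8 bits (GapCodeSizeField) and the deoptimization index in the upper 24 (DeoptimizationIndexField). The same packing with plain shifts, as a sketch:

#include <assert.h>
#include <stdint.h>

// Gap code size occupies bits 0-7, the deoptimization index bits 8-31,
// mirroring SafepointTable::GapCodeSizeField / DeoptimizationIndexField.
static uint32_t EncodeIndexAndGap(unsigned deopt_index, unsigned gap_size) {
  assert(gap_size < (1u << 8));
  assert(deopt_index <= 0x00ffffff);  // kNoDeoptimizationIndex is the max
  return (deopt_index << 8) | gap_size;
}

static unsigned DecodeGapSize(uint32_t encoding) { return encoding & 0xff; }
static unsigned DecodeDeoptIndex(uint32_t encoding) { return encoding >> 8; }

Round-tripping holds by construction: DecodeDeoptIndex(EncodeIndexAndGap(i, g)) == i and DecodeGapSize(EncodeIndexAndGap(i, g)) == g for any in-range pair.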
diff --git a/src/safepoint-table.h b/src/safepoint-table.h
new file mode 100644
index 0000000..010ac57
--- /dev/null
+++ b/src/safepoint-table.h
@@ -0,0 +1,189 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SAFEPOINT_TABLE_H_
+#define V8_SAFEPOINT_TABLE_H_
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "zone.h"
+#include "zone-inl.h"
+
+namespace v8 {
+namespace internal {
+
+class SafepointTable BASE_EMBEDDED {
+ public:
+  explicit SafepointTable(Code* code);
+
+  int size() const {
+    return kHeaderSize +
+           (length_ * (kPcAndDeoptimizationIndexSize + entry_size_));
+  }
+  unsigned length() const { return length_; }
+  unsigned entry_size() const { return entry_size_; }
+
+  unsigned GetPcOffset(unsigned index) const {
+    ASSERT(index < length_);
+    return Memory::uint32_at(GetPcOffsetLocation(index));
+  }
+
+  int GetDeoptimizationIndex(unsigned index) const {
+    ASSERT(index < length_);
+    unsigned value = Memory::uint32_at(GetDeoptimizationLocation(index));
+    return DeoptimizationIndexField::decode(value);
+  }
+
+  unsigned GetGapCodeSize(unsigned index) const {
+    ASSERT(index < length_);
+    unsigned value = Memory::uint32_at(GetDeoptimizationLocation(index));
+    return GapCodeSizeField::decode(value);
+  }
+
+  uint8_t* GetEntry(unsigned index) const {
+    ASSERT(index < length_);
+    return &Memory::uint8_at(entries_ + (index * entry_size_));
+  }
+
+  class GapCodeSizeField: public BitField<unsigned, 0, 8> {};
+  class DeoptimizationIndexField: public BitField<int, 8, 24> {};
+
+  static bool HasRegisters(uint8_t* entry);
+  static bool HasRegisterAt(uint8_t* entry, int reg_index);
+
+  void PrintEntry(unsigned index) const;
+
+ private:
+  static const uint8_t kNoRegisters = 0xFF;
+
+  static const int kLengthOffset = 0;
+  static const int kEntrySizeOffset = kLengthOffset + kIntSize;
+  static const int kHeaderSize = kEntrySizeOffset + kIntSize;
+
+  static const int kPcSize = kIntSize;
+  static const int kDeoptimizationIndexSize = kIntSize;
+  static const int kPcAndDeoptimizationIndexSize =
+      kPcSize + kDeoptimizationIndexSize;
+
+  Address GetPcOffsetLocation(unsigned index) const {
+    return pc_and_deoptimization_indexes_ +
+           (index * kPcAndDeoptimizationIndexSize);
+  }
+
+  Address GetDeoptimizationLocation(unsigned index) const {
+    return GetPcOffsetLocation(index) + kPcSize;
+  }
+
+  static void PrintBits(uint8_t byte, int digits);
+
+  AssertNoAllocation no_allocation_;
+  Code* code_;
+  unsigned length_;
+  unsigned entry_size_;
+
+  Address pc_and_deoptimization_indexes_;
+  Address entries_;
+
+  friend class SafepointTableBuilder;
+};
+
+
+class Safepoint BASE_EMBEDDED {
+ public:
+  static const int kNoDeoptimizationIndex = 0x00ffffff;
+
+  void DefinePointerSlot(int index) { indexes_->Add(index); }
+  void DefinePointerRegister(Register reg) { registers_->Add(reg.code()); }
+
+ private:
+  Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers) :
+      indexes_(indexes), registers_(registers) { }
+  ZoneList<int>* indexes_;
+  ZoneList<int>* registers_;
+
+  friend class SafepointTableBuilder;
+};
+
+
+class SafepointTableBuilder BASE_EMBEDDED {
+ public:
+  SafepointTableBuilder()
+      : deoptimization_info_(32),
+        indexes_(32),
+        registers_(32),
+        emitted_(false) { }
+
+  // Get the offset of the emitted safepoint table in the code.
+  unsigned GetCodeOffset() const;
+
+  // Define a new safepoint for the current position in the body.
+  Safepoint DefineSafepoint(
+      Assembler* assembler,
+      int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
+
+  // Define a new safepoint with registers on the stack for the
+  // current position in the body and take the number of arguments on
+  // top of the registers into account.
+  Safepoint DefineSafepointWithRegisters(
+      Assembler* assembler,
+      int arguments,
+      int deoptimization_index = Safepoint::kNoDeoptimizationIndex);
+
+  // Update the last safepoint with the size of the code generated for the gap
+  // following it.
+  void SetPcAfterGap(int pc) {
+    ASSERT(!deoptimization_info_.is_empty());
+    int index = deoptimization_info_.length() - 1;
+    deoptimization_info_[index].pc_after_gap = pc;
+  }
+
+  // Emit the safepoint table after the body. The number of bits per
+  // entry must be enough to hold all the pointer indexes.
+  void Emit(Assembler* assembler, int bits_per_entry);
+
+ private:
+  struct DeoptimizationInfo {
+    unsigned pc;
+    unsigned deoptimization_index;
+    unsigned pc_after_gap;
+  };
+
+  uint32_t EncodeDeoptimizationIndexAndGap(DeoptimizationInfo info);
+
+  ZoneList<DeoptimizationInfo> deoptimization_info_;
+  ZoneList<ZoneList<int>*> indexes_;
+  ZoneList<ZoneList<int>*> registers_;
+
+  bool emitted_;
+  unsigned offset_;
+
+  DISALLOW_COPY_AND_ASSIGN(SafepointTableBuilder);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_SAFEPOINT_TABLE_H_
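Each safepoint entry the builder emits is a small bitmap: the first kNumSafepointRegisters bits mark live pointer registers, and pointer slot i lands at bit bits_per_entry - 1 - i (after the register bits have been added into bits_per_entry). A simplified sketch of building one entry the way SafepointTableBuilder::Emit does, ignoring the kNoRegisters fill for register-less entries:

#include <stdint.h>
#include <vector>

// Build one safepoint bitmap entry. bits_per_entry already includes the
// register bits, as in SafepointTableBuilder::Emit.
static std::vector<uint8_t> BuildEntry(int bits_per_entry,
                                       const std::vector<int>& registers,
                                       const std::vector<int>& pointer_slots) {
  std::vector<uint8_t> bits((bits_per_entry + 7) / 8, 0);
  // Register bits live at the low bit positions.
  for (size_t i = 0; i < registers.size(); i++) {
    int index = registers[i];
    bits[index >> 3] |= static_cast<uint8_t>(1u << (index & 7));
  }
  // Pointer slot j maps to bit bits_per_entry - 1 - j.
  for (size_t i = 0; i < pointer_slots.size(); i++) {
    int index = bits_per_entry - 1 - pointer_slots[i];
    bits[index >> 3] |= static_cast<uint8_t>(1u << (index & 7));
  }
  return bits;
}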
diff --git a/src/scanner-base.cc b/src/scanner-base.cc
index 8242f81..b26fee0 100644
--- a/src/scanner-base.cc
+++ b/src/scanner-base.cc
@@ -35,12 +35,6 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// UTF16Buffer
-
-UTF16Buffer::UTF16Buffer()
-    : pos_(0), end_(kNoEndPosition) { }
-
-// ----------------------------------------------------------------------------
 // LiteralCollector
 
 LiteralCollector::LiteralCollector()
@@ -92,7 +86,7 @@
 // ----------------------------------------------------------------------------
 // Scanner
 
-Scanner::Scanner() : source_(NULL), stack_overflow_(false) {}
+Scanner::Scanner() { }
 
 
 uc32 Scanner::ScanHexEscape(uc32 c, int length) {
@@ -142,8 +136,7 @@
 // ----------------------------------------------------------------------------
 // JavaScriptScanner
 
-JavaScriptScanner::JavaScriptScanner()
-    : has_line_terminator_before_next_(false) {}
+JavaScriptScanner::JavaScriptScanner() : Scanner() {}
 
 
 Token::Value JavaScriptScanner::Next() {
@@ -503,12 +496,21 @@
 
 
 void JavaScriptScanner::SeekForward(int pos) {
-  source_->SeekForward(pos - 1);
-  Advance();
-  // This function is only called to seek to the location
-  // of the end of a function (at the "}" token). It doesn't matter
-  // whether there was a line terminator in the part we skip.
-  has_line_terminator_before_next_ = false;
+  // After this call, we will have the token at the given position as
+  // the "next" token. The "current" token will be invalid.
+  if (pos == next_.location.beg_pos) return;
+  int current_pos = source_pos();
+  ASSERT_EQ(next_.location.end_pos, current_pos);
+  // Positions inside the lookahead token aren't supported.
+  ASSERT(pos >= current_pos);
+  if (pos != current_pos) {
+    source_->SeekForward(pos - source_->pos());
+    Advance();
+    // This function is only called to seek to the location
+    // of the end of a function (at the "}" token). It doesn't matter
+    // whether there was a line terminator in the part we skip.
+    has_line_terminator_before_next_ = false;
+  }
   Scan();
 }
 
diff --git a/src/scanner-base.h b/src/scanner-base.h
index 3714ae2..c50b8f3 100644
--- a/src/scanner-base.h
+++ b/src/scanner-base.h
@@ -52,31 +52,75 @@
   return -1;
 }
 
-// ----------------------------------------------------------------------------
-// UTF16Buffer - scanner input source with pushback.
 
-class UTF16Buffer {
+// ---------------------------------------------------------------------
+// Buffered stream of characters, using an internal UC16 buffer.
+
+class UC16CharacterStream {
  public:
-  UTF16Buffer();
-  virtual ~UTF16Buffer() {}
+  UC16CharacterStream() : pos_(0) { }
+  virtual ~UC16CharacterStream() { }
 
-  virtual void PushBack(uc32 ch) = 0;
-  // Returns a value < 0 when the buffer end is reached.
-  virtual uc32 Advance() = 0;
-  virtual void SeekForward(int pos) = 0;
+  // Returns and advances past the next UC16 character in the input
+  // stream. If there are no more characters, it returns a negative
+  // value.
+  inline int32_t Advance() {
+    if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
+      pos_++;
+      return *(buffer_cursor_++);
+    }
+    // Note: currently the following increment is necessary to avoid a
+    // parser problem! The scanner treats the final kEndOfInput as
+    // a character with a position, and does math relative to that
+    // position.
+    pos_++;
 
-  int pos() const { return pos_; }
+    return kEndOfInput;
+  }
 
-  static const int kNoEndPosition = 1;
+  // Return the current position in the character stream.
+  // Starts at zero.
+  inline unsigned pos() const { return pos_; }
+
+  // Skips forward past the next character_count UC16 characters
+  // in the input, or until the end of input if that comes sooner.
+  // Returns the number of characters actually skipped. If this is less
+  // than character_count, the end of the input has been reached.
+  inline unsigned SeekForward(unsigned character_count) {
+    unsigned buffered_chars =
+        static_cast<unsigned>(buffer_end_ - buffer_cursor_);
+    if (character_count <= buffered_chars) {
+      buffer_cursor_ += character_count;
+      pos_ += character_count;
+      return character_count;
+    }
+    return SlowSeekForward(character_count);
+  }
+
+  // Pushes back the most recently read UC16 character, i.e.,
+  // the value returned by the most recent call to Advance.
+  // Must not be used right after calling SeekForward.
+  virtual void PushBack(uc16 character) = 0;
 
  protected:
-  // Initial value of end_ before the input stream is initialized.
+  static const int32_t kEndOfInput = -1;
 
-  int pos_;  // Current position in the buffer.
-  int end_;  // Position where scanning should stop (EOF).
+  // Ensures that the buffer_cursor_ points to the character at
+  // position pos_ of the input, if possible. If the position
+  // is at or after the end of the input, return false. If there
+  // are more characters available, return true.
+  virtual bool ReadBlock() = 0;
+  virtual unsigned SlowSeekForward(unsigned character_count) = 0;
+
+  const uc16* buffer_cursor_;
+  const uc16* buffer_end_;
+  unsigned pos_;
 };
 
 
+// ---------------------------------------------------------------------
+// Constants used by scanners.
+
 class ScannerConstants : AllStatic {
  public:
   typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
@@ -228,8 +272,6 @@
     return Vector<const char>(next_literal_string(), next_literal_length());
   }
 
-  bool stack_overflow() { return stack_overflow_; }
-
   static const int kCharacterLookaheadBufferSize = 1;
 
  protected:
@@ -279,7 +321,7 @@
   // Low-level scanning support.
   void Advance() { c0_ = source_->Advance(); }
   void PushBack(uc32 ch) {
-    source_->PushBack(ch);
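+    // The stream must take back the character it most recently produced,
+    // which is the current lookahead c0_; ch then becomes the new lookahead.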
+    source_->PushBack(c0_);
     c0_ = ch;
   }
 
@@ -309,15 +351,13 @@
   TokenDesc current_;  // desc for current token (as returned by Next())
   TokenDesc next_;     // desc for next token (one token look-ahead)
 
-  // Input stream. Must be initialized to an UTF16Buffer.
-  UTF16Buffer* source_;
+  // Input stream. Must be initialized to an UC16CharacterStream.
+  UC16CharacterStream* source_;
 
   // Buffer to hold literal values (identifiers, strings, numbers)
   // using '\x00'-terminated UTF-8 encoding. Handles allocation internally.
   LiteralCollector literal_buffer_;
 
-  bool stack_overflow_;
-
   // One Unicode character look-ahead; c0_ < 0 at the end of the input.
   uc32 c0_;
 };
diff --git a/src/scanner.cc b/src/scanner.cc
index 63b2fd8..47e9895 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -36,63 +36,265 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// UTF16Buffer
+// BufferedUC16CharacterStreams
 
-// CharacterStreamUTF16Buffer
-CharacterStreamUTF16Buffer::CharacterStreamUTF16Buffer()
-    : pushback_buffer_(0), last_(0), stream_(NULL) { }
+BufferedUC16CharacterStream::BufferedUC16CharacterStream()
+    : UC16CharacterStream(),
+      pushback_limit_(NULL) {
+  // Initialize buffer as being empty. First read will fill the buffer.
+  buffer_cursor_ = buffer_;
+  buffer_end_ = buffer_;
+}
 
+BufferedUC16CharacterStream::~BufferedUC16CharacterStream() { }
 
-void CharacterStreamUTF16Buffer::Initialize(Handle<String> data,
-                                            unibrow::CharacterStream* input,
-                                            int start_position,
-                                            int end_position) {
-  stream_ = input;
-  if (start_position > 0) {
-    SeekForward(start_position);
+void BufferedUC16CharacterStream::PushBack(uc16 character) {
+  if (pushback_limit_ == NULL && buffer_cursor_ > buffer_) {
+    // buffer_ is writable, but buffer_cursor_ points to const
+    // characters, so the write goes through buffer_ directly.
+    buffer_[--buffer_cursor_ - buffer_] = character;
+    pos_--;
+    return;
   }
-  end_ = end_position != kNoEndPosition ? end_position : kMaxInt;
+  SlowPushBack(character);
 }
 
 
-void CharacterStreamUTF16Buffer::PushBack(uc32 ch) {
-  pushback_buffer()->Add(last_);
-  last_ = ch;
+void BufferedUC16CharacterStream::SlowPushBack(uc16 character) {
+  // In pushback mode, the end of the buffer contains pushback,
+  // and the start of the buffer (from buffer start to pushback_limit_)
+  // contains valid data that comes just after the pushback.
+  // We NULL the pushback_limit_ if pushing all the way back to the
+  // start of the buffer.
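+  //
+  // Layout while in pushback mode (buffer_end_ == buffer_ + kBufferSize):
+  //
+  //   buffer_ ..... pushback_limit_ ..... buffer_cursor_ ..... buffer_end_
+  //   [data following the pushback]  (stale)  [pushed-back characters]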
+
+  if (pushback_limit_ == NULL) {
+    // Enter pushback mode.
+    pushback_limit_ = buffer_end_;
+    buffer_end_ = buffer_ + kBufferSize;
+    buffer_cursor_ = buffer_end_;
+  }
+  ASSERT(pushback_limit_ > buffer_);
+  ASSERT(pos_ > 0);
+  buffer_[--buffer_cursor_ - buffer_] = character;
+  if (buffer_cursor_ == buffer_) {
+    pushback_limit_ = NULL;
+  } else if (buffer_cursor_ < pushback_limit_) {
+    pushback_limit_ = buffer_cursor_;
+  }
   pos_--;
 }
 
 
-uc32 CharacterStreamUTF16Buffer::Advance() {
-  ASSERT(end_ != kNoEndPosition);
-  ASSERT(end_ >= 0);
-  // NOTE: It is of importance to Persian / Farsi resources that we do
-  // *not* strip format control characters in the scanner; see
-  //
-  //    https://bugzilla.mozilla.org/show_bug.cgi?id=274152
-  //
-  // So, even though ECMA-262, section 7.1, page 11, dictates that we
-  // must remove Unicode format-control characters, we do not. This is
-  // in line with how IE and SpiderMonkey handles it.
-  if (!pushback_buffer()->is_empty()) {
-    pos_++;
-    return last_ = pushback_buffer()->RemoveLast();
-  } else if (stream_->has_more() && pos_ < end_) {
-    pos_++;
-    uc32 next = stream_->GetNext();
-    return last_ = next;
-  } else {
-    // Note: currently the following increment is necessary to avoid a
-    // test-parser problem!
-    pos_++;
-    return last_ = static_cast<uc32>(-1);
+bool BufferedUC16CharacterStream::ReadBlock() {
+  if (pushback_limit_ != NULL) {
+    buffer_cursor_ = buffer_;
+    buffer_end_ = pushback_limit_;
+    pushback_limit_ = NULL;
+    ASSERT(buffer_cursor_ != buffer_end_);
+    return true;
+  }
+  unsigned length = FillBuffer(pos_, kBufferSize);
+  buffer_cursor_ = buffer_;
+  buffer_end_ = buffer_ + length;
+  return length > 0;
+}
+
+
+unsigned BufferedUC16CharacterStream::SlowSeekForward(unsigned delta) {
+  // Leave pushback mode (i.e., ignore that there might be valid data
+  // in the buffer before the pushback_limit_ point).
+  pushback_limit_ = NULL;
+  return BufferSeekForward(delta);
+}
+
+// ----------------------------------------------------------------------------
+// GenericStringUC16CharacterStream
+
+
+GenericStringUC16CharacterStream::GenericStringUC16CharacterStream(
+    Handle<String> data,
+    unsigned start_position,
+    unsigned end_position)
+    : string_(data),
+      length_(end_position) {
+  ASSERT(end_position >= start_position);
+  buffer_cursor_ = buffer_;
+  buffer_end_ = buffer_;
+  pos_ = start_position;
+}
+
+
+GenericStringUC16CharacterStream::~GenericStringUC16CharacterStream() { }
+
+
+unsigned GenericStringUC16CharacterStream::BufferSeekForward(unsigned delta) {
+  unsigned old_pos = pos_;
+  pos_ = Min(pos_ + delta, length_);
+  ReadBlock();
+  return pos_ - old_pos;
+}
+
+
+unsigned GenericStringUC16CharacterStream::FillBuffer(unsigned from_pos,
+                                                      unsigned length) {
+  if (from_pos >= length_) return 0;
+  if (from_pos + length > length_) {
+    length = length_ - from_pos;
+  }
+  String::WriteToFlat<uc16>(*string_, buffer_, from_pos, from_pos + length);
+  return length;
+}
+
+
+// ----------------------------------------------------------------------------
+// Utf8ToUC16CharacterStream
+Utf8ToUC16CharacterStream::Utf8ToUC16CharacterStream(const byte* data,
+                                                     unsigned length)
+    : BufferedUC16CharacterStream(),
+      raw_data_(data),
+      raw_data_length_(length),
+      raw_data_pos_(0),
+      raw_character_position_(0) {
+  ReadBlock();
+}
+
+
+Utf8ToUC16CharacterStream::~Utf8ToUC16CharacterStream() { }
+
+
+unsigned Utf8ToUC16CharacterStream::BufferSeekForward(unsigned delta) {
+  unsigned old_pos = pos_;
+  unsigned target_pos = pos_ + delta;
+  SetRawPosition(target_pos);
+  pos_ = raw_character_position_;
+  ReadBlock();
+  return pos_ - old_pos;
+}
+
+
+unsigned Utf8ToUC16CharacterStream::FillBuffer(unsigned char_position,
+                                               unsigned length) {
+  static const unibrow::uchar kMaxUC16Character = 0xffff;
+  SetRawPosition(char_position);
+  if (raw_character_position_ != char_position) {
+    // char_position was not a valid position in the stream (hit the end
+    // while spooling to it).
+    return 0u;
+  }
+  unsigned i = 0;
+  while (i < length) {
+    if (raw_data_pos_ == raw_data_length_) break;
+    unibrow::uchar c = raw_data_[raw_data_pos_];
+    if (c <= unibrow::Utf8::kMaxOneByteChar) {
+      raw_data_pos_++;
+    } else {
+      c = unibrow::Utf8::CalculateValue(raw_data_ + raw_data_pos_,
+                                        raw_data_length_ - raw_data_pos_,
+                                        &raw_data_pos_);
+      // Don't allow characters outside of the BMP.
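+      // They would need a surrogate pair in UC16, which this stream does
+      // not produce, so kBadChar is used as a replacement.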
+      if (c > kMaxUC16Character) {
+        c = unibrow::Utf8::kBadChar;
+      }
+    }
+    buffer_[i++] = static_cast<uc16>(c);
+  }
+  raw_character_position_ = char_position + i;
+  return i;
+}
+
+
+static const byte kUtf8MultiByteMask = 0xC0;
+static const byte kUtf8MultiByteCharStart = 0xC0;
+static const byte kUtf8MultiByteCharFollower = 0x80;
+
+
+#ifdef DEBUG
+static bool IsUtf8MultiCharacterStart(byte first_byte) {
+  return (first_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharStart;
+}
+#endif
+
+
+static bool IsUtf8MultiCharacterFollower(byte later_byte) {
+  return (later_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharFollower;
+}
+
+
+// Move the cursor back to point at the preceding UTF-8 character start
+// in the buffer.
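+// E.g. stepping back over the three-byte sequence 0xE2 0x82 0xAC (U+20AC),
+// the cursor first lands on the trailing byte 0xAC, skips the follower
+// 0x82, and stops on the leading byte 0xE2.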
+static inline void Utf8CharacterBack(const byte* buffer, unsigned* cursor) {
+  byte character = buffer[--*cursor];
+  if (character > unibrow::Utf8::kMaxOneByteChar) {
+    ASSERT(IsUtf8MultiCharacterFollower(character));
+    // Last byte of a multi-byte character encoding. Step backwards until
+    // pointing to the first byte of the encoding, recognized by having the
+    // top two bits set.
+    while (IsUtf8MultiCharacterFollower(buffer[--*cursor])) { }
+    ASSERT(IsUtf8MultiCharacterStart(buffer[*cursor]));
   }
 }
 
 
-void CharacterStreamUTF16Buffer::SeekForward(int pos) {
-  pos_ = pos;
-  ASSERT(pushback_buffer()->is_empty());
-  stream_->Seek(pos);
+// Move the cursor forward to point at the next following UTF-8 character start
+// in the buffer.
+static inline void Utf8CharacterForward(const byte* buffer, unsigned* cursor) {
+  byte character = buffer[(*cursor)++];
+  if (character > unibrow::Utf8::kMaxOneByteChar) {
+    // First character of a multi-byte character encoding.
+    // The number of most-significant one-bits determines the length of the
+    // encoding:
+    //  110..... - (0xCx, 0xDx) one additional byte (minimum).
+    //  1110.... - (0xEx) two additional bytes.
+    //  11110... - (0xFx) three additional bytes (maximum).
+    ASSERT(IsUtf8MultiCharacterStart(character));
+    // Additional bytes is:
+    // 1 if value in range 0xC0 .. 0xDF.
+    // 2 if value in range 0xE0 .. 0xEF.
+    // 3 if value in range 0xF0 .. 0xF7.
+    // Encode that in a single value.
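+    // 0x3211 packs the counts 1, 1, 2 and 3 into nibbles; the top two bits
+    // of (character - 0xC0) select a nibble via a shift of 0, 4, 8 or 12.
+    // E.g. 0xE2: ((0xE2 - 0xC0) >> 2) & 0xC == 8, (0x3211 >> 8) & 3 == 2.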
+    unsigned additional_bytes =
+        ((0x3211u) >> (((character - 0xC0) >> 2) & 0xC)) & 0x03;
+    *cursor += additional_bytes;
+    ASSERT(!IsUtf8MultiCharacterFollower(buffer[1 + additional_bytes]));
+  }
+}
+
+
+void Utf8ToUC16CharacterStream::SetRawPosition(unsigned target_position) {
+  if (raw_character_position_ > target_position) {
+    // Spool backwards in utf8 buffer.
+    do {
+      Utf8CharacterBack(raw_data_, &raw_data_pos_);
+      raw_character_position_--;
+    } while (raw_character_position_ > target_position);
+    return;
+  }
+  // Spool forwards in the utf8 buffer.
+  while (raw_character_position_ < target_position) {
+    if (raw_data_pos_ == raw_data_length_) return;
+    Utf8CharacterForward(raw_data_, &raw_data_pos_);
+    raw_character_position_++;
+  }
+}
+
+
+// ----------------------------------------------------------------------------
+// ExternalTwoByteStringUC16CharacterStream
+
+ExternalTwoByteStringUC16CharacterStream::
+    ~ExternalTwoByteStringUC16CharacterStream() { }
+
+
+ExternalTwoByteStringUC16CharacterStream
+    ::ExternalTwoByteStringUC16CharacterStream(
+        Handle<ExternalTwoByteString> data,
+        int start_position,
+        int end_position)
+    : UC16CharacterStream(),
+      source_(data),
+      raw_data_(data->GetTwoByteData(start_position)) {
+  buffer_cursor_ = raw_data_;
+  buffer_end_ = raw_data_ + (end_position - start_position);
+  pos_ = start_position;
 }
 
 
@@ -115,15 +317,19 @@
   complete_ = true;
 }
 
+
 // ----------------------------------------------------------------------------
 // V8JavaScriptScanner
 
-void V8JavaScriptScanner::Initialize(Handle<String> source,
+V8JavaScriptScanner::V8JavaScriptScanner() : JavaScriptScanner() { }
+
+
+void V8JavaScriptScanner::Initialize(UC16CharacterStream* source,
                                      int literal_flags) {
-  source_ = stream_initializer_.Init(source, NULL, 0, source->length());
+  source_ = source;
+  literal_flags_ = literal_flags | kLiteralIdentifier;
   // Need to capture identifiers in order to recognize "get" and "set"
   // in object literals.
-  literal_flags_ = literal_flags | kLiteralIdentifier;
   Init();
   // Skip initial whitespace allowing HTML comment ends just like
   // after a newline and scan first token.
@@ -133,95 +339,14 @@
 }
 
 
-void V8JavaScriptScanner::Initialize(Handle<String> source,
-                                     unibrow::CharacterStream* stream,
-                                     int literal_flags) {
-  source_ = stream_initializer_.Init(source, stream,
-                                     0, UTF16Buffer::kNoEndPosition);
-  literal_flags_ = literal_flags | kLiteralIdentifier;
-  Init();
-  // Skip initial whitespace allowing HTML comment ends just like
-  // after a newline and scan first token.
-  has_line_terminator_before_next_ = true;
-  SkipWhiteSpace();
-  Scan();
-}
-
-
-void V8JavaScriptScanner::Initialize(Handle<String> source,
-                                     int start_position,
-                                     int end_position,
-                                     int literal_flags) {
-  source_ = stream_initializer_.Init(source, NULL,
-                                     start_position, end_position);
-  literal_flags_ = literal_flags | kLiteralIdentifier;
-  Init();
-  // Skip initial whitespace allowing HTML comment ends just like
-  // after a newline and scan first token.
-  has_line_terminator_before_next_ = true;
-  SkipWhiteSpace();
-  Scan();
-}
-
-
-Token::Value V8JavaScriptScanner::NextCheckStack() {
-  // BUG 1215673: Find a thread safe way to set a stack limit in
-  // pre-parse mode. Otherwise, we cannot safely pre-parse from other
-  // threads.
-  StackLimitCheck check;
-  if (check.HasOverflowed()) {
-    stack_overflow_ = true;
-    current_ = next_;
-    next_.token = Token::ILLEGAL;
-    return current_.token;
-  } else {
-    return Next();
-  }
-}
-
-
-UTF16Buffer* StreamInitializer::Init(Handle<String> source,
-                                     unibrow::CharacterStream* stream,
-                                     int start_position,
-                                     int end_position) {
-  // Either initialize the scanner from a character stream or from a
-  // string.
-  ASSERT(source.is_null() || stream == NULL);
-
-  // Initialize the source buffer.
-  if (!source.is_null() && StringShape(*source).IsExternalTwoByte()) {
-    two_byte_string_buffer_.Initialize(
-        Handle<ExternalTwoByteString>::cast(source),
-        start_position,
-        end_position);
-    return &two_byte_string_buffer_;
-  } else if (!source.is_null() && StringShape(*source).IsExternalAscii()) {
-    ascii_string_buffer_.Initialize(
-        Handle<ExternalAsciiString>::cast(source),
-        start_position,
-        end_position);
-    return &ascii_string_buffer_;
-  } else {
-    if (!source.is_null()) {
-      safe_string_input_buffer_.Reset(source.location());
-      stream = &safe_string_input_buffer_;
-    }
-    char_stream_buffer_.Initialize(source,
-                                   stream,
-                                   start_position,
-                                   end_position);
-    return &char_stream_buffer_;
-  }
-}
-
 // ----------------------------------------------------------------------------
 // JsonScanner
 
-JsonScanner::JsonScanner() {}
+JsonScanner::JsonScanner() : Scanner() { }
 
 
-void JsonScanner::Initialize(Handle<String> source) {
-  source_ = stream_initializer_.Init(source, NULL, 0, source->length());
+void JsonScanner::Initialize(UC16CharacterStream* source) {
+  source_ = source;
   Init();
   // Skip initial whitespace.
   SkipJsonWhiteSpace();
@@ -236,13 +361,7 @@
   // threads.
   current_ = next_;
-  // Check for stack-overflow before returning any tokens.
-  StackLimitCheck check;
-  if (check.HasOverflowed()) {
-    stack_overflow_ = true;
-    next_.token = Token::ILLEGAL;
-  } else {
-    ScanJson();
-  }
+  ScanJson();
   return current_.token;
 }
 
diff --git a/src/scanner.h b/src/scanner.h
index acb9b47..572778f 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -35,67 +35,97 @@
 namespace v8 {
 namespace internal {
 
-// UTF16 buffer to read characters from a character stream.
-class CharacterStreamUTF16Buffer: public UTF16Buffer {
+// A buffered character stream based on a random access character
+// source (ReadBlock can be called with pos_ pointing to any position,
+// even positions before the current).
+class BufferedUC16CharacterStream: public UC16CharacterStream {
  public:
-  CharacterStreamUTF16Buffer();
-  virtual ~CharacterStreamUTF16Buffer() {}
-  void Initialize(Handle<String> data,
-                  unibrow::CharacterStream* stream,
-                  int start_position,
-                  int end_position);
-  virtual void PushBack(uc32 ch);
-  virtual uc32 Advance();
-  virtual void SeekForward(int pos);
+  BufferedUC16CharacterStream();
+  virtual ~BufferedUC16CharacterStream();
 
- private:
-  List<uc32> pushback_buffer_;
-  uc32 last_;
-  unibrow::CharacterStream* stream_;
+  virtual void PushBack(uc16 character);
 
-  List<uc32>* pushback_buffer() { return &pushback_buffer_; }
+ protected:
+  static const unsigned kBufferSize = 512;
+  static const unsigned kPushBackStepSize = 16;
+
+  virtual unsigned SlowSeekForward(unsigned delta);
+  virtual bool ReadBlock();
+  virtual void SlowPushBack(uc16 character);
+
+  virtual unsigned BufferSeekForward(unsigned delta) = 0;
+  virtual unsigned FillBuffer(unsigned position, unsigned length) = 0;
+
+  const uc16* pushback_limit_;
+  uc16 buffer_[kBufferSize];
+};
+
+
+// Generic string stream.
+class GenericStringUC16CharacterStream: public BufferedUC16CharacterStream {
+ public:
+  GenericStringUC16CharacterStream(Handle<String> data,
+                                   unsigned start_position,
+                                   unsigned end_position);
+  virtual ~GenericStringUC16CharacterStream();
+
+ protected:
+  virtual unsigned BufferSeekForward(unsigned delta);
+  virtual unsigned FillBuffer(unsigned position, unsigned length);
+
+  Handle<String> string_;
+  unsigned start_position_;
+  unsigned length_;
+};
+
+
+// UC16 stream based on a literal UTF-8 string.
+class Utf8ToUC16CharacterStream: public BufferedUC16CharacterStream {
+ public:
+  Utf8ToUC16CharacterStream(const byte* data, unsigned length);
+  virtual ~Utf8ToUC16CharacterStream();
+
+ protected:
+  virtual unsigned BufferSeekForward(unsigned delta);
+  virtual unsigned FillBuffer(unsigned char_position, unsigned length);
+  void SetRawPosition(unsigned char_position);
+
+  const byte* raw_data_;
+  unsigned raw_data_length_;  // Measured in bytes, not characters.
+  unsigned raw_data_pos_;
+  // The character position of the character at raw_data_[raw_data_pos_].
+  // Not necessarily the same as pos_.
+  unsigned raw_character_position_;
 };
 
 
 // UTF16 buffer to read characters from an external string.
-template <typename StringType, typename CharType>
-class ExternalStringUTF16Buffer: public UTF16Buffer {
+class ExternalTwoByteStringUC16CharacterStream: public UC16CharacterStream {
  public:
-  ExternalStringUTF16Buffer();
-  virtual ~ExternalStringUTF16Buffer() {}
-  void Initialize(Handle<StringType> data,
-                  int start_position,
-                  int end_position);
-  virtual void PushBack(uc32 ch);
-  virtual uc32 Advance();
-  virtual void SeekForward(int pos);
+  ExternalTwoByteStringUC16CharacterStream(Handle<ExternalTwoByteString> data,
+                                           int start_position,
+                                           int end_position);
+  virtual ~ExternalTwoByteStringUC16CharacterStream();
 
- private:
-  const CharType* raw_data_;  // Pointer to the actual array of characters.
+  virtual void PushBack(uc16 character) {
+    ASSERT(buffer_cursor_ > raw_data_);
+    buffer_cursor_--;
+    pos_--;
+  }
+
+ protected:
+  virtual unsigned SlowSeekForward(unsigned delta) {
+    // Fast case always handles seeking.
+    return 0;
+  }
+  virtual bool ReadBlock() {
+    // Entire string is read at start.
+    return false;
+  }
+  Handle<ExternalTwoByteString> source_;
+  const uc16* raw_data_;  // Pointer to the actual array of characters.
 };
 
 
-// Initializes a UTF16Buffer as input stream, using one of a number
-// of strategies depending on the available character sources.
-class StreamInitializer {
- public:
-  UTF16Buffer* Init(Handle<String> source,
-                    unibrow::CharacterStream* stream,
-                    int start_position,
-                    int end_position);
- private:
-  // Different UTF16 buffers used to pull characters from. Based on input one of
-  // these will be initialized as the actual data source.
-  CharacterStreamUTF16Buffer char_stream_buffer_;
-  ExternalStringUTF16Buffer<ExternalTwoByteString, uint16_t>
-      two_byte_string_buffer_;
-  ExternalStringUTF16Buffer<ExternalAsciiString, char> ascii_string_buffer_;
-
-  // Used to convert the source string into a character stream when a stream
-  // is not passed to the scanner.
-  SafeStringInputBuffer safe_string_input_buffer_;
-};
-
 // ----------------------------------------------------------------------------
 // V8JavaScriptScanner
 // JavaScript scanner getting its input from either a V8 String or a unicode
@@ -103,21 +133,9 @@
 
 class V8JavaScriptScanner : public JavaScriptScanner {
  public:
-  V8JavaScriptScanner() {}
-
-  Token::Value NextCheckStack();
-
-  // Initialize the Scanner to scan source.
-  void Initialize(Handle<String> source, int literal_flags = kAllLiterals);
-  void Initialize(Handle<String> source,
-                  unibrow::CharacterStream* stream,
+  V8JavaScriptScanner();
+  void Initialize(UC16CharacterStream* source,
                   int literal_flags = kAllLiterals);
-  void Initialize(Handle<String> source,
-                  int start_position, int end_position,
-                  int literal_flags = kAllLiterals);
-
- protected:
-  StreamInitializer stream_initializer_;
 };
 
 
@@ -125,8 +143,7 @@
  public:
   JsonScanner();
 
-  // Initialize the Scanner to scan source.
-  void Initialize(Handle<String> source);
+  void Initialize(UC16CharacterStream* source);
 
   // Returns the next token.
   Token::Value Next();
@@ -140,7 +157,7 @@
   // Recognizes all of the single-character tokens directly, or calls a function
   // to scan a number, string or identifier literal.
   // The only allowed whitespace characters between tokens are tab,
-  // carrige-return, newline and space.
+  // carriage-return, newline and space.
   void ScanJson();
 
   // A JSON number (production JSONNumber) is a subset of the valid JavaScript
@@ -161,60 +178,8 @@
   // are the only valid JSON identifiers (productions JSONBooleanLiteral,
   // JSONNullLiteral).
   Token::Value ScanJsonIdentifier(const char* text, Token::Value token);
-
-  StreamInitializer stream_initializer_;
 };
 
-
-// ExternalStringUTF16Buffer
-template <typename StringType, typename CharType>
-ExternalStringUTF16Buffer<StringType, CharType>::ExternalStringUTF16Buffer()
-    : raw_data_(NULL) { }
-
-
-template <typename StringType, typename CharType>
-void ExternalStringUTF16Buffer<StringType, CharType>::Initialize(
-     Handle<StringType> data,
-     int start_position,
-     int end_position) {
-  ASSERT(!data.is_null());
-  raw_data_ = data->resource()->data();
-
-  ASSERT(end_position <= data->length());
-  if (start_position > 0) {
-    SeekForward(start_position);
-  }
-  end_ =
-      end_position != kNoEndPosition ? end_position : data->length();
-}
-
-
-template <typename StringType, typename CharType>
-uc32 ExternalStringUTF16Buffer<StringType, CharType>::Advance() {
-  if (pos_ < end_) {
-    return raw_data_[pos_++];
-  } else {
-    // note: currently the following increment is necessary to avoid a
-    // test-parser problem!
-    pos_++;
-    return static_cast<uc32>(-1);
-  }
-}
-
-
-template <typename StringType, typename CharType>
-void ExternalStringUTF16Buffer<StringType, CharType>::PushBack(uc32 ch) {
-  pos_--;
-  ASSERT(pos_ >= Scanner::kCharacterLookaheadBufferSize);
-  ASSERT(raw_data_[pos_ - Scanner::kCharacterLookaheadBufferSize] == ch);
-}
-
-
-template <typename StringType, typename CharType>
-void ExternalStringUTF16Buffer<StringType, CharType>::SeekForward(int pos) {
-  pos_ = pos;
-}
-
 } }  // namespace v8::internal
 
 #endif  // V8_SCANNER_H_
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index b210ae7..dd49a4e 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -109,9 +109,14 @@
     return reinterpret_cast<SerializedScopeInfo*>(object);
   }
 
-  // Does this scope call eval.
+  // Does this scope call eval?
   bool CallsEval();
 
+  // Does this scope have an arguments shadow?
+  bool HasArgumentsShadow() {
+    return StackSlotIndex(Heap::arguments_shadow_symbol()) >= 0;
+  }
+
   // Return the number of stack slots for code.
   int NumberOfStackSlots();
 
diff --git a/src/scopes.cc b/src/scopes.cc
index 5ff250f..3565e11 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -291,13 +291,11 @@
 }
 
 
-VariableProxy* Scope::NewTemporary(Handle<String> name) {
-  Variable* var = new Variable(this, name, Variable::TEMPORARY, true,
-                               Variable::NORMAL);
-  VariableProxy* tmp = new VariableProxy(name, false, false);
-  tmp->BindTo(var);
+Variable* Scope::NewTemporary(Handle<String> name) {
+  Variable* var =
+      new Variable(this, name, Variable::TEMPORARY, true, Variable::NORMAL);
   temps_.Add(var);
-  return tmp;
+  return var;
 }
 
 
@@ -861,11 +859,13 @@
           // allocated.
           arguments_shadow_->is_accessed_from_inner_scope_ = true;
         }
-        var->rewrite_ =
+        Property* rewrite =
             new Property(new VariableProxy(arguments_shadow_),
                          new Literal(Handle<Object>(Smi::FromInt(i))),
                          RelocInfo::kNoPosition,
                          Property::SYNTHETIC);
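+        // Flag the synthetic property load so later phases can recognize
+        // it as an arguments[i] access.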
+        rewrite->set_is_arguments_access(true);
+        var->rewrite_ = rewrite;
       }
     }
 
diff --git a/src/scopes.h b/src/scopes.h
index 526c3d3..d909b81 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -105,7 +105,7 @@
   static bool Analyze(CompilationInfo* info);
 
   // The scope name is only used for printing/debugging.
-  void SetScopeName(Handle<String> scope_name)  { scope_name_ = scope_name; }
+  void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
 
   virtual void Initialize(bool inside_with);
 
@@ -156,11 +156,11 @@
   // such a variable again if it was added; otherwise this is a no-op.
   void RemoveUnresolved(VariableProxy* var);
 
-  // Creates a new temporary variable in this scope and binds a proxy to it.
-  // The name is only used for printing and cannot be used to find the variable.
-  // In particular, the only way to get hold of the temporary is by keeping the
-  // VariableProxy* around.
-  virtual VariableProxy* NewTemporary(Handle<String> name);
+  // Creates a new temporary variable in this scope.  The name is only used
+  // for printing and cannot be used to find the variable.  In particular,
+  // the only way to get hold of the temporary is by keeping the Variable*
+  // around.
+  virtual Variable* NewTemporary(Handle<String> name);
 
   // Adds the specific declaration node to the list of declarations in
   // this scope. The declarations are processed as part of entering
@@ -188,10 +188,10 @@
   // Scope-specific info.
 
   // Inform the scope that the corresponding code contains a with statement.
-  void RecordWithStatement()  { scope_contains_with_ = true; }
+  void RecordWithStatement() { scope_contains_with_ = true; }
 
   // Inform the scope that the corresponding code contains an eval call.
-  void RecordEvalCall()  { scope_calls_eval_ = true; }
+  void RecordEvalCall() { scope_calls_eval_ = true; }
 
 
   // ---------------------------------------------------------------------------
@@ -423,7 +423,7 @@
     return NULL;
   }
 
-  virtual VariableProxy* NewTemporary(Handle<String> name)  { return NULL; }
+  virtual Variable* NewTemporary(Handle<String> name) { return NULL; }
 
   virtual bool HasTrivialOuterContext() const {
     return (nesting_level_ == 0 || inside_with_level_ <= 0);
diff --git a/src/serialize.cc b/src/serialize.cc
index 15fed44..00a601f 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -470,6 +470,34 @@
       UNCLASSIFIED,
       32,
       "HandleScope::level");
+  Add(ExternalReference::new_deoptimizer_function().address(),
+      UNCLASSIFIED,
+      33,
+      "Deoptimizer::New()");
+  Add(ExternalReference::compute_output_frames_function().address(),
+      UNCLASSIFIED,
+      34,
+      "Deoptimizer::ComputeOutputFrames()");
+  Add(ExternalReference::address_of_min_int().address(),
+      UNCLASSIFIED,
+      35,
+      "LDoubleConstant::min_int");
+  Add(ExternalReference::address_of_one_half().address(),
+      UNCLASSIFIED,
+      36,
+      "LDoubleConstant::one_half");
+  Add(ExternalReference::address_of_negative_infinity().address(),
+      UNCLASSIFIED,
+      37,
+      "LDoubleConstant::negative_infinity");
+  Add(ExternalReference::power_double_double_function().address(),
+      UNCLASSIFIED,
+      38,
+      "power_double_double_function");
+  Add(ExternalReference::power_double_int_function().address(),
+      UNCLASSIFIED,
+      39,
+      "power_double_int_function");
 }
 
 
@@ -1370,6 +1398,13 @@
 }
 
 
+void Serializer::ObjectSerializer::VisitGlobalPropertyCell(RelocInfo* rinfo) {
+  // We shouldn't have any global property cell references in code
+  // objects in the snapshot.
+  UNREACHABLE();
+}
+
+
 void Serializer::ObjectSerializer::VisitExternalAsciiString(
     v8::String::ExternalAsciiStringResource** resource_pointer) {
   Address references_start = reinterpret_cast<Address>(resource_pointer);
diff --git a/src/serialize.h b/src/serialize.h
index 92a5149..e80c302 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -449,6 +449,7 @@
     void VisitExternalReferences(Address* start, Address* end);
     void VisitCodeTarget(RelocInfo* target);
     void VisitCodeEntry(Address entry_address);
+    void VisitGlobalPropertyCell(RelocInfo* rinfo);
     void VisitRuntimeEntry(RelocInfo* reloc);
     // Used for serializing the external strings that hold the natives source.
     void VisitExternalAsciiString(
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 7806223..b5ee1e4 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -412,6 +412,14 @@
 }
 
 
+bool PagedSpace::SafeContains(Address addr) {
+  if (!MemoryAllocator::SafeIsInAPageChunk(addr)) return false;
+  Page* p = Page::FromAddress(addr);
+  if (!p->is_valid()) return false;
+  return MemoryAllocator::IsPageInSpace(p, this);
+}
+
+
 // Try linear allocation in the page of alloc_info's allocation top.  Does
 // not contain slow case logic (eg, move to the next page or try free list
 // allocation) so it can be used by all the allocation functions and for all
@@ -460,16 +468,20 @@
 // -----------------------------------------------------------------------------
 // LargeObjectChunk
 
-HeapObject* LargeObjectChunk::GetObject() {
+Address LargeObjectChunk::GetStartAddress() {
   // Round the chunk address up to the nearest page-aligned address
   // and return the heap object in that page.
   Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
-  return HeapObject::FromAddress(page->ObjectAreaStart());
+  return page->ObjectAreaStart();
 }
 
 
+void LargeObjectChunk::Free(Executability executable) {
+  MemoryAllocator::FreeRawMemory(address(), size(), executable);
+}
+
 // -----------------------------------------------------------------------------
-// LargeObjectSpace
+// NewSpace
 
 MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
                                            AllocationInfo* alloc_info) {
@@ -489,6 +501,18 @@
 }
 
 
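+// Shrinks a sequential string in place. The string must be the most recent
+// allocation in new space, so the allocation top can simply be moved back
+// to the end of the shorter string, returning the tail to the space.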
+template <typename StringType>
+void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
+  ASSERT(length <= string->length());
+  ASSERT(string->IsSeqString());
+  ASSERT(string->address() + StringType::SizeFor(string->length()) ==
+         allocation_info_.top);
+  allocation_info_.top =
+      string->address() + StringType::SizeFor(length);
+  string->set_length(length);
+}
+
+
 bool FreeListNode::IsFreeListNode(HeapObject* object) {
   return object->map() == Heap::raw_unchecked_byte_array_map()
       || object->map() == Heap::raw_unchecked_one_pointer_filler_map()
diff --git a/src/spaces.cc b/src/spaces.cc
index 239c9cd..fca1032 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -333,6 +333,11 @@
 }
 
 
+bool MemoryAllocator::SafeIsInAPageChunk(Address addr) {
+  return InInitialChunk(addr) || InAllocatedChunks(addr);
+}
+
+
 void MemoryAllocator::TearDown() {
   for (int i = 0; i < max_nof_chunks_; i++) {
     if (chunks_[i].address() != NULL) DeleteChunk(i);
@@ -346,6 +351,10 @@
     initial_chunk_ = NULL;
   }
 
+  FreeChunkTables(&chunk_table_[0],
+                  kChunkTableTopLevelEntries,
+                  kChunkTableLevels);
+
   ASSERT(top_ == max_nof_chunks_);  // all chunks are free
   top_ = 0;
   capacity_ = 0;
@@ -355,6 +364,22 @@
 }
 
 
+void MemoryAllocator::FreeChunkTables(uintptr_t* array, int len, int level) {
+  for (int i = 0; i < len; i++) {
+    if (array[i] != kUnusedChunkTableEntry) {
+      uintptr_t* subarray = reinterpret_cast<uintptr_t*>(array[i]);
+      if (level > 1) {
+        array[i] = kUnusedChunkTableEntry;
+        FreeChunkTables(subarray, 1 << kChunkTableBitsPerLevel, level - 1);
+      } else {
+        array[i] = kUnusedChunkTableEntry;
+      }
+      delete[] subarray;
+    }
+  }
+}
+
+
 void* MemoryAllocator::AllocateRawMemory(const size_t requested,
                                          size_t* allocated,
                                          Executability executable) {
@@ -488,25 +513,19 @@
 }
 
 
-Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages,
+Page* MemoryAllocator::AllocatePages(int requested_pages,
+                                     int* allocated_pages,
                                      PagedSpace* owner) {
   if (requested_pages <= 0) return Page::FromAddress(NULL);
   size_t chunk_size = requested_pages * Page::kPageSize;
 
-  // There is not enough space to guarantee the desired number pages can be
-  // allocated.
-  if (size_ + static_cast<int>(chunk_size) > capacity_) {
-    // Request as many pages as we can.
-    chunk_size = capacity_ - size_;
-    requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);
-
-    if (requested_pages <= 0) return Page::FromAddress(NULL);
-  }
   void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
   if (chunk == NULL) return Page::FromAddress(NULL);
   LOG(NewEvent("PagedChunk", chunk, chunk_size));
 
   *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
+  // We may 'lose' a page due to alignment.
+  ASSERT(*allocated_pages >= kPagesPerChunk - 1);
   if (*allocated_pages == 0) {
     FreeRawMemory(chunk, chunk_size, owner->executable());
     LOG(DeleteEvent("PagedChunk", chunk));
@@ -518,7 +537,11 @@
 
   ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
   PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
-  return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
+  Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
+
+  AddToAllocatedChunks(static_cast<Address>(chunk), chunk_size);
+
+  return new_pages;
 }
 
 
@@ -675,6 +698,7 @@
     initial_chunk_->Uncommit(c.address(), c.size());
     Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
   } else {
+    RemoveFromAllocatedChunks(c.address(), c.size());
     LOG(DeleteEvent("PagedChunk", c.address()));
     ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity());
     size_t size = c.size();
@@ -788,6 +812,123 @@
 }
 
 
+void MemoryAllocator::AddToAllocatedChunks(Address addr, intptr_t size) {
+  ASSERT(size == kChunkSize);
+  uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
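+  // Chunks are not chunk-size aligned, so a chunk may straddle two
+  // chunk-sized regions. Register it under both its first and its last
+  // address so a lookup from any address inside the chunk finds it.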
+  AddChunkUsingAddress(int_address, int_address);
+  AddChunkUsingAddress(int_address, int_address + size - 1);
+}
+
+
+void MemoryAllocator::AddChunkUsingAddress(uintptr_t chunk_start,
+                                           uintptr_t chunk_index_base) {
+  uintptr_t* fine_grained = AllocatedChunksFinder(
+      chunk_table_,
+      chunk_index_base,
+      kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
+      kCreateTablesAsNeeded);
+  int index = FineGrainedIndexForAddress(chunk_index_base);
+  if (fine_grained[index] != kUnusedChunkTableEntry) index++;
+  ASSERT(fine_grained[index] == kUnusedChunkTableEntry);
+  fine_grained[index] = chunk_start;
+}
+
+
+void MemoryAllocator::RemoveFromAllocatedChunks(Address addr, intptr_t size) {
+  ASSERT(size == kChunkSize);
+  uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
+  RemoveChunkFoundUsingAddress(int_address, int_address);
+  RemoveChunkFoundUsingAddress(int_address, int_address + size - 1);
+}
+
+
+void MemoryAllocator::RemoveChunkFoundUsingAddress(
+    uintptr_t chunk_start,
+    uintptr_t chunk_index_base) {
+  uintptr_t* fine_grained = AllocatedChunksFinder(
+      chunk_table_,
+      chunk_index_base,
+      kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
+      kDontCreateTables);
+  // Can't remove an entry that's not there.
+  ASSERT(fine_grained != kUnusedChunkTableEntry);
+  int index = FineGrainedIndexForAddress(chunk_index_base);
+  ASSERT(fine_grained[index] != kUnusedChunkTableEntry);
+  if (fine_grained[index] != chunk_start) {
+    index++;
+    ASSERT(fine_grained[index] == chunk_start);
+    fine_grained[index] = kUnusedChunkTableEntry;
+  } else {
+    // If only one of the entries is used it must be the first, since
+    // InAllocatedChunks relies on that.  Move things around so that this is
+    // the case.
+    fine_grained[index] = fine_grained[index + 1];
+    fine_grained[index + 1] = kUnusedChunkTableEntry;
+  }
+}
+
+
+bool MemoryAllocator::InAllocatedChunks(Address addr) {
+  uintptr_t int_address = reinterpret_cast<uintptr_t>(addr);
+  uintptr_t* fine_grained = AllocatedChunksFinder(
+      chunk_table_,
+      int_address,
+      kChunkSizeLog2 + (kChunkTableLevels - 1) * kChunkTableBitsPerLevel,
+      kDontCreateTables);
+  if (fine_grained == NULL) return false;
+  int index = FineGrainedIndexForAddress(int_address);
+  if (fine_grained[index] == kUnusedChunkTableEntry) return false;
+  uintptr_t entry = fine_grained[index];
+  if (entry <= int_address && entry + kChunkSize > int_address) return true;
+  index++;
+  if (fine_grained[index] == kUnusedChunkTableEntry) return false;
+  entry = fine_grained[index];
+  if (entry <= int_address && entry + kChunkSize > int_address) return true;
+  return false;
+}
+
+
+uintptr_t* MemoryAllocator::AllocatedChunksFinder(
+    uintptr_t* table,
+    uintptr_t address,
+    int bit_position,
+    CreateTables create_as_needed) {
+  if (bit_position == kChunkSizeLog2) {
+    return table;
+  }
+  ASSERT(bit_position >= kChunkSizeLog2 + kChunkTableBitsPerLevel);
+  int index =
+      ((address >> bit_position) &
+       ((V8_INTPTR_C(1) << kChunkTableBitsPerLevel) - 1));
+  uintptr_t more_fine_grained_address =
+      address & ((V8_INTPTR_C(1) << bit_position) - 1);
+  ASSERT((table == chunk_table_ && index < kChunkTableTopLevelEntries) ||
+         (table != chunk_table_ && index < 1 << kChunkTableBitsPerLevel));
+  uintptr_t* more_fine_grained_table =
+      reinterpret_cast<uintptr_t*>(table[index]);
+  if (more_fine_grained_table == kUnusedChunkTableEntry) {
+    if (create_as_needed == kDontCreateTables) return NULL;
+    int words_needed = 1 << kChunkTableBitsPerLevel;
+    if (bit_position == kChunkTableBitsPerLevel + kChunkSizeLog2) {
+      words_needed =
+          (1 << kChunkTableBitsPerLevel) * kChunkTableFineGrainedWordsPerEntry;
+    }
+    more_fine_grained_table = new uintptr_t[words_needed];
+    for (int i = 0; i < words_needed; i++) {
+      more_fine_grained_table[i] = kUnusedChunkTableEntry;
+    }
+    table[index] = reinterpret_cast<uintptr_t>(more_fine_grained_table);
+  }
+  return AllocatedChunksFinder(
+      more_fine_grained_table,
+      more_fine_grained_address,
+      bit_position - kChunkTableBitsPerLevel,
+      create_as_needed);
+}
+
+
+uintptr_t MemoryAllocator::chunk_table_[kChunkTableTopLevelEntries];
+
 
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
@@ -1010,7 +1151,10 @@
 
   int available_pages =
       static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
-  if (available_pages <= 0) return false;
+  // We don't want to have to handle small chunks near the end, so if there
+  // are not kPagesPerChunk pages available without exceeding the max
+  // capacity, act as if memory has run out.
+  if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
 
   int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
   Page* p = MemoryAllocator::AllocatePages(desired_pages, &desired_pages, this);
@@ -1544,6 +1688,7 @@
   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
     switch (static_cast<Code::Kind>(i)) {
       CASE(FUNCTION);
+      CASE(OPTIMIZED_FUNCTION);
       CASE(STUB);
       CASE(BUILTIN);
       CASE(LOAD_IC);
@@ -1553,6 +1698,8 @@
       CASE(CALL_IC);
       CASE(KEYED_CALL_IC);
       CASE(BINARY_OP_IC);
+      CASE(TYPE_RECORDING_BINARY_OP_IC);
+      CASE(COMPARE_IC);
     }
   }
 
@@ -2697,32 +2844,40 @@
 // LargeObjectChunk
 
 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
-                                        size_t* chunk_size,
                                         Executability executable) {
   size_t requested = ChunkSizeFor(size_in_bytes);
-  void* mem = MemoryAllocator::AllocateRawMemory(requested,
-                                                 chunk_size,
-                                                 executable);
+  size_t size;
+  void* mem = MemoryAllocator::AllocateRawMemory(requested, &size, executable);
   if (mem == NULL) return NULL;
-  LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
-  if (*chunk_size < requested) {
-    MemoryAllocator::FreeRawMemory(mem, *chunk_size, executable);
+
+  // The start of the chunk may be overlayed with a page so we have to
+  // make sure that the page flags fit in the size field.
+  ASSERT((size & Page::kPageFlagMask) == 0);
+
+  LOG(NewEvent("LargeObjectChunk", mem, size));
+  if (size < requested) {
+    MemoryAllocator::FreeRawMemory(mem, size, executable);
     LOG(DeleteEvent("LargeObjectChunk", mem));
     return NULL;
   }
-  ObjectSpace space =
-      (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
-  MemoryAllocator::PerformAllocationCallback(space,
-                                             kAllocationActionAllocate,
-                                             *chunk_size);
-  return reinterpret_cast<LargeObjectChunk*>(mem);
+
+  ObjectSpace space = (executable == EXECUTABLE)
+      ? kObjectSpaceCodeSpace
+      : kObjectSpaceLoSpace;
+  MemoryAllocator::PerformAllocationCallback(
+      space, kAllocationActionAllocate, size);
+
+  LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
+  chunk->size_ = size;
+  return chunk;
 }
 
 
 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
   int os_alignment = static_cast<int>(OS::AllocateAlignment());
-  if (os_alignment < Page::kPageSize)
+  if (os_alignment < Page::kPageSize) {
     size_in_bytes += (Page::kPageSize - os_alignment);
+  }
   return size_in_bytes + Page::kObjectStartOffset;
 }
 
@@ -2803,27 +2958,24 @@
     return Failure::RetryAfterGC(identity());
   }
 
-  size_t chunk_size;
-  LargeObjectChunk* chunk =
-      LargeObjectChunk::New(requested_size, &chunk_size, executable);
+  LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
   if (chunk == NULL) {
     return Failure::RetryAfterGC(identity());
   }
 
-  size_ += static_cast<int>(chunk_size);
+  size_ += static_cast<int>(chunk->size());
   objects_size_ += requested_size;
   page_count_++;
   chunk->set_next(first_chunk_);
-  chunk->set_size(chunk_size);
   first_chunk_ = chunk;
 
   // Initialize page header.
   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
   Address object_address = page->ObjectAreaStart();
+
   // Clear the low order bit of the second word in the page to flag it as a
   // large object page.  If the chunk_size happened to be written there, its
   // low order bit should already be clear.
-  ASSERT((chunk_size & 0x1) == 0);
   page->SetIsLargeObjectPage(true);
   page->SetIsPageExecutable(executable);
   page->SetRegionMarks(Page::kAllRegionsCleanMarks);
diff --git a/src/spaces.h b/src/spaces.h
index 1b99c56..4f2d07b 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -609,6 +609,9 @@
     return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
   }
 
+  // Sanity check on a pointer.
+  static bool SafeIsInAPageChunk(Address addr);
+
   // Links two pages.
   static inline void SetNextPage(Page* prev, Page* next);
 
@@ -650,23 +653,50 @@
   static void ReportStatistics();
 #endif
 
+  static void AddToAllocatedChunks(Address addr, intptr_t size);
+  static void RemoveFromAllocatedChunks(Address addr, intptr_t size);
+  // Note: This only checks the regular chunks, not the odd-sized initial
+  // chunk.
+  static bool InAllocatedChunks(Address addr);
+
   // Due to encoding limitation, we can only have 8K chunks.
   static const int kMaxNofChunks = 1 << kPageSizeBits;
   // If a chunk has at least 16 pages, the maximum heap size is about
   // 8K * 8K * 16 = 1G bytes.
 #ifdef V8_TARGET_ARCH_X64
   static const int kPagesPerChunk = 32;
+  // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
+  static const int kPagesPerChunkLog2 = 5;
+  static const int kChunkTableLevels = 4;
+  static const int kChunkTableBitsPerLevel = 12;
 #else
   static const int kPagesPerChunk = 16;
+  // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
+  static const int kPagesPerChunkLog2 = 4;
+  static const int kChunkTableLevels = 2;
+  static const int kChunkTableBitsPerLevel = 8;
 #endif
-  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
 
  private:
+  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
+  static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
+  static const int kChunkTableTopLevelEntries =
+      1 << (sizeof(intptr_t) * kBitsPerByte - kChunkSizeLog2 -
+          (kChunkTableLevels - 1) * kChunkTableBitsPerLevel);
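+  // E.g. on 32 bit with 8K pages: a chunk is 16 * 8K == 128K, so
+  // kChunkSizeLog2 == 17 and the top level has 1 << (32 - 17 - 8) == 128
+  // entries.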
+
+  // The chunks are not chunk-size aligned so for a given chunk-sized area of
+  // memory there can be two chunks that cover it.
+  static const int kChunkTableFineGrainedWordsPerEntry = 2;
+  static const uintptr_t kUnusedChunkTableEntry = 0;
+
   // Maximum space size in bytes.
   static intptr_t capacity_;
   // Maximum subset of capacity_ that can be executable
   static intptr_t capacity_executable_;
 
+  // Top level table to track whether memory is part of a chunk or not.
+  static uintptr_t chunk_table_[kChunkTableTopLevelEntries];
+
   // Allocated space size in bytes.
   static intptr_t size_;
   // Allocated executable space size in bytes.
@@ -725,6 +755,28 @@
   // Frees a chunk.
   static void DeleteChunk(int chunk_id);
 
+  // Helpers to maintain and query the chunk tables.
+  static void AddChunkUsingAddress(
+      uintptr_t chunk_start,        // Where the chunk starts.
+      uintptr_t chunk_index_base);  // Used to place the chunk in the tables.
+  static void RemoveChunkFoundUsingAddress(
+      uintptr_t chunk_start,        // Where the chunk starts.
+      uintptr_t chunk_index_base);  // Used to locate the entry in the tables.
+  // Controls whether the lookup creates intermediate levels of tables as
+  // needed.
+  enum CreateTables { kDontCreateTables, kCreateTablesAsNeeded };
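+  // Walks the chunk tables like a radix tree: at each level the
+  // kChunkTableBitsPerLevel address bits below bit_position select the
+  // subtable, until bit_position reaches kChunkSizeLog2 and the
+  // fine-grained table of chunk start addresses is returned.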
+  static uintptr_t* AllocatedChunksFinder(uintptr_t* table,
+                                          uintptr_t address,
+                                          int bit_position,
+                                          CreateTables create_as_needed);
+  static void FreeChunkTables(uintptr_t* array, int length, int level);
+  static int FineGrainedIndexForAddress(uintptr_t address) {
+    int index = ((address >> kChunkSizeLog2) &
+        ((1 << kChunkTableBitsPerLevel) - 1));
+    return index * kChunkTableFineGrainedWordsPerEntry;
+  }
+
+
   // Basic check whether a chunk id is in the valid range.
   static inline bool IsValidChunkId(int chunk_id);
 
@@ -1019,6 +1071,8 @@
   // Checks whether an object/address is in this space.
   inline bool Contains(Address a);
   bool Contains(HeapObject* o) { return Contains(o->address()); }
+  // Never crashes even if a is not a valid pointer.
+  inline bool SafeContains(Address a);
 
   // Given an address occupied by a live object, return that object if it is
   // in this space, or Failure::Exception() if it is not. The implementation
@@ -1588,6 +1642,11 @@
 
   virtual bool ReserveSpace(int bytes);
 
+  // Resizes a sequential string which must be the most recent thing that was
+  // allocated in new space.
+  template <typename StringType>
+  inline void ShrinkStringAtAllocationBoundary(String* string, int len);
+
 #ifdef ENABLE_HEAP_PROTECTION
   // Protect/unprotect the space by marking it read-only/writable.
   virtual void Protect();
@@ -2062,12 +2121,6 @@
     accounting_stats_.DeallocateBytes(accounting_stats_.Size());
     accounting_stats_.AllocateBytes(new_size);
 
-    // Flush allocation watermarks.
-    for (Page* p = first_page_; p != top_page; p = p->next_page()) {
-      p->SetAllocationWatermark(p->AllocationTop());
-    }
-    top_page->SetAllocationWatermark(new_top);
-
 #ifdef DEBUG
     if (FLAG_enable_slow_asserts) {
       intptr_t actual_size = 0;
@@ -2138,10 +2191,10 @@
   // Allocates a new LargeObjectChunk that contains a large object page
   // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
   // object) bytes after the object area start of that page.
-  // The allocated chunk size is set in the output parameter chunk_size.
-  static LargeObjectChunk* New(int size_in_bytes,
-                               size_t* chunk_size,
-                               Executability executable);
+  static LargeObjectChunk* New(int size_in_bytes, Executability executable);
+
+  // Free the memory associated with the chunk.
+  inline void Free(Executability executable);
 
   // Interpret a raw address as a large object chunk.
   static LargeObjectChunk* FromAddress(Address address) {
@@ -2154,12 +2207,13 @@
   // Accessors for the fields of the chunk.
   LargeObjectChunk* next() { return next_; }
   void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
-
   size_t size() { return size_ & ~Page::kPageFlagMask; }
-  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
+
+  // Compute the start address in the chunk.
+  inline Address GetStartAddress();
 
   // Returns the object in this chunk.
-  inline HeapObject* GetObject();
+  HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
 
   // Given a requested size returns the physical size of a chunk to be
   // allocated.
@@ -2176,7 +2230,7 @@
   // A pointer to the next large object chunk in the space or NULL.
   LargeObjectChunk* next_;
 
-  // The size of this chunk.
+  // The total size of this chunk.
   size_t size_;
 
  public:
diff --git a/src/string-stream.cc b/src/string-stream.cc
index d1859a2..7abd1bb 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -264,7 +264,7 @@
 }
 
 
-void StringStream::OutputToStdOut() {
+void StringStream::OutputToFile(FILE* out) {
   // Dump the output to stdout, but make sure to break it up into
   // manageable chunks to avoid losing parts of the output in the OS
   // printing code. This is a problem on Windows in particular; see
@@ -273,10 +273,10 @@
   for (unsigned next; (next = position + 2048) < length_; position = next) {
     char save = buffer_[next];
     buffer_[next] = '\0';
-    internal::PrintF("%s", &buffer_[position]);
+    internal::PrintF(out, "%s", &buffer_[position]);
     buffer_[next] = save;
   }
-  internal::PrintF("%s", &buffer_[position]);
+  internal::PrintF(out, "%s", &buffer_[position]);
 }
 
 
diff --git a/src/string-stream.h b/src/string-stream.h
index 323a6d6..b3f2e0d 100644
--- a/src/string-stream.h
+++ b/src/string-stream.h
@@ -138,10 +138,12 @@
            FmtElm arg3);
 
   // Getting the message out.
-  void OutputToStdOut();
+  void OutputToFile(FILE* out);
+  void OutputToStdOut() { OutputToFile(stdout); }
   void Log();
   Handle<String> ToString();
   SmartPointer<const char> ToCString() const;
+  int length() const { return length_; }
 
   // Object printing support.
   void PrintName(Object* o);
diff --git a/src/string.js b/src/string.js
index 3b3c82b..9527599 100644
--- a/src/string.js
+++ b/src/string.js
@@ -101,28 +101,28 @@
 
 
 // ECMA-262 section 15.5.4.7
-function StringIndexOf(searchString /* position */) {  // length == 1
-  var subject_str = TO_STRING_INLINE(this);
-  var pattern_str = TO_STRING_INLINE(searchString);
-  var subject_str_len = subject_str.length;
-  var pattern_str_len = pattern_str.length;
+function StringIndexOf(pattern /* position */) {  // length == 1
+  var subject = TO_STRING_INLINE(this);
+  var pattern = TO_STRING_INLINE(pattern);
+  var subject_len = subject.length;
+  var pattern_len = pattern.length;
   var index = 0;
   if (%_ArgumentsLength() > 1) {
     var arg1 = %_Arguments(1);  // position
     index = TO_INTEGER(arg1);
   }
   if (index < 0) index = 0;
-  if (index > subject_str_len) index = subject_str_len;
-  if (pattern_str_len + index > subject_str_len) return -1;
-  return %StringIndexOf(subject_str, pattern_str, index);
+  if (index > subject_len) index = subject_len;
+  if (pattern_len + index > subject_len) return -1;
+  return %StringIndexOf(subject, pattern, index);
 }
 
 
 // ECMA-262 section 15.5.4.8
-function StringLastIndexOf(searchString /* position */) {  // length == 1
+function StringLastIndexOf(pat /* position */) {  // length == 1
   var sub = TO_STRING_INLINE(this);
   var subLength = sub.length;
-  var pat = TO_STRING_INLINE(searchString);
+  var pat = TO_STRING_INLINE(pat);
   var patLength = pat.length;
   var index = subLength - patLength;
   if (%_ArgumentsLength() > 1) {
@@ -150,10 +150,8 @@
 // do anything locale specific.
 function StringLocaleCompare(other) {
   if (%_ArgumentsLength() === 0) return 0;
-
-  var this_str = TO_STRING_INLINE(this);
-  var other_str = TO_STRING_INLINE(other);
-  return %StringLocaleCompare(this_str, other_str);
+  return %StringLocaleCompare(TO_STRING_INLINE(this),
+                              TO_STRING_INLINE(other));
 }
 
 
@@ -161,7 +159,7 @@
 function StringMatch(regexp) {
   var subject = TO_STRING_INLINE(this);
   if (IS_REGEXP(regexp)) {
-    if (!regexp.global) return regexp.exec(subject);
+    if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
     %_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
     // lastMatchInfo is defined in regexp.js.
     return %StringMatch(subject, regexp, lastMatchInfo);
@@ -177,9 +175,7 @@
 // otherwise we call the runtime system.
 function SubString(string, start, end) {
   // Use the one character string cache.
-  if (start + 1 == end) {
-    return %_StringCharAt(string, start);
-  }
+  if (start + 1 == end) return %_StringCharAt(string, start);
   return %_SubString(string, start, end);
 }
 
@@ -208,7 +204,10 @@
                                                         replace);
       }
     } else {
-      return StringReplaceRegExp(subject, search, replace);
+      return %StringReplaceRegExpWithString(subject,
+                                            search,
+                                            TO_STRING_INLINE(replace),
+                                            lastMatchInfo);
     }
   }
 
@@ -224,7 +223,11 @@
 
   // Compute the string to replace with.
   if (IS_FUNCTION(replace)) {
-    builder.add(replace.call(null, search, start, subject));
+    builder.add(%_CallFunction(%GetGlobalReceiver(),
+                               search,
+                               start,
+                               subject,
+                               replace));
   } else {
     reusableMatchInfo[CAPTURE0] = start;
     reusableMatchInfo[CAPTURE1] = end;
@@ -239,29 +242,21 @@
 }
 
 
-// Helper function for regular expressions in String.prototype.replace.
-function StringReplaceRegExp(subject, regexp, replace) {
-  return %StringReplaceRegExpWithString(subject,
-                                        regexp,
-                                        TO_STRING_INLINE(replace),
-                                        lastMatchInfo);
-}
-
-
 // Expand the $-expressions in the string and return a new string with
 // the result.
 function ExpandReplacement(string, subject, matchInfo, builder) {
+  var length = string.length;
+  var builder_elements = builder.elements;
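+  // $-expressions: '$$' expands to a literal '$', '$&' to the whole
+  // match, and '$1'..'$99' to the corresponding capture; e.g.
+  // 'ab'.replace(/(a)(b)/, '$2$1') == 'ba'.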
   var next = %StringIndexOf(string, '$', 0);
   if (next < 0) {
-    builder.add(string);
+    if (length > 0) builder_elements.push(string);
     return;
   }
 
   // Compute the number of captures; see ECMA-262, 15.5.4.11, p. 102.
   var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;  // Includes the match.
 
-  if (next > 0) builder.add(SubString(string, 0, next));
-  var length = string.length;
+  if (next > 0) builder_elements.push(SubString(string, 0, next));
 
   while (true) {
     var expansion = '$';
@@ -270,7 +265,7 @@
       var peek = %_StringCharCodeAt(string, position);
       if (peek == 36) {         // $$
         ++position;
-        builder.add('$');
+        builder_elements.push('$');
       } else if (peek == 38) {  // $& - match
         ++position;
         builder.addSpecialSlice(matchInfo[CAPTURE0],
@@ -307,14 +302,14 @@
           // digit capture references, we can only enter here when a
           // single digit capture reference is outside the range of
           // captures.
-          builder.add('$');
+          builder_elements.push('$');
           --position;
         }
       } else {
-        builder.add('$');
+        builder_elements.push('$');
       }
     } else {
-      builder.add('$');
+      builder_elements.push('$');
     }
 
     // Go to the next $ in the string.
@@ -324,13 +319,15 @@
     // haven't reached the end, we need to append the suffix.
     if (next < 0) {
       if (position < length) {
-        builder.add(SubString(string, position, length));
+        builder_elements.push(SubString(string, position, length));
       }
       return;
     }
 
     // Append substring between the previous and the next $ character.
-    builder.add(SubString(string, position, next));
+    if (next > position) {
+      builder_elements.push(SubString(string, position, next));
+    }
   }
 };
 
@@ -408,9 +405,7 @@
         lastMatchInfoOverride = override;
         var func_result =
             %_CallFunction(receiver, elem, match_start, subject, replace);
-        if (!IS_STRING(func_result)) {
-          func_result = NonStringToString(func_result);
-        }
+        func_result = TO_STRING_INLINE(func_result);
         res[i] = func_result;
         match_start += elem.length;
       }
@@ -424,9 +419,7 @@
         // Use the apply argument as backing for global RegExp properties.
         lastMatchInfoOverride = elem;
         var func_result = replace.apply(null, elem);
-        if (!IS_STRING(func_result)) {
-          func_result = NonStringToString(func_result);
-        }
+        func_result = TO_STRING_INLINE(func_result);
         res[i] = func_result;
       }
       i++;
@@ -487,8 +480,7 @@
   } else {
     regexp = new $RegExp(re);
   }
-  var s = TO_STRING_INLINE(this);
-  var match = DoRegExpExec(regexp, s, 0);
+  var match = DoRegExpExec(regexp, TO_STRING_INLINE(this), 0);
   if (match) {
     return match[CAPTURE0];
   }
@@ -570,23 +562,22 @@
 
   var currentIndex = 0;
   var startIndex = 0;
+  var startMatch = 0;
   var result = [];
 
   outer_loop:
   while (true) {
 
     if (startIndex === length) {
-      result[result.length] = subject.slice(currentIndex, length);
+      result.push(SubString(subject, currentIndex, length));
       break;
     }
 
-    var matchInfo = splitMatch(separator, subject, currentIndex, startIndex);
-
-    if (IS_NULL(matchInfo)) {
-      result[result.length] = subject.slice(currentIndex, length);
+    var matchInfo = DoRegExpExec(separator, subject, startIndex);
+    if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) {
+      result.push(SubString(subject, currentIndex, length));
       break;
     }
-
     var endIndex = matchInfo[CAPTURE1];
 
     // We ignore a zero-length match at the currentIndex.
@@ -595,17 +586,26 @@
       continue;
     }
 
-    result[result.length] = SubString(subject, currentIndex, matchInfo[CAPTURE0]);
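+    // Add the substring up to the match start, inlining SubString's
+    // one-character string cache fast case.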
+    if (currentIndex + 1 == startMatch) {
+      result.push(%_StringCharAt(subject, currentIndex));
+    } else {
+      result.push(%_SubString(subject, currentIndex, startMatch));
+    }
+
     if (result.length === limit) break;
 
-    var num_captures = NUMBER_OF_CAPTURES(matchInfo);
-    for (var i = 2; i < num_captures; i += 2) {
-      var start = matchInfo[CAPTURE(i)];
-      var end = matchInfo[CAPTURE(i + 1)];
-      if (start != -1 && end != -1) {
-        result[result.length] = SubString(subject, start, end);
+    var matchinfo_len = NUMBER_OF_CAPTURES(matchInfo) + REGEXP_FIRST_CAPTURE;
+    for (var i = REGEXP_FIRST_CAPTURE + 2; i < matchinfo_len; ) {
+      var start = matchInfo[i++];
+      var end = matchInfo[i++];
+      if (end != -1) {
+        if (start + 1 == end) {
+          result.push(%_StringCharAt(subject, start));
+        } else {
+          result.push(%_SubString(subject, start, end));
+        }
       } else {
-        result[result.length] = void 0;
+        result.push(void 0);
       }
       if (result.length === limit) break outer_loop;
     }
@@ -616,19 +616,6 @@
 }
 
 
-// ECMA-262 section 15.5.4.14
-// Helper function used by split.  This version returns the matchInfo
-// instead of allocating a new array with basically the same information.
-function splitMatch(separator, subject, current_index, start_index) {
-  var matchInfo = DoRegExpExec(separator, subject, start_index);
-  if (matchInfo == null) return null;
-  // Section 15.5.4.14 paragraph two says that we do not allow zero length
-  // matches at the end of the string.
-  if (matchInfo[CAPTURE0] === subject.length) return null;
-  return matchInfo;
-}
-
-
 // ECMA-262 section 15.5.4.15
 function StringSubstring(start, end) {
   var s = TO_STRING_INLINE(this);
@@ -656,7 +643,9 @@
     }
   }
 
-  return SubString(s, start_i, end_i);
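+  // Inlined SubString: single-character results come from the
+  // one-character string cache.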
+  return (start_i + 1 == end_i
+          ? %_StringCharAt(s, start_i)
+          : %_SubString(s, start_i, end_i));
 }
 
 
@@ -694,7 +683,9 @@
   var end = start + len;
   if (end > s.length) end = s.length;
 
-  return SubString(s, start, end);
+  return (start + 1 == end
+          ? %_StringCharAt(s, start)
+          : %_SubString(s, start, end));
 }
 
 
@@ -847,24 +838,21 @@
 
 ReplaceResultBuilder.prototype.add = function(str) {
   str = TO_STRING_INLINE(str);
-  if (str.length > 0) {
-    var elements = this.elements;
-    elements[elements.length] = str;
-  }
+  if (str.length > 0) this.elements.push(str);
 }
 
 
 ReplaceResultBuilder.prototype.addSpecialSlice = function(start, end) {
   var len = end - start;
   if (start < 0 || len <= 0) return;
-  var elements = this.elements;
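+  // Slices with start < 2^19 and len < 2^11 are encoded in a single
+  // smi as (start << 11) | len; e.g. start 5, len 3 becomes
+  // (5 << 11) | 3 == 10243.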
   if (start < 0x80000 && len < 0x800) {
-    elements[elements.length] = (start << 11) | len;
+    this.elements.push((start << 11) | len);
   } else {
     // 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
     // so -len is a smi.
-    elements[elements.length] = -len;
-    elements[elements.length] = start;
+    var elements = this.elements;
+    elements.push(-len);
+    elements.push(start);
   }
 }
 
@@ -875,11 +863,6 @@
 }
 
 
-function StringToJSON(key) {
-  return CheckJSONPrimitive(this.valueOf());
-}
-
-
 // -------------------------------------------------------------------
 
 function SetupString() {
@@ -929,8 +912,7 @@
     "small", StringSmall,
     "strike", StringStrike,
     "sub", StringSub,
-    "sup", StringSup,
-    "toJSON", StringToJSON
+    "sup", StringSup
   ));
 }
 
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 5cc009f..86e7201 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -31,6 +31,7 @@
 #include "arguments.h"
 #include "ic-inl.h"
 #include "stub-cache.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -425,6 +426,27 @@
 }
 
 
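+// Returns a keyed load stub specialized for the receiver's map,
+// compiling it and caching it in the map's code cache on first use.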
+MaybeObject* StubCache::ComputeKeyedLoadSpecialized(JSObject* receiver) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
+  String* name = Heap::KeyedLoadSpecialized_symbol();
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    { MaybeObject* maybe_code = compiler.CompileLoadSpecialized(receiver);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
+  return code;
+}
+
+
 MaybeObject* StubCache::ComputeStoreField(String* name,
                                           JSObject* receiver,
                                           int field_index,
@@ -449,6 +471,27 @@
 }
 
 
+MaybeObject* StubCache::ComputeKeyedStoreSpecialized(JSObject* receiver) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL);
+  String* name = Heap::KeyedStoreSpecialized_symbol();
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedStoreStubCompiler compiler;
+    { MaybeObject* maybe_code = compiler.CompileStoreSpecialized(receiver);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
+  return code;
+}
+
+
 MaybeObject* StubCache::ComputeStoreNormal() {
   return Builtins::builtin(Builtins::StoreIC_Normal);
 }
@@ -561,13 +604,13 @@
   JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
 
   // Compute check type based on receiver/holder.
-  StubCompiler::CheckType check = StubCompiler::RECEIVER_MAP_CHECK;
+  CheckType check = RECEIVER_MAP_CHECK;
   if (object->IsString()) {
-    check = StubCompiler::STRING_CHECK;
+    check = STRING_CHECK;
   } else if (object->IsNumber()) {
-    check = StubCompiler::NUMBER_CHECK;
+    check = NUMBER_CHECK;
   } else if (object->IsBoolean()) {
-    check = StubCompiler::BOOLEAN_CHECK;
+    check = BOOLEAN_CHECK;
   }
 
   Code::Flags flags =
@@ -589,6 +632,7 @@
           compiler.CompileCallConstant(object, holder, function, name, check);
       if (!maybe_code->ToObject(&code)) return maybe_code;
     }
+    Code::cast(code)->set_check_type(check);
     ASSERT_EQ(flags, Code::cast(code)->flags());
     PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
                             Code::cast(code), name));
@@ -953,6 +997,48 @@
 }
 
 
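+// A stub cache hit is only valid if recomputing the probe offset for
+// (name, flags, map) points back at the entry; stale or displaced
+// entries are skipped, and secondary hits already reachable through
+// the primary table are deduplicated.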
+void StubCache::CollectMatchingMaps(ZoneMapList* types,
+                                    String* name,
+                                    Code::Flags flags) {
+  for (int i = 0; i < kPrimaryTableSize; i++) {
+    if (primary_[i].key == name) {
+      Map* map = primary_[i].value->FindFirstMap();
+      // The map can be NULL if the stub is a constant function
+      // call with a primitive receiver.
+      if (map == NULL) continue;
+
+      int offset = PrimaryOffset(name, flags, map);
+      if (entry(primary_, offset) == &primary_[i]) {
+        types->Add(Handle<Map>(map));
+      }
+    }
+  }
+
+  for (int i = 0; i < kSecondaryTableSize; i++) {
+    if (secondary_[i].key == name) {
+      Map* map = secondary_[i].value->FindFirstMap();
+      // The map can be NULL if the stub is a constant function
+      // call with a primitive receiver.
+      if (map == NULL) continue;
+
+      // Lookup in primary table and skip duplicates.
+      int primary_offset = PrimaryOffset(name, flags, map);
+      Entry* primary_entry = entry(primary_, primary_offset);
+      if (primary_entry->key == name) {
+        Map* primary_map = primary_entry->value->FindFirstMap();
+        if (map == primary_map) continue;
+      }
+
+      // Lookup in secondary table and add matches.
+      int offset = SecondaryOffset(name, flags, primary_offset);
+      if (entry(secondary_, offset) == &secondary_[i]) {
+        types->Add(Handle<Map>(map));
+      }
+    }
+  }
+}
+
+
 // ------------------------------------------------------------------------
 // StubCompiler implementation.
 
@@ -970,9 +1056,7 @@
   {
     // Leaving JavaScript.
     VMState state(EXTERNAL);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-    state.set_external_callback(getter_address);
-#endif
+    ExternalCallbackScope call_scope(getter_address);
     result = fun(v8::Utils::ToLocal(args.at<String>(4)), info);
   }
   RETURN_IF_SCHEDULED_EXCEPTION();
@@ -996,9 +1080,7 @@
   {
     // Leaving JavaScript.
     VMState state(EXTERNAL);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-    state.set_external_callback(setter_address);
-#endif
+    ExternalCallbackScope call_scope(setter_address);
     fun(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
   }
   RETURN_IF_SCHEDULED_EXCEPTION();
@@ -1419,25 +1501,31 @@
 }
 
 
-MaybeObject* CallStubCompiler::CompileCustomCall(int generator_id,
+bool CallStubCompiler::HasCustomCallGenerator(BuiltinFunctionId id) {
+#define CALL_GENERATOR_CASE(name) if (id == k##name) return true;
+  CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
+#undef CALL_GENERATOR_CASE
+  return false;
+}
+
+
+MaybeObject* CallStubCompiler::CompileCustomCall(BuiltinFunctionId id,
                                                  Object* object,
                                                  JSObject* holder,
                                                  JSGlobalPropertyCell* cell,
                                                  JSFunction* function,
                                                  String* fname) {
-  ASSERT(generator_id >= 0 && generator_id < kNumCallGenerators);
-  switch (generator_id) {
-#define CALL_GENERATOR_CASE(ignored1, ignored2, name)           \
-    case k##name##CallGenerator:                                \
-      return CallStubCompiler::Compile##name##Call(object,      \
-                                                   holder,      \
-                                                   cell,        \
-                                                   function,    \
-                                                   fname);
-    CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
-#undef CALL_GENERATOR_CASE
+#define CALL_GENERATOR_CASE(name)                          \
+  if (id == k##name) {                                     \
+    return CallStubCompiler::Compile##name##Call(object,   \
+                                                 holder,   \
+                                                 cell,     \
+                                                 function, \
+                                                 fname);   \
   }
-  UNREACHABLE();
+  CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
+#undef CALL_GENERATOR_CASE
+  ASSERT(!HasCustomCallGenerator(id));
   return Heap::undefined_value();
 }
 
diff --git a/src/stub-cache.h b/src/stub-cache.h
index cef5481..a7829a6 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -29,6 +29,7 @@
 #define V8_STUB_CACHE_H_
 
 #include "macro-assembler.h"
+#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -44,6 +45,7 @@
 
 class SCTableReference;
 
+
 class StubCache : public AllStatic {
  public:
   struct Entry {
@@ -76,9 +78,10 @@
                                                           JSObject* holder,
                                                           Object* value);
 
-  MUST_USE_RESULT static MaybeObject* ComputeLoadInterceptor(String* name,
-                                                             JSObject* receiver,
-                                                             JSObject* holder);
+  MUST_USE_RESULT static MaybeObject* ComputeLoadInterceptor(
+      String* name,
+      JSObject* receiver,
+      JSObject* holder);
 
   MUST_USE_RESULT static MaybeObject* ComputeLoadNormal();
 
@@ -127,6 +130,9 @@
       String* name,
       JSFunction* receiver);
 
+  MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadSpecialized(
+      JSObject* receiver);
+
   // ---
 
   MUST_USE_RESULT static MaybeObject* ComputeStoreField(String* name,
@@ -158,6 +164,9 @@
       int field_index,
       Map* transition = NULL);
 
+  MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreSpecialized(
+      JSObject* receiver);
+
   // ---
 
   MUST_USE_RESULT static MaybeObject* ComputeCallField(int argc,
@@ -244,6 +253,11 @@
   // Clear the lookup table (@ mark compact collection).
   static void Clear();
 
+  // Collect all maps that match the name and flags.
+  static void CollectMatchingMaps(ZoneMapList* types,
+                                  String* name,
+                                  Code::Flags flags);
+
   // Generate code for probing the stub cache table.
   // Arguments extra and extra2 may be used to pass additional scratch
   // registers. Set to no_reg if not needed.
@@ -366,13 +380,6 @@
 // The stub compiler compiles stubs for the stub cache.
 class StubCompiler BASE_EMBEDDED {
  public:
-  enum CheckType {
-    RECEIVER_MAP_CHECK,
-    STRING_CHECK,
-    NUMBER_CHECK,
-    BOOLEAN_CHECK
-  };
-
   StubCompiler() : scope_(), masm_(NULL, 256), failure_(NULL) { }
 
   MUST_USE_RESULT MaybeObject* CompileCallInitialize(Code::Flags flags);
@@ -564,7 +571,7 @@
                                                  bool is_dont_delete);
 
  private:
-  MaybeObject* GetCode(PropertyType type, String* name);
+  MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
 };
 
 
@@ -593,6 +600,8 @@
   MUST_USE_RESULT MaybeObject* CompileLoadStringLength(String* name);
   MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
 
+  MUST_USE_RESULT MaybeObject* CompileLoadSpecialized(JSObject* receiver);
+
  private:
   MaybeObject* GetCode(PropertyType type, String* name);
 };
@@ -604,6 +613,7 @@
                                                  int index,
                                                  Map* transition,
                                                  String* name);
+
   MUST_USE_RESULT MaybeObject* CompileStoreCallback(JSObject* object,
                                                     AccessorInfo* callbacks,
                                                     String* name);
@@ -615,53 +625,38 @@
 
 
  private:
-  MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
+  MaybeObject* GetCode(PropertyType type, String* name);
 };
 
 
 class KeyedStoreStubCompiler: public StubCompiler {
  public:
-  MaybeObject* CompileStoreField(JSObject* object,
-                                 int index,
-                                 Map* transition,
-                                 String* name);
+  MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
+                                                 int index,
+                                                 Map* transition,
+                                                 String* name);
+
+  MUST_USE_RESULT MaybeObject* CompileStoreSpecialized(JSObject* receiver);
 
  private:
   MaybeObject* GetCode(PropertyType type, String* name);
 };
 
 
-// List of functions with custom constant call IC stubs.
-//
-// Installation of custom call generators for the selected builtins is
-// handled by the bootstrapper.
-//
-// Each entry has a name of a global object property holding an object
-// optionally followed by ".prototype" (this controls whether the
-// generator is set on the object itself or, in case it's a function,
-// on the its instance prototype), a name of a builtin function on the
-// object (the one the generator is set for), and a name of the
-// generator (used to build ids and generator function names).
-#define CUSTOM_CALL_IC_GENERATORS(V)                \
-  V(Array.prototype, push, ArrayPush)               \
-  V(Array.prototype, pop, ArrayPop)                 \
-  V(String.prototype, charCodeAt, StringCharCodeAt) \
-  V(String.prototype, charAt, StringCharAt)         \
-  V(String, fromCharCode, StringFromCharCode)       \
-  V(Math, floor, MathFloor)                         \
-  V(Math, abs, MathAbs)
+// Subset of FUNCTIONS_WITH_ID_LIST with custom constant/global call
+// IC stubs.
+#define CUSTOM_CALL_IC_GENERATORS(V)            \
+  V(ArrayPush)                                  \
+  V(ArrayPop)                                   \
+  V(StringCharCodeAt)                           \
+  V(StringCharAt)                               \
+  V(StringFromCharCode)                         \
+  V(MathFloor)                                  \
+  V(MathAbs)
 
 
 class CallStubCompiler: public StubCompiler {
  public:
-  enum {
-#define DECLARE_CALL_GENERATOR_ID(ignored1, ignore2, name) \
-    k##name##CallGenerator,
-    CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR_ID)
-#undef DECLARE_CALL_GENERATOR_ID
-    kNumCallGenerators
-  };
-
   CallStubCompiler(int argc,
                    InLoopFlag in_loop,
                    Code::Kind kind,
@@ -685,16 +680,20 @@
                                                  JSFunction* function,
                                                  String* name);
 
-  // Compiles a custom call constant/global IC using the generator
-  // with given id. For constant calls cell is NULL.
-  MUST_USE_RESULT MaybeObject* CompileCustomCall(int generator_id,
+  static bool HasCustomCallGenerator(BuiltinFunctionId id);
+
+ private:
+  // Compiles a custom call constant/global IC. For constant calls,
+  // cell is NULL. Returns undefined if there is no custom call code
+  // for the given function or if it cannot be generated.
+  MUST_USE_RESULT MaybeObject* CompileCustomCall(BuiltinFunctionId id,
                                                  Object* object,
                                                  JSObject* holder,
                                                  JSGlobalPropertyCell* cell,
                                                  JSFunction* function,
                                                  String* name);
 
-#define DECLARE_CALL_GENERATOR(ignored1, ignored2,  name)                      \
+#define DECLARE_CALL_GENERATOR(name)                                           \
   MUST_USE_RESULT MaybeObject* Compile##name##Call(Object* object,             \
                                                    JSObject* holder,           \
                                                    JSGlobalPropertyCell* cell, \
@@ -703,7 +702,6 @@
   CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
 #undef DECLARE_CALL_GENERATOR
 
- private:
   const ParameterCount arguments_;
   const InLoopFlag in_loop_;
   const Code::Kind kind_;
diff --git a/src/token.h b/src/token.h
index 74d9539..2f5ca1b 100644
--- a/src/token.h
+++ b/src/token.h
@@ -238,6 +238,40 @@
     return EQ <= op && op <= IN;
   }
 
+  static bool IsOrderedCompareOp(Value op) {
+    return op == LT || op == LTE || op == GT || op == GTE;
+  }
+
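+  // NegateCompareOp yields the logical complement of a comparison,
+  // e.g. NegateCompareOp(LT) == GTE (valid for totally ordered
+  // operands), while InvertCompareOp below swaps the operands,
+  // e.g. InvertCompareOp(LT) == GT.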
+  static Value NegateCompareOp(Value op) {
+    ASSERT(IsCompareOp(op));
+    switch (op) {
+      case EQ: return NE;
+      case NE: return EQ;
+      case EQ_STRICT: return NE_STRICT;
+      case LT: return GTE;
+      case GT: return LTE;
+      case LTE: return GT;
+      case GTE: return LT;
+      default:
+        return op;
+    }
+  }
+
+  static Value InvertCompareOp(Value op) {
+    ASSERT(IsCompareOp(op));
+    switch (op) {
+      case EQ: return NE;
+      case NE: return EQ;
+      case EQ_STRICT: return NE_STRICT;
+      case LT: return GT;
+      case GT: return LT;
+      case LTE: return GTE;
+      case GTE: return LTE;
+      default:
+        return op;
+    }
+  }
+
   static bool IsBitOp(Value op) {
     return (BIT_OR <= op && op <= SHR) || op == BIT_NOT;
   }
diff --git a/src/top.cc b/src/top.cc
index 1f0d159..3d86d11 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -35,10 +35,14 @@
 #include "platform.h"
 #include "simulator.h"
 #include "string-stream.h"
+#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
+Semaphore* Top::runtime_profiler_semaphore_ = NULL;
+#endif
 ThreadLocalTop Top::thread_local_;
 Mutex* Top::break_access_ = OS::CreateMutex();
 
@@ -74,10 +78,12 @@
 #endif
 #endif
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  js_entry_sp_ = 0;
+  js_entry_sp_ = NULL;
+  external_callback_ = NULL;
 #endif
 #ifdef ENABLE_VMSTATE_TRACKING
-  current_vm_state_ = NULL;
+  current_vm_state_ = EXTERNAL;
+  runtime_profiler_state_ = Top::PROF_NOT_IN_JS;
 #endif
   try_catch_handler_address_ = NULL;
   context_ = NULL;
@@ -273,6 +279,11 @@
 void Top::Initialize() {
   CHECK(!initialized);
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  ASSERT(runtime_profiler_semaphore_ == NULL);
+  runtime_profiler_semaphore_ = OS::CreateSemaphore(0);
+#endif
+
   InitializeThreadLocal();
 
   // Only preallocate on the first initialization.
@@ -290,6 +301,11 @@
 
 void Top::TearDown() {
   if (initialized) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+    delete runtime_profiler_semaphore_;
+    runtime_profiler_semaphore_ = NULL;
+#endif
+
     // Remove the external reference to the preallocated stack memory.
     if (preallocated_message_space != NULL) {
       delete preallocated_message_space;
@@ -376,79 +392,85 @@
   StackTraceFrameIterator it;
   int frames_seen = 0;
   while (!it.done() && (frames_seen < limit)) {
-    // Create a JSObject to hold the information for the StackFrame.
-    Handle<JSObject> stackFrame = Factory::NewJSObject(object_function());
-
     JavaScriptFrame* frame = it.frame();
-    Handle<JSFunction> fun(JSFunction::cast(frame->function()));
-    Handle<Script> script(Script::cast(fun->shared()->script()));
 
-    if (options & StackTrace::kLineNumber) {
-      int script_line_offset = script->line_offset()->value();
-      int position = frame->code()->SourcePosition(frame->pc());
-      int line_number = GetScriptLineNumber(script, position);
-      // line_number is already shifted by the script_line_offset.
-      int relative_line_number = line_number - script_line_offset;
-      if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
-        Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
-        int start = (relative_line_number == 0) ? 0 :
-            Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
-        int column_offset = position - start;
-        if (relative_line_number == 0) {
-          // For the case where the code is on the same line as the script tag.
-          column_offset += script->column_offset()->value();
+    List<FrameSummary> frames(3);  // Max 2 levels of inlining.
+    frame->Summarize(&frames);
+    for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
+      // Create a JSObject to hold the information for the StackFrame.
+      Handle<JSObject> stackFrame = Factory::NewJSObject(object_function());
+
+      Handle<JSFunction> fun = frames[i].function();
+      Handle<Script> script(Script::cast(fun->shared()->script()));
+
+      if (options & StackTrace::kLineNumber) {
+        int script_line_offset = script->line_offset()->value();
+        int position = frames[i].code()->SourcePosition(frames[i].pc());
+        int line_number = GetScriptLineNumber(script, position);
+        // line_number is already shifted by the script_line_offset.
+        int relative_line_number = line_number - script_line_offset;
+        if (options & StackTrace::kColumnOffset && relative_line_number >= 0) {
+          Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
+          int start = (relative_line_number == 0) ? 0 :
+              Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
+          int column_offset = position - start;
+          if (relative_line_number == 0) {
+            // For the case where the code is on the same line as the script
+            // tag.
+            column_offset += script->column_offset()->value();
+          }
+          SetProperty(stackFrame, column_key,
+                      Handle<Smi>(Smi::FromInt(column_offset + 1)), NONE);
         }
-        SetProperty(stackFrame, column_key,
-                    Handle<Smi>(Smi::FromInt(column_offset + 1)), NONE);
+        SetProperty(stackFrame, line_key,
+                    Handle<Smi>(Smi::FromInt(line_number + 1)), NONE);
       }
-      SetProperty(stackFrame, line_key,
-                  Handle<Smi>(Smi::FromInt(line_number + 1)), NONE);
-    }
 
-    if (options & StackTrace::kScriptName) {
-      Handle<Object> script_name(script->name());
-      SetProperty(stackFrame, script_key, script_name, NONE);
-    }
-
-    if (options & StackTrace::kScriptNameOrSourceURL) {
-      Handle<Object> script_name(script->name());
-      Handle<JSValue> script_wrapper = GetScriptWrapper(script);
-      Handle<Object> property = GetProperty(script_wrapper,
-                                            name_or_source_url_key);
-      ASSERT(property->IsJSFunction());
-      Handle<JSFunction> method = Handle<JSFunction>::cast(property);
-      bool caught_exception;
-      Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
-                                                 NULL, &caught_exception);
-      if (caught_exception) {
-        result = Factory::undefined_value();
+      if (options & StackTrace::kScriptName) {
+        Handle<Object> script_name(script->name());
+        SetProperty(stackFrame, script_key, script_name, NONE);
       }
-      SetProperty(stackFrame, script_name_or_source_url_key, result, NONE);
-    }
 
-    if (options & StackTrace::kFunctionName) {
-      Handle<Object> fun_name(fun->shared()->name());
-      if (fun_name->ToBoolean()->IsFalse()) {
-        fun_name = Handle<Object>(fun->shared()->inferred_name());
+      if (options & StackTrace::kScriptNameOrSourceURL) {
+        Handle<Object> script_name(script->name());
+        Handle<JSValue> script_wrapper = GetScriptWrapper(script);
+        Handle<Object> property = GetProperty(script_wrapper,
+                                              name_or_source_url_key);
+        ASSERT(property->IsJSFunction());
+        Handle<JSFunction> method = Handle<JSFunction>::cast(property);
+        bool caught_exception;
+        Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
+                                                   NULL, &caught_exception);
+        if (caught_exception) {
+          result = Factory::undefined_value();
+        }
+        SetProperty(stackFrame, script_name_or_source_url_key, result, NONE);
       }
-      SetProperty(stackFrame, function_key, fun_name, NONE);
-    }
 
-    if (options & StackTrace::kIsEval) {
-      int type = Smi::cast(script->compilation_type())->value();
-      Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
-          Factory::true_value() : Factory::false_value();
-      SetProperty(stackFrame, eval_key, is_eval, NONE);
-    }
+      if (options & StackTrace::kFunctionName) {
+        Handle<Object> fun_name(fun->shared()->name());
+        if (fun_name->ToBoolean()->IsFalse()) {
+          fun_name = Handle<Object>(fun->shared()->inferred_name());
+        }
+        SetProperty(stackFrame, function_key, fun_name, NONE);
+      }
 
-    if (options & StackTrace::kIsConstructor) {
-      Handle<Object> is_constructor = (frame->IsConstructor()) ?
-          Factory::true_value() : Factory::false_value();
-      SetProperty(stackFrame, constructor_key, is_constructor, NONE);
-    }
+      if (options & StackTrace::kIsEval) {
+        int type = Smi::cast(script->compilation_type())->value();
+        Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
+            Factory::true_value() : Factory::false_value();
+        SetProperty(stackFrame, eval_key, is_eval, NONE);
+      }
 
-    FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
-    frames_seen++;
+      if (options & StackTrace::kIsConstructor) {
+        Handle<Object> is_constructor = (frames[i].is_constructor()) ?
+            Factory::true_value() : Factory::false_value();
+        SetProperty(stackFrame, constructor_key, is_constructor, NONE);
+      }
+
+      FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
+      frames_seen++;
+    }
     it.Advance();
   }
 
@@ -1079,15 +1101,4 @@
   return from + sizeof(thread_local_);
 }
 
-
-ExecutionAccess::ExecutionAccess() {
-  Top::break_access_->Lock();
-}
-
-
-ExecutionAccess::~ExecutionAccess() {
-  Top::break_access_->Unlock();
-}
-
-
 } }  // namespace v8::internal
diff --git a/src/top.h b/src/top.h
index bc3a85e..e485de1 100644
--- a/src/top.h
+++ b/src/top.h
@@ -28,7 +28,10 @@
 #ifndef V8_TOP_H_
 #define V8_TOP_H_
 
+#include "atomicops.h"
+#include "compilation-cache.h"
 #include "frames-inl.h"
+#include "runtime-profiler.h"
 #include "simulator.h"
 
 namespace v8 {
@@ -114,10 +117,15 @@
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
   Address js_entry_sp_;  // the stack pointer of the bottom js entry frame
+  Address external_callback_;  // the external callback we're currently in
 #endif
 
 #ifdef ENABLE_VMSTATE_TRACKING
-  VMState* current_vm_state_;
+  StateTag current_vm_state_;
+
+  // Used for communication with the runtime profiler thread.
+  // Possible values are specified in RuntimeProfilerState.
+  Atomic32 runtime_profiler_state_;
 #endif
 
   // Generated code scratch locations.
@@ -267,16 +275,72 @@
   static inline Address* js_entry_sp_address() {
     return &thread_local_.js_entry_sp_;
   }
+
+  static Address external_callback() {
+    return thread_local_.external_callback_;
+  }
+  static void set_external_callback(Address callback) {
+    thread_local_.external_callback_ = callback;
+  }
 #endif
 
 #ifdef ENABLE_VMSTATE_TRACKING
-  static VMState* current_vm_state() {
+  static StateTag current_vm_state() {
     return thread_local_.current_vm_state_;
   }
 
-  static void set_current_vm_state(VMState* state) {
+  static void SetCurrentVMState(StateTag state) {
+    if (RuntimeProfiler::IsEnabled()) {
+      if (state == JS) {
+        // JS or non-JS -> JS transition.
+        RuntimeProfilerState old_state = SwapRuntimeProfilerState(PROF_IN_JS);
+        if (old_state == PROF_NOT_IN_JS_WAITING_FOR_JS) {
+          // If the runtime profiler was waiting, we reset the eager
+          // optimizing data in the compilation cache to get a fresh
+          // start after not running JavaScript code for a while and
+          // signal the runtime profiler so it can resume.
+          CompilationCache::ResetEagerOptimizingData();
+          runtime_profiler_semaphore_->Signal();
+        }
+      } else if (thread_local_.current_vm_state_ == JS) {
+        // JS -> non-JS transition. Update the runtime profiler state.
+        ASSERT(IsInJSState());
+        SetRuntimeProfilerState(PROF_NOT_IN_JS);
+      }
+    }
     thread_local_.current_vm_state_ = state;
   }
+
+  // Called in the runtime profiler thread.
+  // Returns whether the current VM state is set to JS.
+  static bool IsInJSState() {
+    ASSERT(RuntimeProfiler::IsEnabled());
+    return static_cast<RuntimeProfilerState>(
+        NoBarrier_Load(&thread_local_.runtime_profiler_state_)) == PROF_IN_JS;
+  }
+
+  // Called in the runtime profiler thread.
+  // Waits for the VM state to transition from non-JS to JS. Returns
+  // true when notified of the transition, false when the current
+  // state is not the expected non-JS state.
+  static bool WaitForJSState() {
+    ASSERT(RuntimeProfiler::IsEnabled());
+    // Try to switch to waiting state.
+    RuntimeProfilerState old_state = CompareAndSwapRuntimeProfilerState(
+        PROF_NOT_IN_JS, PROF_NOT_IN_JS_WAITING_FOR_JS);
+    if (old_state == PROF_NOT_IN_JS) {
+      runtime_profiler_semaphore_->Wait();
+      return true;
+    }
+    return false;
+  }
+
+  // When shutting down we join the profiler thread. Doing so while
+  // it's waiting on a semaphore will cause a deadlock, so we have to
+  // wake it up first.
+  static void WakeUpRuntimeProfilerThreadBeforeShutdown() {
+    runtime_profiler_semaphore_->Signal();
+  }
 #endif
 
   // Generated code scratch locations.
@@ -386,6 +450,51 @@
   static const char* kStackOverflowMessage;
 
  private:
+#ifdef ENABLE_VMSTATE_TRACKING
+  // Set of states used when communicating with the runtime profiler.
+  //
+  // The set of possible transitions is divided between the VM and the
+  // profiler threads.
+  //
+  // The VM thread can perform these transitions:
+  //   o IN_JS -> NOT_IN_JS
+  //   o NOT_IN_JS -> IN_JS
+  //   o NOT_IN_JS_WAITING_FOR_JS -> IN_JS, notifying the profiler thread
+  //     using the semaphore.
+  // All the above transitions are caused by VM state changes.
+  //
+  // The profiler thread can only perform a single transition
+  // NOT_IN_JS -> NOT_IN_JS_WAITING_FOR_JS before it starts waiting on
+  // the semaphore.
+  enum RuntimeProfilerState {
+    PROF_NOT_IN_JS,
+    PROF_NOT_IN_JS_WAITING_FOR_JS,
+    PROF_IN_JS
+  };
+
+  static void SetRuntimeProfilerState(RuntimeProfilerState state) {
+    NoBarrier_Store(&thread_local_.runtime_profiler_state_, state);
+  }
+
+  static RuntimeProfilerState SwapRuntimeProfilerState(
+      RuntimeProfilerState state) {
+    return static_cast<RuntimeProfilerState>(
+        NoBarrier_AtomicExchange(&thread_local_.runtime_profiler_state_,
+                                 state));
+  }
+
+  static RuntimeProfilerState CompareAndSwapRuntimeProfilerState(
+      RuntimeProfilerState old_state,
+      RuntimeProfilerState state) {
+    return static_cast<RuntimeProfilerState>(
+        NoBarrier_CompareAndSwap(&thread_local_.runtime_profiler_state_,
+                                 old_state,
+                                 state));
+  }
+
+  static Semaphore* runtime_profiler_semaphore_;
+#endif  // ENABLE_VMSTATE_TRACKING
+
   // The context that initiated this JS execution.
   static ThreadLocalTop thread_local_;
   static void InitializeThreadLocal();
@@ -402,6 +511,7 @@
   friend class SaveContext;
   friend class AssertNoContextChange;
   friend class ExecutionAccess;
+  friend class ThreadLocalTop;
 
   static void FillCache();
 };
@@ -471,8 +581,15 @@
 
 class ExecutionAccess BASE_EMBEDDED {
  public:
-  ExecutionAccess();
-  ~ExecutionAccess();
+  ExecutionAccess() { Lock(); }
+  ~ExecutionAccess() { Unlock(); }
+
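+  // Static variants for non-scoped use; TryLock never blocks, so
+  // callers that must not stall can back off instead of waiting for
+  // break_access_.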
+  static void Lock() { Top::break_access_->Lock(); }
+  static void Unlock() { Top::break_access_->Unlock(); }
+
+  static bool TryLock() {
+    return Top::break_access_->TryLock();
+  }
 };
 
 } }  // namespace v8::internal
diff --git a/src/type-info.cc b/src/type-info.cc
index 3fc929d..8719439 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -26,7 +26,15 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "v8.h"
+
+#include "ast.h"
+#include "compiler.h"
+#include "ic.h"
+#include "macro-assembler.h"
+#include "stub-cache.h"
 #include "type-info.h"
+
+#include "ic-inl.h"
 #include "objects-inl.h"
 
 namespace v8 {
@@ -50,4 +58,303 @@
 }
 
 
+TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code) {
+  Initialize(code);
+}
+
+
+void TypeFeedbackOracle::Initialize(Handle<Code> code) {
+  ASSERT(map_.is_null());  // Only initialize once.
+  map_ = Factory::NewJSObject(Top::object_function());
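+  // map_ serves as a dictionary from source position to collected
+  // type feedback: either the receiver Map of a monomorphic IC or
+  // the IC Code object itself (see PopulateMap).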
+  PopulateMap(code);
+}
+
+
+bool TypeFeedbackOracle::LoadIsMonomorphic(Property* expr) {
+  return IsMonomorphic(expr->position());
+}
+
+
+bool TypeFeedbackOracle::StoreIsMonomorphic(Assignment* expr) {
+  return IsMonomorphic(expr->position());
+}
+
+
+bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
+  return IsMonomorphic(expr->position());
+}
+
+
+Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
+  ASSERT(LoadIsMonomorphic(expr));
+  return Handle<Map>::cast(GetElement(map_, expr->position()));
+}
+
+
+Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Assignment* expr) {
+  ASSERT(StoreIsMonomorphic(expr));
+  return Handle<Map>::cast(GetElement(map_, expr->position()));
+}
+
+
+Handle<Map> TypeFeedbackOracle::CallMonomorphicReceiverType(Call* expr) {
+  ASSERT(CallIsMonomorphic(expr));
+  return Handle<Map>::cast(GetElement(map_, expr->position()));
+}
+
+
+ZoneMapList* TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
+                                                   Handle<String> name) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
+  return CollectReceiverTypes(expr->position(), name, flags);
+}
+
+
+ZoneMapList* TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
+                                                    Handle<String> name) {
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL);
+  return CollectReceiverTypes(expr->position(), name, flags);
+}
+
+
+ZoneMapList* TypeFeedbackOracle::CallReceiverTypes(Call* expr,
+                                                   Handle<String> name) {
+  int arity = expr->arguments()->length();
+  Code::Flags flags = Code::ComputeMonomorphicFlags(
+      Code::CALL_IC, NORMAL, OWN_MAP, NOT_IN_LOOP, arity);
+  return CollectReceiverTypes(expr->position(), name, flags);
+}
+
+
+bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
+  Handle<Object> object = GetElement(map_, expr->position());
+  return *object == Builtins::builtin(id);
+}
+
+
+TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr, Side side) {
+  Handle<Object> object = GetElement(map_, expr->position());
+  TypeInfo unknown = TypeInfo::Unknown();
+  if (!object->IsCode()) return unknown;
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (!code->is_compare_ic_stub()) return unknown;
+
+  CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+  switch (state) {
+    case CompareIC::UNINITIALIZED:
+      // Uninitialized means never executed.
+      // TODO(fschneider): Introduce a separate value for never-executed ICs.
+      return unknown;
+    case CompareIC::SMIS:
+      return TypeInfo::Smi();
+    case CompareIC::HEAP_NUMBERS:
+      return TypeInfo::Number();
+    case CompareIC::OBJECTS:
+      // TODO(kasperl): We really need a type for JS objects here.
+      return TypeInfo::NonPrimitive();
+    case CompareIC::GENERIC:
+    default:
+      return unknown;
+  }
+}
+
+
+TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr, Side side) {
+  Handle<Object> object = GetElement(map_, expr->position());
+  TypeInfo unknown = TypeInfo::Unknown();
+  if (!object->IsCode()) return unknown;
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (code->is_binary_op_stub()) {
+    BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
+        code->binary_op_type());
+    switch (type) {
+      case BinaryOpIC::UNINIT_OR_SMI:
+        return TypeInfo::Smi();
+      case BinaryOpIC::DEFAULT:
+        return (expr->op() == Token::DIV || expr->op() == Token::MUL)
+            ? TypeInfo::Double()
+            : TypeInfo::Integer32();
+      case BinaryOpIC::HEAP_NUMBERS:
+        return TypeInfo::Double();
+      default:
+        return unknown;
+    }
+  } else if (code->is_type_recording_binary_op_stub()) {
+    TRBinaryOpIC::TypeInfo type = static_cast<TRBinaryOpIC::TypeInfo>(
+        code->type_recording_binary_op_type());
+    TRBinaryOpIC::TypeInfo result_type = static_cast<TRBinaryOpIC::TypeInfo>(
+        code->type_recording_binary_op_result_type());
+
+    switch (type) {
+      case TRBinaryOpIC::UNINITIALIZED:
+        // Uninitialized means never executed.
+        // TODO(fschneider): Introduce a separate value for never-executed ICs.
+        return unknown;
+      case TRBinaryOpIC::SMI:
+        switch (result_type) {
+          case TRBinaryOpIC::UNINITIALIZED:
+          case TRBinaryOpIC::SMI:
+            return TypeInfo::Smi();
+          case TRBinaryOpIC::INT32:
+            return TypeInfo::Integer32();
+          case TRBinaryOpIC::HEAP_NUMBER:
+            return TypeInfo::Double();
+          default:
+            return unknown;
+        }
+      case TRBinaryOpIC::INT32:
+        if (expr->op() == Token::DIV ||
+            result_type == TRBinaryOpIC::HEAP_NUMBER) {
+          return TypeInfo::Double();
+        }
+        return TypeInfo::Integer32();
+      case TRBinaryOpIC::HEAP_NUMBER:
+        return TypeInfo::Double();
+      case TRBinaryOpIC::STRING:
+      case TRBinaryOpIC::GENERIC:
+        return unknown;
+      default:
+        return unknown;
+    }
+  }
+  return unknown;
+}
+
+
+TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
+  Handle<Object> object = GetElement(map_, clause->position());
+  TypeInfo unknown = TypeInfo::Unknown();
+  if (!object->IsCode()) return unknown;
+  Handle<Code> code = Handle<Code>::cast(object);
+  if (!code->is_compare_ic_stub()) return unknown;
+
+  CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+  switch (state) {
+    case CompareIC::UNINITIALIZED:
+      // Uninitialized means never executed.
+      // TODO(fschneider): Introduce a separate value for never-executed ICs.
+      return unknown;
+    case CompareIC::SMIS:
+      return TypeInfo::Smi();
+    case CompareIC::HEAP_NUMBERS:
+      return TypeInfo::Number();
+    case CompareIC::OBJECTS:
+      // TODO(kasperl): We really need a type for JS objects here.
+      return TypeInfo::NonPrimitive();
+    case CompareIC::GENERIC:
+    default:
+      return unknown;
+  }
+}
+
+
+ZoneMapList* TypeFeedbackOracle::CollectReceiverTypes(int position,
+                                                      Handle<String> name,
+                                                      Code::Flags flags) {
+  Handle<Object> object = GetElement(map_, position);
+  if (object->IsUndefined()) return NULL;
+
+  if (*object == Builtins::builtin(Builtins::StoreIC_GlobalProxy)) {
+    // TODO(fschneider): We could collect the maps and signal that
+    // we need a generic store (or load) here.
+    ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
+    return NULL;
+  } else if (object->IsMap()) {
+    ZoneMapList* types = new ZoneMapList(1);
+    types->Add(Handle<Map>::cast(object));
+    return types;
+  } else if (Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
+    ZoneMapList* types = new ZoneMapList(4);
+    ASSERT(object->IsCode());
+    StubCache::CollectMatchingMaps(types, *name, flags);
+    return types->length() > 0 ? types : NULL;
+  } else {
+    return NULL;
+  }
+}
+
+
+void TypeFeedbackOracle::PopulateMap(Handle<Code> code) {
+  HandleScope scope;
+
+  const int kInitialCapacity = 16;
+  List<int> code_positions(kInitialCapacity);
+  List<int> source_positions(kInitialCapacity);
+  CollectPositions(*code, &code_positions, &source_positions);
+
+  int length = code_positions.length();
+  ASSERT(source_positions.length() == length);
+  for (int i = 0; i < length; i++) {
+    RelocInfo info(code->instruction_start() + code_positions[i],
+                   RelocInfo::CODE_TARGET, 0);
+    Handle<Code> target(Code::GetCodeFromTargetAddress(info.target_address()));
+    int position = source_positions[i];
+    InlineCacheState state = target->ic_state();
+    Code::Kind kind = target->kind();
+    if (kind == Code::BINARY_OP_IC ||
+        kind == Code::TYPE_RECORDING_BINARY_OP_IC ||
+        kind == Code::COMPARE_IC) {
+      // TODO(kasperl): Avoid having multiple ICs with the same
+      // position by making sure that we have position information
+      // recorded for all binary ICs.
+      if (GetElement(map_, position)->IsUndefined()) {
+        SetElement(map_, position, target);
+      }
+    } else if (state == MONOMORPHIC) {
+      Handle<Map> map = Handle<Map>(target->FindFirstMap());
+      if (*map == NULL) {
+        SetElement(map_, position, target);
+      } else {
+        SetElement(map_, position, map);
+      }
+    } else if (state == MEGAMORPHIC) {
+      SetElement(map_, position, target);
+    }
+  }
+}
+
+
+void TypeFeedbackOracle::CollectPositions(Code* code,
+                                          List<int>* code_positions,
+                                          List<int>* source_positions) {
+  AssertNoAllocation no_allocation;
+  int position = 0;
+  // Because the ICs we use for global variable access in the full
+  // code generator do not have any meaningful positions, we avoid
+  // collecting those by filtering out contextual code targets.
+  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+      RelocInfo::kPositionMask;
+  for (RelocIterator it(code, mask); !it.done(); it.next()) {
+    RelocInfo* info = it.rinfo();
+    RelocInfo::Mode mode = info->rmode();
+    if (RelocInfo::IsCodeTarget(mode)) {
+      Code* target = Code::GetCodeFromTargetAddress(info->target_address());
+      if (target->is_inline_cache_stub()) {
+        InlineCacheState state = target->ic_state();
+        Code::Kind kind = target->kind();
+        if (kind == Code::BINARY_OP_IC) {
+          if (target->binary_op_type() == BinaryOpIC::GENERIC) continue;
+        } else if (kind == Code::TYPE_RECORDING_BINARY_OP_IC) {
+          if (target->type_recording_binary_op_type() ==
+              TRBinaryOpIC::GENERIC) {
+            continue;
+          }
+        } else if (kind == Code::COMPARE_IC) {
+          if (target->compare_state() == CompareIC::GENERIC) continue;
+        } else {
+          if (kind == Code::CALL_IC && state == MONOMORPHIC &&
+              target->check_type() != RECEIVER_MAP_CHECK) continue;
+          if (state != MONOMORPHIC && state != MEGAMORPHIC) continue;
+        }
+        code_positions->Add(
+            static_cast<int>(info->pc() - code->instruction_start()));
+        source_positions->Add(position);
+      }
+    } else {
+      ASSERT(RelocInfo::IsPosition(mode));
+      position = static_cast<int>(info->data());
+    }
+  }
+}
+
 } }  // namespace v8::internal
diff --git a/src/type-info.h b/src/type-info.h
index f588e56..cb3e75d 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -29,47 +29,53 @@
 #define V8_TYPE_INFO_H_
 
 #include "globals.h"
+#include "zone.h"
+#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
 
-//        Unknown
-//           |
-//      PrimitiveType
-//           |   \--------|
-//         Number      String
-//         /    |         |
-//    Double  Integer32   |
-//        |      |       /
-//        |     Smi     /
-//        |     /      /
-//        Uninitialized.
+//         Unknown
+//           |   |
+//           |   \--------------|
+//      Primitive             Non-primitive
+//           |   \--------|     |
+//         Number      String   |
+//         /    |         |     |
+//    Double  Integer32   |    /
+//        |      |       /    /
+//        |     Smi     /    /
+//        |      |     /    /
+//        |      |    /    /
+//        Uninitialized.--/
 
 class TypeInfo {
  public:
-  TypeInfo() : type_(kUnknownType) { }
+  TypeInfo() : type_(kUninitialized) { }
 
-  static inline TypeInfo Unknown();
+  static TypeInfo Unknown() { return TypeInfo(kUnknown); }
   // We know it's a primitive type.
-  static inline TypeInfo Primitive();
+  static TypeInfo Primitive() { return TypeInfo(kPrimitive); }
   // We know it's a number of some sort.
-  static inline TypeInfo Number();
-  // We know it's signed 32 bit integer.
-  static inline TypeInfo Integer32();
+  static TypeInfo Number() { return TypeInfo(kNumber); }
+  // We know it's a signed 32 bit integer.
+  static TypeInfo Integer32() { return TypeInfo(kInteger32); }
   // We know it's a Smi.
-  static inline TypeInfo Smi();
+  static TypeInfo Smi() { return TypeInfo(kSmi); }
   // We know it's a heap number.
-  static inline TypeInfo Double();
+  static TypeInfo Double() { return TypeInfo(kDouble); }
   // We know it's a string.
-  static inline TypeInfo String();
+  static TypeInfo String() { return TypeInfo(kString); }
+  // We know it's a non-primitive (object) type.
+  static TypeInfo NonPrimitive() { return TypeInfo(kNonPrimitive); }
   // We haven't started collecting info yet.
-  static inline TypeInfo Uninitialized();
+  static TypeInfo Uninitialized() { return TypeInfo(kUninitialized); }
 
   // Return compact representation.  Very sensitive to enum values below!
-  // Compacting drops information about primtive types and strings types.
+  // Compacting drops information about primitive types and string types.
   // We use the compact representation when we only care about number types.
   int ThreeBitRepresentation() {
-    ASSERT(type_ != kUninitializedType);
+    ASSERT(type_ != kUninitialized);
     int answer = type_ & 0xf;
     answer = answer > 6 ? answer - 2 : answer;
     ASSERT(answer >= 0);
@@ -82,12 +88,12 @@
     Type t = static_cast<Type>(three_bit_representation > 4 ?
                                three_bit_representation + 2 :
                                three_bit_representation);
-    t = (t == kUnknownType) ? t : static_cast<Type>(t | kPrimitiveType);
-    ASSERT(t == kUnknownType ||
-           t == kNumberType ||
-           t == kInteger32Type ||
-           t == kSmiType ||
-           t == kDoubleType);
+    t = (t == kUnknown) ? t : static_cast<Type>(t | kPrimitive);
+    ASSERT(t == kUnknown ||
+           t == kNumber ||
+           t == kInteger32 ||
+           t == kSmi ||
+           t == kDouble);
     return TypeInfo(t);
   }
 
@@ -97,13 +103,14 @@
 
   static TypeInfo FromInt(int bit_representation) {
     Type t = static_cast<Type>(bit_representation);
-    ASSERT(t == kUnknownType ||
-           t == kPrimitiveType ||
-           t == kNumberType ||
-           t == kInteger32Type ||
-           t == kSmiType ||
-           t == kDoubleType ||
-           t == kStringType);
+    ASSERT(t == kUnknown ||
+           t == kPrimitive ||
+           t == kNumber ||
+           t == kInteger32 ||
+           t == kSmi ||
+           t == kDouble ||
+           t == kString ||
+           t == kNonPrimitive);
     return TypeInfo(t);
   }
 
@@ -113,82 +120,98 @@
   }
 
 
-  // Integer32 is an integer that can be represented as a signed
-  // 32-bit integer. It has to be in the range [-2^31, 2^31 - 1].
-  // We also have to check for negative 0 as it is not an Integer32.
+  // Integer32 is an integer that can be represented as either a signed
+  // 32-bit integer or as an unsigned 32-bit integer. It has to be
+  // in the range [-2^31, 2^32 - 1]. We also have to check for negative 0
+  // as it is not an Integer32.
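+  // For example, 2.0 and -1.0 qualify, while 2.5 and -0.0 do not.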
   static inline bool IsInt32Double(double value) {
     const DoubleRepresentation minus_zero(-0.0);
     DoubleRepresentation rep(value);
     if (rep.bits == minus_zero.bits) return false;
-    if (value >= kMinInt && value <= kMaxInt) {
-      if (value == static_cast<int32_t>(value)) return true;
+    if (value >= kMinInt && value <= kMaxInt &&
+        value == static_cast<int32_t>(value)) {
+      return true;
     }
     return false;
   }
 
   static TypeInfo TypeFromValue(Handle<Object> value);
 
+  bool Equals(const TypeInfo& other) {
+    return type_ == other.type_;
+  }
+
   inline bool IsUnknown() {
-    return type_ == kUnknownType;
+    ASSERT(type_ != kUninitialized);
+    return type_ == kUnknown;
+  }
+
+  inline bool IsPrimitive() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kPrimitive) == kPrimitive);
   }
 
   inline bool IsNumber() {
-    ASSERT(type_ != kUninitializedType);
-    return ((type_ & kNumberType) == kNumberType);
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kNumber) == kNumber);
   }
 
   inline bool IsSmi() {
-    ASSERT(type_ != kUninitializedType);
-    return ((type_ & kSmiType) == kSmiType);
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kSmi) == kSmi);
   }
 
   inline bool IsInteger32() {
-    ASSERT(type_ != kUninitializedType);
-    return ((type_ & kInteger32Type) == kInteger32Type);
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kInteger32) == kInteger32);
   }
 
   inline bool IsDouble() {
-    ASSERT(type_ != kUninitializedType);
-    return ((type_ & kDoubleType) == kDoubleType);
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kDouble) == kDouble);
   }
 
   inline bool IsString() {
-    ASSERT(type_ != kUninitializedType);
-    return ((type_ & kStringType) == kStringType);
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kString) == kString);
+  }
+
+  inline bool IsNonPrimitive() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kNonPrimitive) == kNonPrimitive);
   }
 
   inline bool IsUninitialized() {
-    return type_ == kUninitializedType;
+    return type_ == kUninitialized;
   }
 
   const char* ToString() {
     switch (type_) {
-      case kUnknownType: return "UnknownType";
-      case kPrimitiveType: return "PrimitiveType";
-      case kNumberType: return "NumberType";
-      case kInteger32Type: return "Integer32Type";
-      case kSmiType: return "SmiType";
-      case kDoubleType: return "DoubleType";
-      case kStringType: return "StringType";
-      case kUninitializedType:
-        UNREACHABLE();
-        return "UninitializedType";
+      case kUnknown: return "Unknown";
+      case kPrimitive: return "Primitive";
+      case kNumber: return "Number";
+      case kInteger32: return "Integer32";
+      case kSmi: return "Smi";
+      case kDouble: return "Double";
+      case kString: return "String";
+      case kNonPrimitive: return "Object";
+      case kUninitialized: return "Uninitialized";
     }
     UNREACHABLE();
     return "Unreachable code";
   }
 
  private:
-  // We use 6 bits to represent the types.
   enum Type {
-    kUnknownType = 0,          // 000000
-    kPrimitiveType = 0x10,     // 010000
-    kNumberType = 0x11,        // 010001
-    kInteger32Type = 0x13,     // 010011
-    kSmiType = 0x17,           // 010111
-    kDoubleType = 0x19,        // 011001
-    kStringType = 0x30,        // 110000
-    kUninitializedType = 0x3f  // 111111
+    kUnknown = 0,          // 0000000
+    kPrimitive = 0x10,     // 0010000
+    kNumber = 0x11,        // 0010001
+    kInteger32 = 0x13,     // 0010011
+    kSmi = 0x17,           // 0010111
+    kDouble = 0x19,        // 0011001
+    kString = 0x30,        // 0110000
+    kNonPrimitive = 0x40,  // 1000000
+    kUninitialized = 0x7f  // 1111111
   };
   explicit inline TypeInfo(Type t) : type_(t) { }
 
@@ -196,44 +219,63 @@
 };
 
 
-TypeInfo TypeInfo::Unknown() {
-  return TypeInfo(kUnknownType);
-}
+// Forward declarations.
+class Assignment;
+class BinaryOperation;
+class Call;
+class CompareOperation;
+class CompilationInfo;
+class Property;
+class CaseClause;
 
+class TypeFeedbackOracle BASE_EMBEDDED {
+ public:
+  enum Side {
+    LEFT,
+    RIGHT,
+    RESULT
+  };
 
-TypeInfo TypeInfo::Primitive() {
-  return TypeInfo(kPrimitiveType);
-}
+  explicit TypeFeedbackOracle(Handle<Code> code);
 
+  bool LoadIsMonomorphic(Property* expr);
+  bool StoreIsMonomorphic(Assignment* expr);
+  bool CallIsMonomorphic(Call* expr);
 
-TypeInfo TypeInfo::Number() {
-  return TypeInfo(kNumberType);
-}
+  Handle<Map> LoadMonomorphicReceiverType(Property* expr);
+  Handle<Map> StoreMonomorphicReceiverType(Assignment* expr);
+  Handle<Map> CallMonomorphicReceiverType(Call* expr);
 
+  ZoneMapList* LoadReceiverTypes(Property* expr, Handle<String> name);
+  ZoneMapList* StoreReceiverTypes(Assignment* expr, Handle<String> name);
+  ZoneMapList* CallReceiverTypes(Call* expr, Handle<String> name);
 
-TypeInfo TypeInfo::Integer32() {
-  return TypeInfo(kInteger32Type);
-}
+  bool LoadIsBuiltin(Property* expr, Builtins::Name id);
 
+  // Get type information for arithmetic operations and compares.
+  TypeInfo BinaryType(BinaryOperation* expr, Side side);
+  TypeInfo CompareType(CompareOperation* expr, Side side);
+  TypeInfo SwitchType(CaseClause* clause);
 
-TypeInfo TypeInfo::Smi() {
-  return TypeInfo(kSmiType);
-}
+ private:
+  void Initialize(Handle<Code> code);
 
+  bool IsMonomorphic(int pos) { return GetElement(map_, pos)->IsMap(); }
 
-TypeInfo TypeInfo::Double() {
-  return TypeInfo(kDoubleType);
-}
+  ZoneMapList* CollectReceiverTypes(int position,
+                                    Handle<String> name,
+                                    Code::Flags flags);
 
+  void PopulateMap(Handle<Code> code);
 
-TypeInfo TypeInfo::String() {
-  return TypeInfo(kStringType);
-}
+  void CollectPositions(Code* code,
+                        List<int>* code_positions,
+                        List<int>* source_positions);
 
+  Handle<JSObject> map_;
 
-TypeInfo TypeInfo::Uninitialized() {
-  return TypeInfo(kUninitializedType);
-}
+  DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
+};
 
 } }  // namespace v8::internal
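
The renamed enum above encodes a small type lattice in its bit patterns: each type's bits are a superset of the bits of every one of its supertypes, so a single bitwise AND implements the subtype tests (IsNumber(), IsSmi(), and so on). A minimal standalone sketch of that encoding, using only the constants visible in this hunk and plain assert() in place of V8's ASSERT; it is not V8 code itself:

    // Subtype test over the TypeInfo bit lattice: t is a subtype of super
    // exactly when t's bit pattern contains all of super's bits.
    #include <cassert>

    enum Type {
      kUnknown = 0,          // 0000000
      kPrimitive = 0x10,     // 0010000
      kNumber = 0x11,        // 0010001
      kInteger32 = 0x13,     // 0010011
      kSmi = 0x17,           // 0010111
      kDouble = 0x19,        // 0011001
      kString = 0x30,        // 0110000
      kNonPrimitive = 0x40   // 1000000
    };

    static bool Is(Type t, Type super) { return (t & super) == super; }

    int main() {
      assert(Is(kSmi, kInteger32));      // a Smi is an Integer32...
      assert(Is(kInteger32, kNumber));   // ...which is a Number...
      assert(Is(kNumber, kPrimitive));   // ...which is a Primitive.
      assert(!Is(kDouble, kInteger32));  // a Double is not an Integer32.
      assert(!Is(kString, kNumber));     // a String is not a Number.
      return 0;
    }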
 
diff --git a/src/utils.cc b/src/utils.cc
index 7096ba3..d0ec4ef 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -45,8 +45,16 @@
 }
 
 
-void Flush() {
-  fflush(stdout);
+void PrintF(FILE* out, const char* format, ...) {
+  va_list arguments;
+  va_start(arguments, format);
+  OS::VFPrint(out, format, arguments);
+  va_end(arguments);
+}
+
+
+void Flush(FILE* out) {
+  fflush(out);
 }
 
 
@@ -168,6 +176,23 @@
 }
 
 
+int AppendChars(const char* filename,
+                const char* str,
+                int size,
+                bool verbose) {
+  FILE* f = OS::FOpen(filename, "ab");
+  if (f == NULL) {
+    if (verbose) {
+      OS::PrintError("Cannot open file %s for writing.\n", filename);
+    }
+    return 0;
+  }
+  int written = WriteCharsToFile(str, size, f);
+  fclose(f);
+  return written;
+}
+
+
 int WriteChars(const char* filename,
                const char* str,
                int size,
@@ -214,11 +239,16 @@
 
 
 void StringBuilder::AddFormatted(const char* format, ...) {
+  va_list arguments;
+  va_start(arguments, format);
+  AddFormattedList(format, arguments);
+  va_end(arguments);
+}
+
+
+void StringBuilder::AddFormattedList(const char* format, va_list list) {
   ASSERT(!is_finalized() && position_ < buffer_.length());
-  va_list args;
-  va_start(args, format);
-  int n = OS::VSNPrintF(buffer_ + position_, format, args);
-  va_end(args);
+  int n = OS::VSNPrintF(buffer_ + position_, format, list);
   if (n < 0 || n >= (buffer_.length() - position_)) {
     position_ = buffer_.length();
   } else {
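
The AddFormatted/AddFormattedList split above is the standard varargs-forwarding pattern: the `...` entry point only packages its arguments into a va_list and delegates, so callers that already hold a va_list can share the same formatting path. A sketch of the pattern under illustrative names, with the buffer handling simplified; this is not the V8 StringBuilder API:

    #include <cstdarg>
    #include <cstdio>

    // Single formatting implementation, reusable by any va_list holder.
    static void AddFormattedList(char* buf, size_t size,
                                 const char* format, va_list list) {
      std::vsnprintf(buf, size, format, list);
    }

    // printf-style wrapper: capture the varargs and forward them.
    static void AddFormatted(char* buf, size_t size, const char* format, ...) {
      va_list arguments;
      va_start(arguments, format);
      AddFormattedList(buf, size, format, arguments);
      va_end(arguments);
    }

    int main() {
      char line[64];
      AddFormatted(line, sizeof(line), "%d args", 3);
      std::puts(line);
      return 0;
    }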
diff --git a/src/utils.h b/src/utils.h
index 69c062f..62b8726 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -226,6 +226,11 @@
   static T decode(uint32_t value) {
     return static_cast<T>((value & mask()) >> shift);
   }
+
+  // Value for the field with all bits set.
+  static T max() {
+    return decode(mask());
+  }
 };
 
 
@@ -326,7 +331,7 @@
     return start_[index];
   }
 
-  T& at(int i) const { return operator[](i); }
+  const T& at(int index) const { return operator[](index); }
 
   T& first() { return start_[0]; }
 
@@ -387,11 +392,40 @@
 };
 
 
+// A pointer that can only be set once and doesn't allow NULL values.
+template<typename T>
+class SetOncePointer {
+ public:
+  SetOncePointer() : pointer_(NULL) { }
+
+  bool is_set() const { return pointer_ != NULL; }
+
+  T* get() const {
+    ASSERT(pointer_ != NULL);
+    return pointer_;
+  }
+
+  void set(T* value) {
+    ASSERT(pointer_ == NULL && value != NULL);
+    pointer_ = value;
+  }
+
+ private:
+  T* pointer_;
+};
+
+
 template <typename T, int kSize>
 class EmbeddedVector : public Vector<T> {
  public:
   EmbeddedVector() : Vector<T>(buffer_, kSize) { }
 
+  explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
+    for (int i = 0; i < kSize; ++i) {
+      buffer_[i] = initial_value;
+    }
+  }
+
   // When copying, make underlying Vector to reference our buffer.
   EmbeddedVector(const EmbeddedVector& rhs)
       : Vector<T>(rhs) {
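
SetOncePointer, added above, turns "this field is initialized exactly once and is never null afterwards" from a convention into a checked invariant: double initialization and use-before-set crash in debug builds instead of silently overwriting state. A usage sketch with plain assert() standing in for V8's ASSERT:

    #include <cassert>
    #include <cstddef>

    template <typename T>
    class SetOncePointer {
     public:
      SetOncePointer() : pointer_(NULL) { }
      bool is_set() const { return pointer_ != NULL; }
      T* get() const { assert(pointer_ != NULL); return pointer_; }
      void set(T* value) {
        assert(pointer_ == NULL && value != NULL);  // set once, never null
        pointer_ = value;
      }
     private:
      T* pointer_;
    };

    int main() {
      static int code_object = 42;
      SetOncePointer<int> entry;
      assert(!entry.is_set());
      entry.set(&code_object);      // the first set succeeds
      assert(*entry.get() == 42);   // reads must come after the set
      // entry.set(&code_object);   // a second set would assert
      return 0;
    }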
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 60e8741..fa5d581 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -28,7 +28,9 @@
 #ifndef V8_V8_COUNTERS_H_
 #define V8_V8_COUNTERS_H_
 
+#include "allocation.h"
 #include "counters.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
@@ -159,7 +161,20 @@
   SC(named_load_global_stub, V8.NamedLoadGlobalStub)                  \
   SC(named_load_global_stub_miss, V8.NamedLoadGlobalStubMiss)         \
   SC(keyed_store_field, V8.KeyedStoreField)                           \
+  SC(named_store_inline_field, V8.NamedStoreInlineField)              \
   SC(keyed_store_inline, V8.KeyedStoreInline)                         \
+  SC(named_load_inline_generic, V8.NamedLoadInlineGeneric)            \
+  SC(named_load_inline_field, V8.NamedLoadInlineFast)                 \
+  SC(keyed_load_inline_generic, V8.KeyedLoadInlineGeneric)            \
+  SC(keyed_load_inline_fast, V8.KeyedLoadInlineFast)                  \
+  SC(named_load_full, V8.NamedLoadFull)                               \
+  SC(keyed_load_full, V8.KeyedLoadFull)                               \
+  SC(keyed_store_inline_generic, V8.KeyedStoreInlineGeneric)          \
+  SC(keyed_store_inline_fast, V8.KeyedStoreInlineFast)                \
+  SC(named_store_inline_generic, V8.NamedStoreInlineGeneric)          \
+  SC(named_store_inline_fast, V8.NamedStoreInlineFast)                \
+  SC(keyed_store_full, V8.KeyedStoreFull)                             \
+  SC(named_store_full, V8.NamedStoreFull)                             \
   SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)                \
   SC(named_store_global_inline, V8.NamedStoreGlobalInline)            \
   SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss)   \
@@ -224,7 +239,25 @@
   SC(math_sqrt, V8.MathSqrt)                                          \
   SC(math_tan, V8.MathTan)                                            \
   SC(transcendental_cache_hit, V8.TranscendentalCacheHit)             \
-  SC(transcendental_cache_miss, V8.TranscendentalCacheMiss)
+  SC(transcendental_cache_miss, V8.TranscendentalCacheMiss)           \
+  SC(stack_interrupts, V8.StackInterrupts)                            \
+  SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks)                 \
+  SC(other_ticks, V8.OtherTicks)                                      \
+  SC(js_opt_ticks, V8.JsOptTicks)                                     \
+  SC(js_non_opt_ticks, V8.JsNonoptTicks)                              \
+  SC(js_other_ticks, V8.JsOtherTicks)                                 \
+  SC(smi_checks_removed, V8.SmiChecksRemoved)                         \
+  SC(map_checks_removed, V8.MapChecksRemoved)                         \
+  SC(quote_json_char_count, V8.QuoteJsonCharacterCount)               \
+  SC(quote_json_char_recount, V8.QuoteJsonCharacterReCount)           \
+  SC(instance_of, V8.InstanceOf)                                      \
+  SC(instance_of_cache, V8.InstanceOfCache)                           \
+  SC(instance_of_stub_true, V8.InstanceOfStubTrue)                    \
+  SC(instance_of_stub_false, V8.InstanceOfStubFalse)                  \
+  SC(instance_of_stub_false_null, V8.InstanceOfStubFalseNull)         \
+  SC(instance_of_stub_false_string, V8.InstanceOfStubFalseString)     \
+  SC(instance_of_full, V8.InstanceOfFull)                             \
+  SC(instance_of_slow, V8.InstanceOfSlow)
 
 
 // This file contains all the v8 counters that are in use.
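
The SC(name, caption) rows above form an "X macro" list: the same list is expanded several times with different definitions of SC, e.g. once to declare counter fields and once to build a table of captions. A compressed sketch of that expansion trick; the captions here are quoted strings for simplicity, whereas V8 stringifies the bare tokens, and the real macro machinery is more involved:

    #include <cstdio>

    #define COUNTER_LIST(SC)             \
      SC(instance_of, "V8.InstanceOf")   \
      SC(stack_interrupts, "V8.StackInterrupts")

    // Expansion 1: generate one int field per counter.
    struct Counters {
    #define SC(name, caption) int name;
      COUNTER_LIST(SC)
    #undef SC
    };

    int main() {
      Counters counters = { 0, 0 };
      counters.instance_of++;  // field generated from the list
      // Expansion 2: generate a dump of every counter.
    #define SC(name, caption) std::printf("%s = %d\n", caption, counters.name);
      COUNTER_LIST(SC)
    #undef SC
      return 0;
    }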
diff --git a/src/v8.cc b/src/v8.cc
index c8d719b..f5b6150 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -29,12 +29,16 @@
 
 #include "bootstrapper.h"
 #include "debug.h"
+#include "deoptimizer.h"
+#include "heap-profiler.h"
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "log.h"
+#include "oprofile-agent.h"
+#include "runtime-profiler.h"
 #include "serialize.h"
 #include "simulator.h"
 #include "stub-cache.h"
-#include "heap-profiler.h"
-#include "oprofile-agent.h"
-#include "log.h"
 
 namespace v8 {
 namespace internal {
@@ -43,6 +47,7 @@
 bool V8::has_been_setup_ = false;
 bool V8::has_been_disposed_ = false;
 bool V8::has_fatal_error_ = false;
+bool V8::use_crankshaft_ = true;
 
 
 bool V8::Initialize(Deserializer* des) {
@@ -50,6 +55,9 @@
   if (has_been_disposed_ || has_fatal_error_) return false;
   if (IsRunning()) return true;
 
+  use_crankshaft_ = FLAG_crankshaft;
+  // Peephole optimization might interfere with deoptimization.
+  FLAG_peephole_optimization = !use_crankshaft_;
   is_running_ = true;
   has_been_setup_ = true;
   has_fatal_error_ = false;
@@ -122,6 +130,9 @@
   CPU::Setup();
 
   OProfileAgent::Initialize();
+  Deoptimizer::Setup();
+  LAllocator::Setup();
+  RuntimeProfiler::Setup();
 
   // If we are deserializing, log non-function code objects and compiled
   // functions found in the snapshot.
@@ -144,6 +155,12 @@
 void V8::TearDown() {
   if (!has_been_setup_ || has_been_disposed_) return;
 
+  if (FLAG_time_hydrogen) HStatistics::Instance()->Print();
+
+  // We must stop the logger before we tear down other components.
+  Logger::EnsureTickerStopped();
+
+  Deoptimizer::TearDown();
   OProfileAgent::TearDown();
 
   if (FLAG_preemption) {
@@ -157,12 +174,11 @@
   Top::TearDown();
 
   HeapProfiler::TearDown();
-
   CpuProfiler::TearDown();
-
-  Heap::TearDown();
+  RuntimeProfiler::TearDown();
 
   Logger::TearDown();
+  Heap::TearDown();
 
   is_running_ = false;
   has_been_disposed_ = true;
diff --git a/src/v8.h b/src/v8.h
index a2313b0..cc1673e 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -66,7 +66,6 @@
 #include "log-inl.h"
 #include "cpu-profiler-inl.h"
 #include "handles-inl.h"
-#include "vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -84,6 +83,8 @@
   static bool Initialize(Deserializer* des);
   static void TearDown();
   static bool IsRunning() { return is_running_; }
+  static bool UseCrankshaft() { return use_crankshaft_; }
+  static void DisableCrankshaft() { use_crankshaft_ = false; }
   // To be dead you have to have lived
   static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
   static void SetFatalError();
@@ -115,6 +116,8 @@
   // True if engine has been shut down
   // (reset if engine is restarted)
   static bool has_been_disposed_;
+  // True if we are using the crankshaft optimizing compiler.
+  static bool use_crankshaft_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/v8globals.h b/src/v8globals.h
index 2815771..65bbf6a 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -82,6 +82,7 @@
 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
 const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
+const uint32_t kSlotsZapValue = 0xbeefdeed;
 const uint32_t kDebugZapValue = 0xbadbaddb;
 #endif
 
@@ -285,6 +286,14 @@
 };
 
 
+enum CheckType {
+  RECEIVER_MAP_CHECK,
+  STRING_CHECK,
+  NUMBER_CHECK,
+  BOOLEAN_CHECK
+};
+
+
 enum InLoopFlag {
   NOT_IN_LOOP,
   IN_LOOP
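
The new kSlotsZapValue joins the existing family of "zap" constants: distinctive bit patterns written over freed or uninitialized memory in debug builds, so that a stale read shows up in a crash dump as 0xbeefdeed rather than as plausible-looking data. The idea in miniature, with an illustrative helper that is not V8 code:

    #include <cstdint>

    static const uint32_t kSlotsZapValue = 0xbeefdeed;

    // Poison a range of slots so later misuse is easy to spot in a dump.
    static void ZapSlots(uint32_t* slots, int count) {
      for (int i = 0; i < count; i++) {
        slots[i] = kSlotsZapValue;
      }
    }

    int main() {
      uint32_t slots[4] = { 1, 2, 3, 4 };
      ZapSlots(slots, 4);
      return slots[0] == kSlotsZapValue ? 0 : 1;
    }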
diff --git a/src/v8natives.js b/src/v8natives.js
index 09b296d..9fd2162 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -491,29 +491,28 @@
 }
 
 
-// Converts an array returned from Runtime_GetOwnProperty to an actual
-// property descriptor. For a description of the array layout please
-// see the runtime.cc file.
-function ConvertDescriptorArrayToDescriptor(desc_array) {
-  if (desc_array == false) {
-    throw 'Internal error: invalid desc_array';
-  }
 
-  if (IS_UNDEFINED(desc_array)) {
-    return void 0;
-  }
-
+// ES5 section 8.12.1.
+function GetOwnProperty(obj, p) {
   var desc = new PropertyDescriptor();
-  // This is an accessor.
-  if (desc_array[IS_ACCESSOR_INDEX]) {
-    desc.setGet(desc_array[GETTER_INDEX]);
-    desc.setSet(desc_array[SETTER_INDEX]);
+
+  // GetOwnProperty returns an array indexed by the constants
+  // defined in macros.py.
+  // If p is not a property on obj, undefined is returned.
+  var props = %GetOwnProperty(ToObject(obj), ToString(p));
+
+  if (IS_UNDEFINED(props)) return void 0;
+
+  // This is an accessor.
+  if (props[IS_ACCESSOR_INDEX]) {
+    desc.setGet(props[GETTER_INDEX]);
+    desc.setSet(props[SETTER_INDEX]);
   } else {
-    desc.setValue(desc_array[VALUE_INDEX]);
-    desc.setWritable(desc_array[WRITABLE_INDEX]);
+    desc.setValue(props[VALUE_INDEX]);
+    desc.setWritable(props[WRITABLE_INDEX]);
   }
-  desc.setEnumerable(desc_array[ENUMERABLE_INDEX]);
-  desc.setConfigurable(desc_array[CONFIGURABLE_INDEX]);
+  desc.setEnumerable(props[ENUMERABLE_INDEX]);
+  desc.setConfigurable(props[CONFIGURABLE_INDEX]);
 
   return desc;
 }
@@ -536,27 +535,9 @@
 }
 
 
-// ES5 section 8.12.1.
-function GetOwnProperty(obj, p) {
-  // GetOwnProperty returns an array indexed by the constants
-  // defined in macros.py.
-  // If p is not a property on obj undefined is returned.
-  var props = %GetOwnProperty(ToObject(obj), ToString(p));
-
-  // A false value here means that access checks failed.
-  if (props == false) return void 0;
-
-  return ConvertDescriptorArrayToDescriptor(props);
-}
-
-
 // ES5 8.12.9.
 function DefineOwnProperty(obj, p, desc, should_throw) {
-  var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
-  // A false value here means that access checks failed.
-  if (current_or_access == false) return void 0;
-
-  var current = ConvertDescriptorArrayToDescriptor(current_or_access);
+  var current = GetOwnProperty(obj, p);
   var extensible = %IsExtensible(ToObject(obj));
 
   // Error handling according to spec.
@@ -582,7 +563,7 @@
     }
 
     // Step 7
-    if (desc.isConfigurable() ||  desc.isEnumerable() != current.isEnumerable())
+    if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable())
       throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
     // Step 9
     if (IsDataDescriptor(current) != IsDataDescriptor(desc))
@@ -634,12 +615,20 @@
     } else {
       flag |= READ_ONLY;
     }
-    %DefineOrRedefineDataProperty(obj, p, desc.getValue(), flag);
+    var value = void 0;  // Default value is undefined.
+    if (desc.hasValue()) {
+      value = desc.getValue();
+    } else if (!IS_UNDEFINED(current)) {
+      value = current.getValue();
+    }
+    %DefineOrRedefineDataProperty(obj, p, value, flag);
   } else {
-    if (desc.hasGetter() && IS_FUNCTION(desc.getGet())) {
+    if (desc.hasGetter() &&
+        (IS_FUNCTION(desc.getGet()) || IS_UNDEFINED(desc.getGet()))) {
        %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
     }
-    if (desc.hasSetter() && IS_FUNCTION(desc.getSet())) {
+    if (desc.hasSetter() &&
+        (IS_FUNCTION(desc.getSet()) || IS_UNDEFINED(desc.getSet()))) {
       %DefineOrRedefineAccessorProperty(obj, p, SETTER, desc.getSet(), flag);
     }
   }
@@ -922,19 +911,13 @@
 }
 
 
-function BooleanToJSON(key) {
-  return CheckJSONPrimitive(this.valueOf());
-}
-
-
 // ----------------------------------------------------------------------------
 
 
 function SetupBoolean() {
   InstallFunctions($Boolean.prototype, DONT_ENUM, $Array(
     "toString", BooleanToString,
-    "valueOf", BooleanValueOf,
-    "toJSON", BooleanToJSON
+    "valueOf", BooleanValueOf
   ));
 }
 
@@ -1034,18 +1017,6 @@
 }
 
 
-function CheckJSONPrimitive(val) {
-  if (!IsPrimitive(val))
-    throw MakeTypeError('result_not_primitive', ['toJSON', val]);
-  return val;
-}
-
-
-function NumberToJSON(key) {
-  return CheckJSONPrimitive(this.valueOf());
-}
-
-
 // ----------------------------------------------------------------------------
 
 function SetupNumber() {
@@ -1086,15 +1057,13 @@
     "valueOf", NumberValueOf,
     "toFixed", NumberToFixed,
     "toExponential", NumberToExponential,
-    "toPrecision", NumberToPrecision,
-    "toJSON", NumberToJSON
+    "toPrecision", NumberToPrecision
   ));
 }
 
 SetupNumber();
 
 
-
 // ----------------------------------------------------------------------------
 // Function
 
diff --git a/test/mjsunit/regress/regress-3408144.js b/src/v8preparserdll-main.cc
similarity index 83%
copy from test/mjsunit/regress/regress-3408144.js
copy to src/v8preparserdll-main.cc
index 6e292d6..c0344d3 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/src/v8preparserdll-main.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+#include <windows.h>
 
+#include "../include/v8-preparser.h"
 
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+extern "C" {
+BOOL WINAPI DllMain(HANDLE hinstDLL,
+                    DWORD dwReason,
+                    LPVOID lpvReserved) {
+  // Do nothing.
+  return TRUE;
 }
-
-assertFalse(foo());
+}
diff --git a/src/v8utils.h b/src/v8utils.h
index a907c9f..87efbcf 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -29,6 +29,10 @@
 #define V8_V8UTILS_H_
 
 #include "utils.h"
+#ifdef ANDROID
+// Cherry pick from r6346 to build on Android.
+#include "platform.h"
+#endif
 
 namespace v8 {
 namespace internal {
@@ -42,18 +46,26 @@
 // so it works on MacOSX.
 #if defined(__MACH__) && defined(__APPLE__)
 #define PRINTF_CHECKING
+#define FPRINTF_CHECKING
 #else  // MacOsX.
 #define PRINTF_CHECKING __attribute__ ((format (printf, 1, 2)))
+#define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3)))
 #endif
 #else
 #define PRINTF_CHECKING
+#define FPRINTF_CHECKING
 #endif
 
 // Our version of printf().
 void PRINTF_CHECKING PrintF(const char* format, ...);
+void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...);
 
 // Our version of fflush.
-void Flush();
+void Flush(FILE* out);
+
+inline void Flush() {
+  Flush(stdout);
+}
 
 
 // Read a line of characters after printing the prompt to stdout. The resulting
@@ -67,6 +79,14 @@
 byte* ReadBytes(const char* filename, int* size, bool verbose = true);
 
 
+// Append size chars from str to the file given by filename.
+// The data is appended to the file. Returns the number of chars written.
+int AppendChars(const char* filename,
+                const char* str,
+                int size,
+                bool verbose = true);
+
+
 // Write size chars from str to the file given by filename.
 // The file is overwritten. Returns the number of chars written.
 int WriteChars(const char* filename,
@@ -217,6 +237,9 @@
   // Add formatted contents to the builder just like printf().
   void AddFormatted(const char* format, ...);
 
+  // Add formatted contents to the builder like printf(), from a va_list.
+  void AddFormattedList(const char* format, va_list list);
+
   // Add character padding to the builder. If count is non-positive,
   // nothing is added to the builder.
   void AddPadding(char c, int count);
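
FPRINTF_CHECKING differs from PRINTF_CHECKING only in the argument indices: for the new FILE* overload the format string is parameter 2 and the variadic arguments start at parameter 3, since parameter 1 is the stream. A self-contained sketch of the annotation, using the same GCC attribute with an illustrative function body:

    #include <cstdarg>
    #include <cstdio>

    #if defined(__GNUC__)
    #define FPRINTF_CHECKING __attribute__ ((format (printf, 2, 3)))
    #else
    #define FPRINTF_CHECKING
    #endif

    // GCC now type-checks the varargs against the format string.
    void FPRINTF_CHECKING PrintF(FILE* out, const char* format, ...) {
      va_list arguments;
      va_start(arguments, format);
      std::vfprintf(out, format, arguments);
      va_end(arguments);
    }

    int main() {
      PrintF(stdout, "%d registers\n", 16);  // ok: %d matches int
      // PrintF(stdout, "%s\n", 16);         // would warn under -Wformat
      return 0;
    }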
diff --git a/src/variables.cc b/src/variables.cc
index 504e224..c1440b7 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -86,6 +86,18 @@
 }
 
 
+bool Variable::IsParameter() const {
+  Slot* s = AsSlot();
+  return s != NULL && s->type() == Slot::PARAMETER;
+}
+
+
+bool Variable::IsStackLocal() const {
+  Slot* s = AsSlot();
+  return s != NULL && s->type() == Slot::LOCAL;
+}
+
+
 Variable::Variable(Scope* scope,
                    Handle<String> name,
                    Mode mode,
diff --git a/src/variables.h b/src/variables.h
index ec76fee..9e460f7 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -146,6 +146,8 @@
   }
 
   bool IsStackAllocated() const;
+  bool IsParameter() const;  // Includes 'this'.
+  bool IsStackLocal() const;
 
   bool is_dynamic() const {
     return (mode_ == DYNAMIC ||
diff --git a/src/version.cc b/src/version.cc
index 3806e68..d2c0960 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -32,10 +32,10 @@
 // These macros define the version number for the current version.
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
-#define MAJOR_VERSION     2
-#define MINOR_VERSION     5
-#define BUILD_NUMBER      9
-#define PATCH_LEVEL       20
+#define MAJOR_VERSION     3
+#define MINOR_VERSION     0
+#define BUILD_NUMBER      4
+#define PATCH_LEVEL       1
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build the put a specific SONAME into the
@@ -57,12 +57,19 @@
 // Calculate the V8 version string.
 void Version::GetString(Vector<char> str) {
   const char* candidate = IsCandidate() ? " (candidate)" : "";
+#ifdef USE_SIMULATOR
+  const char* is_simulator = " SIMULATOR";
+#else
+  const char* is_simulator = "";
+#endif  // USE_SIMULATOR
   if (GetPatch() > 0) {
-    OS::SNPrintF(str, "%d.%d.%d.%d%s",
-                 GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate);
+    OS::SNPrintF(str, "%d.%d.%d.%d%s%s",
+                 GetMajor(), GetMinor(), GetBuild(), GetPatch(), candidate,
+                 is_simulator);
   } else {
-    OS::SNPrintF(str, "%d.%d.%d%s",
-                 GetMajor(), GetMinor(), GetBuild(), candidate);
+    OS::SNPrintF(str, "%d.%d.%d%s%s",
+                 GetMajor(), GetMinor(), GetBuild(), candidate,
+                 is_simulator);
   }
 }
 
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
index 74f4a6a..da912b7 100644
--- a/src/vm-state-inl.h
+++ b/src/vm-state-inl.h
@@ -29,6 +29,7 @@
 #define V8_VM_STATE_INL_H_
 
 #include "vm-state.h"
+#include "runtime-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -49,52 +50,31 @@
       return "COMPILER";
     case OTHER:
       return "OTHER";
+    case EXTERNAL:
+      return "EXTERNAL";
     default:
       UNREACHABLE();
       return NULL;
   }
 }
 
-VMState::VMState(StateTag state)
-    : disabled_(true),
-      state_(OTHER),
-      external_callback_(NULL) {
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Logger::is_logging() && !CpuProfiler::is_profiling()) {
-    return;
-  }
-#endif
-
-  disabled_ = false;
-#if !defined(ENABLE_HEAP_PROTECTION)
-  // When not protecting the heap, there is no difference between
-  // EXTERNAL and OTHER.  As an optimization in that case, we will not
-  // perform EXTERNAL->OTHER transitions through the API.  We thus
-  // compress the two states into one.
-  if (state == EXTERNAL) state = OTHER;
-#endif
-  state_ = state;
-  // Save the previous state.
-  previous_ = Top::current_vm_state();
-  // Install the new state.
-  Top::set_current_vm_state(this);
-
+VMState::VMState(StateTag tag) : previous_tag_(Top::current_vm_state()) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_state_changes) {
-    LOG(UncheckedStringEvent("Entering", StateToString(state_)));
-    if (previous_ != NULL) {
-      LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
-    }
+    LOG(UncheckedStringEvent("Entering", StateToString(tag)));
+    LOG(UncheckedStringEvent("From", StateToString(previous_tag_)));
   }
 #endif
 
+  Top::SetCurrentVMState(tag);
+
 #ifdef ENABLE_HEAP_PROTECTION
   if (FLAG_protect_heap) {
-    if (state_ == EXTERNAL) {
+    if (tag == EXTERNAL) {
       // We are leaving V8.
-      ASSERT((previous_ != NULL) && (previous_->state_ != EXTERNAL));
+      ASSERT(previous_tag_ != EXTERNAL);
       Heap::Protect();
-    } else if ((previous_ == NULL) || (previous_->state_ == EXTERNAL)) {
+    } else if (previous_tag_ == EXTERNAL) {
       // We are entering V8.
       Heap::Unprotect();
     }
@@ -104,34 +84,51 @@
 
 
 VMState::~VMState() {
-  if (disabled_) return;
-  // Return to the previous state.
-  Top::set_current_vm_state(previous_);
-
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (FLAG_log_state_changes) {
-    LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
-    if (previous_ != NULL) {
-      LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
-    }
+    LOG(UncheckedStringEvent("Leaving",
+                             StateToString(Top::current_vm_state())));
+    LOG(UncheckedStringEvent("To", StateToString(previous_tag_)));
   }
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 #ifdef ENABLE_HEAP_PROTECTION
+  StateTag tag = Top::current_vm_state();
+#endif
+
+  Top::SetCurrentVMState(previous_tag_);
+
+#ifdef ENABLE_HEAP_PROTECTION
   if (FLAG_protect_heap) {
-    if (state_ == EXTERNAL) {
+    if (tag == EXTERNAL) {
       // We are reentering V8.
-      ASSERT((previous_ != NULL) && (previous_->state_ != EXTERNAL));
+      ASSERT(previous_tag_ != EXTERNAL);
       Heap::Unprotect();
-    } else if ((previous_ == NULL) || (previous_->state_ == EXTERNAL)) {
+    } else if (previous_tag_ == EXTERNAL) {
       // We are leaving V8.
       Heap::Protect();
     }
   }
 #endif  // ENABLE_HEAP_PROTECTION
 }
+
 #endif  // ENABLE_VMSTATE_TRACKING
 
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+ExternalCallbackScope::ExternalCallbackScope(Address callback)
+    : previous_callback_(Top::external_callback()) {
+  Top::set_external_callback(callback);
+}
+
+ExternalCallbackScope::~ExternalCallbackScope() {
+  Top::set_external_callback(previous_callback_);
+}
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_VM_STATE_INL_H_
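
The rewrite above shrinks VMState from a linked list of frame objects to a single saved tag: the constructor swaps the new tag into a thread-global slot, and the destructor swaps the old one back. A minimal RAII sketch of that scheme, with a plain global standing in for Top::current_vm_state() and the logging and heap-protection branches omitted:

    enum StateTag { JS, GC, COMPILER, OTHER, EXTERNAL };

    static StateTag current_vm_state = EXTERNAL;

    class VMState {
     public:
      explicit VMState(StateTag tag) : previous_tag_(current_vm_state) {
        current_vm_state = tag;            // enter the new state
      }
      ~VMState() {
        current_vm_state = previous_tag_;  // restore on scope exit
      }
     private:
      StateTag previous_tag_;
    };

    static void RunScript() {
      VMState state(JS);  // current_vm_state == JS inside this scope
      // ... execute JavaScript ...
    }                     // destructor restores EXTERNAL

    int main() {
      RunScript();
      return current_vm_state == EXTERNAL ? 0 : 1;
    }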
diff --git a/src/vm-state.h b/src/vm-state.h
index cc91e83..df7fb30 100644
--- a/src/vm-state.h
+++ b/src/vm-state.h
@@ -36,34 +36,11 @@
 class VMState BASE_EMBEDDED {
 #ifdef ENABLE_VMSTATE_TRACKING
  public:
-  inline VMState(StateTag state);
+  inline explicit VMState(StateTag tag);
   inline ~VMState();
 
-  StateTag state() { return state_; }
-  void set_external_callback(Address external_callback) {
-    external_callback_ = external_callback;
-  }
-
-  // Used for debug asserts.
-  static bool is_outermost_external() {
-    return Top::current_vm_state() == 0;
-  }
-
-  static StateTag current_state() {
-    VMState* state = Top::current_vm_state();
-    return state ? state->state() : EXTERNAL;
-  }
-
-  static Address external_callback() {
-    VMState* state = Top::current_vm_state();
-    return state ? state->external_callback_ : NULL;
-  }
-
  private:
-  bool disabled_;
-  StateTag state_;
-  VMState* previous_;
-  Address external_callback_;
+  StateTag previous_tag_;
 
 #else
  public:
@@ -71,6 +48,20 @@
 #endif
 };
 
+
+class ExternalCallbackScope BASE_EMBEDDED {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ public:
+  inline explicit ExternalCallbackScope(Address callback);
+  inline ~ExternalCallbackScope();
+ private:
+  Address previous_callback_;
+#else
+ public:
+  explicit ExternalCallbackScope(Address callback) {}
+#endif
+};
+
 } }  // namespace v8::internal
 
 
diff --git a/src/win32-headers.h b/src/win32-headers.h
new file mode 100644
index 0000000..b51a38a
--- /dev/null
+++ b/src/win32-headers.h
@@ -0,0 +1,95 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef WIN32_LEAN_AND_MEAN
+// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
+#define WIN32_LEAN_AND_MEAN
+#endif
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#ifndef NOKERNEL
+#define NOKERNEL
+#endif
+#ifndef NOUSER
+#define NOUSER
+#endif
+#ifndef NOSERVICE
+#define NOSERVICE
+#endif
+#ifndef NOSOUND
+#define NOSOUND
+#endif
+#ifndef NOMCX
+#define NOMCX
+#endif
+// Require Windows XP or higher (this is required for the RtlCaptureContext
+// function to be present).
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x501
+#endif
+
+#include <windows.h>
+
+#ifdef V8_WIN32_HEADERS_FULL
+#include <time.h>  // For LocalOffset() implementation.
+#include <mmsystem.h>  // For timeGetTime().
+#ifdef __MINGW32__
+// Require Windows XP or higher when compiling with MinGW, so that the
+// MinGW header files expose getaddrinfo.
+#undef _WIN32_WINNT
+#define _WIN32_WINNT 0x501
+#endif  // __MINGW32__
+#ifndef __MINGW32__
+#include <dbghelp.h>  // For SymLoadModule64 et al.
+#endif  // __MINGW32__
+#include <limits.h>  // For INT_MAX et al.
+#include <tlhelp32.h>  // For Module32First et al.
+
+// These additional WIN32 includes have to be right here as the #undefs
+// below make it impossible to have them elsewhere.
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <process.h>  // for _beginthreadex()
+#include <stdlib.h>
+#endif  // V8_WIN32_HEADERS_FULL
+
+#undef VOID
+#undef DELETE
+#undef IN
+#undef THIS
+#undef CONST
+#undef NAN
+#undef TRUE
+#undef FALSE
+#undef UNKNOWN
+#undef NONE
+#undef ANY
+#undef IGNORE
+#undef GetObject
+#undef CreateMutex
+#undef CreateSemaphore
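
The #undef block exists because <windows.h> defines object-like macros (TRUE, NONE, GetObject, ...) that collide with ordinary V8 identifiers; for example, V8 declares enumerators named NONE and ANY, which the preprocessor would otherwise rewrite into numbers. A compressed illustration of the failure mode and the fix, using a stand-in macro and a hypothetical enum:

    #define NONE 0  // stand-in for what a Windows header defines

    // enum PropertyAttributes { NONE = 0, READ_ONLY = 1 };
    // ...would not compile: the preprocessor turns it into "{ 0 = 0, ... }".

    #undef NONE  // the header's #undef block; the name is usable again

    enum PropertyAttributes { NONE = 0, READ_ONLY = 1 };

    int main() { return NONE; }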
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 44159e0..1fe9eed 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -274,6 +274,30 @@
 }
 
 
+Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  return Handle<JSGlobalPropertyCell>(
+      reinterpret_cast<JSGlobalPropertyCell**>(address));
+}
+
+
+JSGlobalPropertyCell* RelocInfo::target_cell() {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = Memory::Address_at(pc_);
+  Object* object = HeapObject::FromAddress(
+      address - JSGlobalPropertyCell::kValueOffset);
+  return reinterpret_cast<JSGlobalPropertyCell*>(object);
+}
+
+
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+  ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+  Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+  Memory::Address_at(pc_) = address;
+}
+
+
 bool RelocInfo::IsPatchedReturnSequence() {
   // The recognized call sequence is:
   //  movq(kScratchRegister, immediate64); call(kScratchRegister);
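
The new cell accessors convert in both directions between a heap object and the address of one of its fields: set_target_cell stores cell->address() + kValueOffset, and target_cell undoes that with HeapObject::FromAddress(address - kValueOffset). The same arithmetic on a plain struct, ignoring V8's heap-object tagging; this is a sketch, not the V8 object layout:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Cell {
      uint64_t map;    // stand-in header word
      uint64_t value;  // the field the relocated slot points at
    };

    int main() {
      const uintptr_t kValueOffset = offsetof(Cell, value);
      Cell cell = { 0, 1234 };
      // Store the address of the value field (what set_target_cell writes)...
      uintptr_t slot = reinterpret_cast<uintptr_t>(&cell) + kValueOffset;
      // ...and recover the containing object (what target_cell computes).
      Cell* recovered = reinterpret_cast<Cell*>(slot - kValueOffset);
      assert(recovered == &cell && recovered->value == 1234);
      return 0;
    }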
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index caed7c8..8f15f23 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,10 +44,10 @@
 uint64_t CpuFeatures::enabled_ = 0;
 uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
-void CpuFeatures::Probe()  {
+void CpuFeatures::Probe(bool portable)  {
   ASSERT(Heap::HasBeenSetup());
-  ASSERT(supported_ == kDefaultCpuFeatures);
-  if (Serializer::enabled()) {
+  supported_ = kDefaultCpuFeatures;
+  if (portable && Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
@@ -133,7 +133,7 @@
   found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
   uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
   supported_ |= os_guarantees;
-  found_by_runtime_probing_ &= ~os_guarantees;
+  found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
   // SSE2 and CMOV must be available on an X64 CPU.
   ASSERT(IsSupported(CPUID));
   ASSERT(IsSupported(SSE2));
@@ -821,6 +821,7 @@
 
 
 void Assembler::call(Label* L) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   // 1110 1000 #32-bit disp.
@@ -852,6 +853,7 @@
 
 
 void Assembler::call(Register adr) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   // Opcode: FF /2 r64.
@@ -862,6 +864,7 @@
 
 
 void Assembler::call(const Operand& op) {
+  positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   // Opcode: FF /2 m64.
@@ -2217,6 +2220,14 @@
 }
 
 
+void Assembler::fldln2() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xED);
+}
+
+
 void Assembler::fld_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2358,6 +2369,14 @@
 }
 
 
+void Assembler::fyl2x() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xD9);
+  emit(0xF1);
+}
+
+
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2917,6 +2936,12 @@
 }
 
 
+void Assembler::dd(uint32_t data) {
+  EnsureSpace ensure_space(this);
+  emitl(data);
+}
+
+
 // Relocation information implementations.
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
@@ -2946,7 +2971,7 @@
 
 
 void Assembler::RecordComment(const char* msg) {
-  if (FLAG_debug_code) {
+  if (FLAG_code_comments) {
     EnsureSpace ensure_space(this);
     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
   }
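
Of the two new x87 emitters, fldln2 pushes the constant ln(2) and fyl2x computes ST(1) * log2(ST(0)), so the pair evaluates ln(x) = ln(2) * log2(x); presumably this is for a Math.log fast path. The identity itself, checked in plain C++:

    #include <cassert>
    #include <cmath>

    // ln(x) via the x87 recipe: fldln2 pushes ln(2), then fyl2x multiplies
    // it by log2(x).
    static double LnViaLog2(double x) {
      const double kLn2 = 0.6931471805599453;  // what fldln2 pushes
      return kLn2 * std::log2(x);              // what fyl2x computes
    }

    int main() {
      assert(std::fabs(LnViaLog2(10.0) - std::log(10.0)) < 1e-12);
      return 0;
    }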
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index c7f7632..fde88df 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 // A lightweight X64 Assembler.
 
@@ -88,11 +88,38 @@
 //
 
 struct Register {
+  // The non-allocatable registers are:
+  //  rsp - stack pointer
+  //  rbp - frame pointer
+  //  rsi - context register
+  //  r10 - fixed scratch register
+  //  r13 - root register
+  //  r15 - smi constant register
+  static const int kNumRegisters = 16;
+  static const int kNumAllocatableRegisters = 10;
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "rax",
+      "rcx",
+      "rdx",
+      "rbx",
+      "rdi",
+      "r8",
+      "r9",
+      "r11",
+      "r12",
+      "r14"
+    };
+    return names[index];
+  }
+
   static Register toRegister(int code) {
     Register r = { code };
     return r;
   }
-  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
   int code() const {
     ASSERT(is_valid());
@@ -138,7 +165,37 @@
 
 
 struct XMMRegister {
-  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  static const int kNumRegisters = 16;
+  static const int kNumAllocatableRegisters = 15;
+
+  static int ToAllocationIndex(XMMRegister reg) {
+    ASSERT(reg.code() != 0);
+    return reg.code() - 1;
+  }
+
+  static const char* AllocationIndexToString(int index) {
+    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+    const char* const names[] = {
+      "xmm1",
+      "xmm2",
+      "xmm3",
+      "xmm4",
+      "xmm5",
+      "xmm6",
+      "xmm7",
+      "xmm8",
+      "xmm9",
+      "xmm10",
+      "xmm11",
+      "xmm12",
+      "xmm13",
+      "xmm14",
+      "xmm15"
+    };
+    return names[index];
+  }
+
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   int code() const {
     ASSERT(is_valid());
     return code_;
@@ -175,6 +232,10 @@
 const XMMRegister xmm14 = { 14 };
 const XMMRegister xmm15 = { 15 };
 
+
+typedef XMMRegister DoubleRegister;
+
+
 enum Condition {
   // any value < 0 is considered no_condition
   no_condition  = -1,
@@ -345,7 +406,7 @@
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  static void Probe();
+  static void Probe(bool portable);
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
     if (f == SSE2 && !FLAG_enable_sse2) return false;
@@ -1046,6 +1107,7 @@
   void fld1();
   void fldz();
   void fldpi();
+  void fldln2();
 
   void fld_s(const Operand& adr);
   void fld_d(const Operand& adr);
@@ -1100,6 +1162,7 @@
 
   void fsin();
   void fcos();
+  void fyl2x();
 
   void frndint();
 
@@ -1171,9 +1234,14 @@
   void RecordDebugBreakSlot();
 
   // Record a comment relocation entry that can be used by a disassembler.
-  // Use --debug_code to enable.
+  // Use --code-comments to enable.
   void RecordComment(const char* msg);
 
+  // Writes a single byte or word of data in the code stream.
+  // Used for inline tables, e.g., jump-tables.
+  void db(uint8_t data) { UNIMPLEMENTED(); }
+  void dd(uint32_t data);
+
   int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
 
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
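
The register tables above are what the new Lithium allocator consumes: of the 16 XMM registers, xmm0 is held back (presumably as a scratch register), so an allocation index is simply code() - 1 and AllocationIndexToString starts at "xmm1". A standalone restatement of that mapping, not the V8 header:

    #include <cassert>

    struct XMMRegister {
      static const int kNumRegisters = 16;
      static const int kNumAllocatableRegisters = 15;
      int code_;
      int code() const { return code_; }
      static int ToAllocationIndex(XMMRegister reg) {
        assert(reg.code() != 0);  // xmm0 is not allocatable
        return reg.code() - 1;
      }
    };

    int main() {
      XMMRegister xmm1 = { 1 };
      XMMRegister xmm15 = { 15 };
      assert(XMMRegister::ToAllocationIndex(xmm1) == 0);
      assert(XMMRegister::ToAllocationIndex(xmm15) ==
             XMMRegister::kNumAllocatableRegisters - 1);
      return 0;
    }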
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 0dead6b..456d076 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,11 +30,13 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "codegen-inl.h"
-#include "macro-assembler.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
 
 namespace v8 {
 namespace internal {
 
+
 #define __ ACCESS_MASM(masm)
 
 
@@ -71,817 +73,6 @@
 }
 
 
-static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
-  __ push(rbp);
-  __ movq(rbp, rsp);
-
-  // Store the arguments adaptor context sentinel.
-  __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-
-  // Push the function on the stack.
-  __ push(rdi);
-
-  // Preserve the number of arguments on the stack. Must preserve both
-  // rax and rbx because these registers are used when copying the
-  // arguments and the receiver.
-  __ Integer32ToSmi(rcx, rax);
-  __ push(rcx);
-}
-
-
-static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
-  // Retrieve the number of arguments from the stack. Number is a Smi.
-  __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  // Leave the frame.
-  __ movq(rsp, rbp);
-  __ pop(rbp);
-
-  // Remove caller arguments from the stack.
-  __ pop(rcx);
-  SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
-  __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
-  __ push(rcx);
-}
-
-
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax : actual number of arguments
-  //  -- rbx : expected number of arguments
-  //  -- rdx : code entry to call
-  // -----------------------------------
-
-  Label invoke, dont_adapt_arguments;
-  __ IncrementCounter(&Counters::arguments_adaptors, 1);
-
-  Label enough, too_few;
-  __ cmpq(rax, rbx);
-  __ j(less, &too_few);
-  __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
-  __ j(equal, &dont_adapt_arguments);
-
-  {  // Enough parameters: Actual >= expected.
-    __ bind(&enough);
-    EnterArgumentsAdaptorFrame(masm);
-
-    // Copy receiver and all expected arguments.
-    const int offset = StandardFrameConstants::kCallerSPOffset;
-    __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
-    __ movq(rcx, Immediate(-1));  // account for receiver
-
-    Label copy;
-    __ bind(&copy);
-    __ incq(rcx);
-    __ push(Operand(rax, 0));
-    __ subq(rax, Immediate(kPointerSize));
-    __ cmpq(rcx, rbx);
-    __ j(less, &copy);
-    __ jmp(&invoke);
-  }
-
-  {  // Too few parameters: Actual < expected.
-    __ bind(&too_few);
-    EnterArgumentsAdaptorFrame(masm);
-
-    // Copy receiver and all actual arguments.
-    const int offset = StandardFrameConstants::kCallerSPOffset;
-    __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
-    __ movq(rcx, Immediate(-1));  // account for receiver
-
-    Label copy;
-    __ bind(&copy);
-    __ incq(rcx);
-    __ push(Operand(rdi, 0));
-    __ subq(rdi, Immediate(kPointerSize));
-    __ cmpq(rcx, rax);
-    __ j(less, &copy);
-
-    // Fill remaining expected arguments with undefined values.
-    Label fill;
-    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
-    __ bind(&fill);
-    __ incq(rcx);
-    __ push(kScratchRegister);
-    __ cmpq(rcx, rbx);
-    __ j(less, &fill);
-
-    // Restore function pointer.
-    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-  }
-
-  // Call the entry point.
-  __ bind(&invoke);
-  __ call(rdx);
-
-  // Leave frame and return.
-  LeaveArgumentsAdaptorFrame(masm);
-  __ ret(0);
-
-  // -------------------------------------------
-  // Dont adapt arguments.
-  // -------------------------------------------
-  __ bind(&dont_adapt_arguments);
-  __ jmp(rdx);
-}
-
-
-void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
-  // Stack Layout:
-  // rsp[0]:   Return address
-  // rsp[1]:   Argument n
-  // rsp[2]:   Argument n-1
-  //  ...
-  // rsp[n]:   Argument 1
-  // rsp[n+1]: Receiver (function to call)
-  //
-  // rax contains the number of arguments, n, not counting the receiver.
-  //
-  // 1. Make sure we have at least one argument.
-  { Label done;
-    __ testq(rax, rax);
-    __ j(not_zero, &done);
-    __ pop(rbx);
-    __ Push(Factory::undefined_value());
-    __ push(rbx);
-    __ incq(rax);
-    __ bind(&done);
-  }
-
-  // 2. Get the function to call (passed as receiver) from the stack, check
-  //    if it is a function.
-  Label non_function;
-  // The function to call is at position n+1 on the stack.
-  __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
-  __ JumpIfSmi(rdi, &non_function);
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &non_function);
-
-  // 3a. Patch the first argument if necessary when calling a function.
-  Label shift_arguments;
-  { Label convert_to_object, use_global_receiver, patch_receiver;
-    // Change context eagerly in case we need the global receiver.
-    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
-    __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
-    __ JumpIfSmi(rbx, &convert_to_object);
-
-    __ CompareRoot(rbx, Heap::kNullValueRootIndex);
-    __ j(equal, &use_global_receiver);
-    __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
-    __ j(equal, &use_global_receiver);
-
-    __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
-    __ j(below, &convert_to_object);
-    __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
-    __ j(below_equal, &shift_arguments);
-
-    __ bind(&convert_to_object);
-    __ EnterInternalFrame();  // In order to preserve argument count.
-    __ Integer32ToSmi(rax, rax);
-    __ push(rax);
-
-    __ push(rbx);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ movq(rbx, rax);
-
-    __ pop(rax);
-    __ SmiToInteger32(rax, rax);
-    __ LeaveInternalFrame();
-    // Restore the function to rdi.
-    __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
-    __ jmp(&patch_receiver);
-
-    // Use the global receiver object from the called function as the
-    // receiver.
-    __ bind(&use_global_receiver);
-    const int kGlobalIndex =
-        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-    __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
-    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
-    __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
-    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-
-    __ bind(&patch_receiver);
-    __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
-
-    __ jmp(&shift_arguments);
-  }
-
-
-  // 3b. Patch the first argument when calling a non-function.  The
-  //     CALL_NON_FUNCTION builtin expects the non-function callee as
-  //     receiver, so overwrite the first argument which will ultimately
-  //     become the receiver.
-  __ bind(&non_function);
-  __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
-  __ xor_(rdi, rdi);
-
-  // 4. Shift arguments and return address one slot down on the stack
-  //    (overwriting the original receiver).  Adjust argument count to make
-  //    the original first argument the new receiver.
-  __ bind(&shift_arguments);
-  { Label loop;
-    __ movq(rcx, rax);
-    __ bind(&loop);
-    __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
-    __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
-    __ decq(rcx);
-    __ j(not_sign, &loop);  // While non-negative (to copy return address).
-    __ pop(rbx);  // Discard copy of return address.
-    __ decq(rax);  // One fewer argument (first argument is new receiver).
-  }
-
-  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
-  { Label function;
-    __ testq(rdi, rdi);
-    __ j(not_zero, &function);
-    __ xor_(rbx, rbx);
-    __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
-    __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
-            RelocInfo::CODE_TARGET);
-    __ bind(&function);
-  }
-
-  // 5b. Get the code to call from the function and check that the number of
-  //     expected arguments matches what we're providing.  If so, jump
-  //     (tail-call) to the code in register edx without checking arguments.
-  __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ movsxlq(rbx,
-             FieldOperand(rdx,
-                          SharedFunctionInfo::kFormalParameterCountOffset));
-  __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-  __ cmpq(rax, rbx);
-  __ j(not_equal,
-       Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
-       RelocInfo::CODE_TARGET);
-
-  ParameterCount expected(0);
-  __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
-}
-
-
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
-  // Stack at entry:
-  //    rsp: return address
-  //  rsp+8: arguments
-  // rsp+16: receiver ("this")
-  // rsp+24: function
-  __ EnterInternalFrame();
-  // Stack frame:
-  //    rbp: Old base pointer
-  // rbp[1]: return address
-  // rbp[2]: function arguments
-  // rbp[3]: receiver
-  // rbp[4]: function
-  static const int kArgumentsOffset = 2 * kPointerSize;
-  static const int kReceiverOffset = 3 * kPointerSize;
-  static const int kFunctionOffset = 4 * kPointerSize;
-  __ push(Operand(rbp, kFunctionOffset));
-  __ push(Operand(rbp, kArgumentsOffset));
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
-  // Check the stack for overflow. We are not trying need to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
-  __ movq(rcx, rsp);
-  // Make rcx the space we have left. The stack might already be overflowed
-  // here which will cause rcx to become negative.
-  __ subq(rcx, kScratchRegister);
-  // Make rdx the space we need for the array when it is unrolled onto the
-  // stack.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
-  // Check if the arguments will overflow the stack.
-  __ cmpq(rcx, rdx);
-  __ j(greater, &okay);  // Signed comparison.
-
-  // Out of stack space.
-  __ push(Operand(rbp, kFunctionOffset));
-  __ push(rax);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-  __ bind(&okay);
-  // End of stack check.
-
-  // Push current index and limit.
-  const int kLimitOffset =
-      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
-  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
-  __ push(rax);  // limit
-  __ push(Immediate(0));  // index
-
-  // Change context eagerly to get the right global object if
-  // necessary.
-  __ movq(rdi, Operand(rbp, kFunctionOffset));
-  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
-  // Compute the receiver.
-  Label call_to_object, use_global_receiver, push_receiver;
-  __ movq(rbx, Operand(rbp, kReceiverOffset));
-  __ JumpIfSmi(rbx, &call_to_object);
-  __ CompareRoot(rbx, Heap::kNullValueRootIndex);
-  __ j(equal, &use_global_receiver);
-  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
-  __ j(equal, &use_global_receiver);
-
-  // If given receiver is already a JavaScript object then there's no
-  // reason for converting it.
-  __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
-  __ j(below, &call_to_object);
-  __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
-  __ j(below_equal, &push_receiver);
-
-  // Convert the receiver to an object.
-  __ bind(&call_to_object);
-  __ push(rbx);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ movq(rbx, rax);
-  __ jmp(&push_receiver);
-
-  // Use the current global receiver object as the receiver.
-  __ bind(&use_global_receiver);
-  const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-  __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
-  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
-  __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
-  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
-
-  // Push the receiver.
-  __ bind(&push_receiver);
-  __ push(rbx);
-
-  // Copy all arguments from the array to the stack.
-  Label entry, loop;
-  __ movq(rax, Operand(rbp, kIndexOffset));
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ movq(rdx, Operand(rbp, kArgumentsOffset));  // load arguments
-
-  // Use inline caching to speed up access to arguments.
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // It is important that we do not have a test instruction after the
-  // call.  A test instruction after the call is used to indicate that
-  // we have generated an inline version of the keyed load.  In this
-  // case, we know that we are not generating a test instruction next.
-
-  // Push the nth argument.
-  __ push(rax);
-
-  // Update the index on the stack and in register rax.
-  __ movq(rax, Operand(rbp, kIndexOffset));
-  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
-  __ movq(Operand(rbp, kIndexOffset), rax);
-
-  __ bind(&entry);
-  __ cmpq(rax, Operand(rbp, kLimitOffset));
-  __ j(not_equal, &loop);
-
-  // Invoke the function.
-  ParameterCount actual(rax);
-  __ SmiToInteger32(rax, rax);
-  __ movq(rdi, Operand(rbp, kFunctionOffset));
-  __ InvokeFunction(rdi, actual, CALL_FUNCTION);
-
-  __ LeaveInternalFrame();
-  __ ret(3 * kPointerSize);  // remove function, receiver, and arguments
-}
-
-
-// Load the built-in Array function from the current context.
-static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
-  // Load the global context.
-  __ movq(result, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ movq(result, FieldOperand(result, GlobalObject::kGlobalContextOffset));
-  // Load the Array function from the global context.
-  __ movq(result,
-          Operand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
-// Number of empty elements to allocate for an empty array.
-static const int kPreallocatedArrayElements = 4;
-
-
-// Allocate an empty JSArray. The allocated array is put into the result
-// register. If the parameter initial_capacity is larger than zero an elements
-// backing store is allocated with this size and filled with the hole values.
-// Otherwise the elements backing store is set to the empty FixedArray.
-static void AllocateEmptyJSArray(MacroAssembler* masm,
-                                 Register array_function,
-                                 Register result,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Register scratch3,
-                                 int initial_capacity,
-                                 Label* gc_required) {
-  ASSERT(initial_capacity >= 0);
-
-  // Load the initial map from the array function.
-  __ movq(scratch1, FieldOperand(array_function,
-                                 JSFunction::kPrototypeOrInitialMapOffset));
-
-  // Allocate the JSArray object together with space for a fixed array with the
-  // requested elements.
-  int size = JSArray::kSize;
-  if (initial_capacity > 0) {
-    size += FixedArray::SizeFor(initial_capacity);
-  }
-  __ AllocateInNewSpace(size,
-                        result,
-                        scratch2,
-                        scratch3,
-                        gc_required,
-                        TAG_OBJECT);
-
-  // Allocated the JSArray. Now initialize the fields except for the elements
-  // array.
-  // result: JSObject
-  // scratch1: initial map
-  // scratch2: start of next object
-  __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
-  __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
-          Factory::empty_fixed_array());
-  // Field JSArray::kElementsOffset is initialized later.
-  __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
-
-  // If no storage is requested for the elements array just set the empty
-  // fixed array.
-  if (initial_capacity == 0) {
-    __ Move(FieldOperand(result, JSArray::kElementsOffset),
-            Factory::empty_fixed_array());
-    return;
-  }
-
-  // Calculate the location of the elements array and set elements array member
-  // of the JSArray.
-  // result: JSObject
-  // scratch2: start of next object
-  __ lea(scratch1, Operand(result, JSArray::kSize));
-  __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
-
-  // Initialize the FixedArray and fill it with holes. FixedArray length is
-  // stored as a smi.
-  // result: JSObject
-  // scratch1: elements array
-  // scratch2: start of next object
-  __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
-          Factory::fixed_array_map());
-  __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
-          Smi::FromInt(initial_capacity));
-
-  // Fill the FixedArray with the hole value. Inline the code if short.
-  // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
-  static const int kLoopUnfoldLimit = 4;
-  ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
-  __ Move(scratch3, Factory::the_hole_value());
-  if (initial_capacity <= kLoopUnfoldLimit) {
-    // Use a scratch register here to have only one reloc info when unfolding
-    // the loop.
-    for (int i = 0; i < initial_capacity; i++) {
-      __ movq(FieldOperand(scratch1,
-                           FixedArray::kHeaderSize + i * kPointerSize),
-              scratch3);
-    }
-  } else {
-    Label loop, entry;
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ movq(Operand(scratch1, 0), scratch3);
-    __ addq(scratch1, Immediate(kPointerSize));
-    __ bind(&entry);
-    __ cmpq(scratch1, scratch2);
-    __ j(below, &loop);
-  }
-}
-
-
-// Allocate a JSArray with the number of elements stored in a register. The
-// register array_function holds the built-in Array function and the register
-// array_size holds the size of the array as a smi. The allocated array is put
-// into the result register and beginning and end of the FixedArray elements
-// storage is put into registers elements_array and elements_array_end  (see
-// below for when that is not the case). If the parameter fill_with_holes is
-// true the allocated elements backing store is filled with the hole values
-// otherwise it is left uninitialized. When the backing store is filled the
-// register elements_array is scratched.
-static void AllocateJSArray(MacroAssembler* masm,
-                            Register array_function,  // Array function.
-                            Register array_size,  // As a smi.
-                            Register result,
-                            Register elements_array,
-                            Register elements_array_end,
-                            Register scratch,
-                            bool fill_with_hole,
-                            Label* gc_required) {
-  Label not_empty, allocated;
-
-  // Load the initial map from the array function.
-  __ movq(elements_array,
-          FieldOperand(array_function,
-                       JSFunction::kPrototypeOrInitialMapOffset));
-
-  // Check whether an empty sized array is requested.
-  __ testq(array_size, array_size);
-  __ j(not_zero, &not_empty);
-
-  // If an empty array is requested allocate a small elements array anyway. This
-  // keeps the code below free of special casing for the empty array.
-  int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
-  __ AllocateInNewSpace(size,
-                        result,
-                        elements_array_end,
-                        scratch,
-                        gc_required,
-                        TAG_OBJECT);
-  __ jmp(&allocated);
-
-  // Allocate the JSArray object together with space for a FixedArray with the
-  // requested elements.
-  __ bind(&not_empty);
-  SmiIndex index =
-      masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
-  __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
-                        index.scale,
-                        index.reg,
-                        result,
-                        elements_array_end,
-                        scratch,
-                        gc_required,
-                        TAG_OBJECT);
-
-  // Allocated the JSArray. Now initialize the fields except for the elements
-  // array.
-  // result: JSObject
-  // elements_array: initial map
-  // elements_array_end: start of next object
-  // array_size: size of array (smi)
-  __ bind(&allocated);
-  __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
-  __ Move(elements_array, Factory::empty_fixed_array());
-  __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
-  // Field JSArray::kElementsOffset is initialized later.
-  __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
-
-  // Calculate the location of the elements array and set elements array member
-  // of the JSArray.
-  // result: JSObject
-  // elements_array_end: start of next object
-  // array_size: size of array (smi)
-  __ lea(elements_array, Operand(result, JSArray::kSize));
-  __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
-
-  // Initialize the fixed array. FixedArray length is stored as a smi.
-  // result: JSObject
-  // elements_array: elements array
-  // elements_array_end: start of next object
-  // array_size: size of array (smi)
-  __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
-          Factory::fixed_array_map());
-  Label not_empty_2, fill_array;
-  __ SmiTest(array_size);
-  __ j(not_zero, &not_empty_2);
-  // Length of the FixedArray is the number of pre-allocated elements even
-  // though the actual JSArray has length 0.
-  __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
-          Smi::FromInt(kPreallocatedArrayElements));
-  __ jmp(&fill_array);
-  __ bind(&not_empty_2);
-  // For non-empty JSArrays the length of the FixedArray and the JSArray is the
-  // same.
-  __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
-
-  // Fill the allocated FixedArray with the hole value if requested.
-  // result: JSObject
-  // elements_array: elements array
-  // elements_array_end: start of next object
-  __ bind(&fill_array);
-  if (fill_with_hole) {
-    Label loop, entry;
-    __ Move(scratch, Factory::the_hole_value());
-    __ lea(elements_array, Operand(elements_array,
-                                   FixedArray::kHeaderSize - kHeapObjectTag));
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ movq(Operand(elements_array, 0), scratch);
-    __ addq(elements_array, Immediate(kPointerSize));
-    __ bind(&entry);
-    __ cmpq(elements_array, elements_array_end);
-    __ j(below, &loop);
-  }
-}
-
-
-// Create a new array for the built-in Array function. This function allocates
-// the JSArray object and the FixedArray elements array and initializes these.
-// If the Array cannot be constructed in native code the runtime is called. This
-// function assumes the following state:
-//   rdi: constructor (built-in Array function)
-//   rax: argc
-//   rsp[0]: return address
-//   rsp[8]: last argument
-// This function is used for both construct and normal calls of Array. The only
-// difference between handling a construct call and a normal call is that for a
-// construct call the constructor function in rdi needs to be preserved for
-// entering the generic code. In both cases argc in rax needs to be preserved.
-// Both registers are preserved by this code so no need to differentiate between
-// a construct call and a normal call.
-static void ArrayNativeCode(MacroAssembler* masm,
-                            Label *call_generic_code) {
-  Label argc_one_or_more, argc_two_or_more;
-
-  // Check for array construction with zero arguments.
-  __ testq(rax, rax);
-  __ j(not_zero, &argc_one_or_more);
-
-  // Handle construction of an empty array.
-  AllocateEmptyJSArray(masm,
-                       rdi,
-                       rbx,
-                       rcx,
-                       rdx,
-                       r8,
-                       kPreallocatedArrayElements,
-                       call_generic_code);
-  __ IncrementCounter(&Counters::array_function_native, 1);
-  __ movq(rax, rbx);
-  __ ret(kPointerSize);
-
-  // Check for one argument. Bail out if argument is not smi or if it is
-  // negative.
-  __ bind(&argc_one_or_more);
-  __ cmpq(rax, Immediate(1));
-  __ j(not_equal, &argc_two_or_more);
-  __ movq(rdx, Operand(rsp, kPointerSize));  // Get the argument from the stack.
-  __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
-
-  // Handle construction of an empty array of a certain size. Bail out if size
-  // is to large to actually allocate an elements array.
-  __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
-  __ j(greater_equal, call_generic_code);
-
-  // rax: argc
-  // rdx: array_size (smi)
-  // rdi: constructor
-  // esp[0]: return address
-  // esp[8]: argument
-  AllocateJSArray(masm,
-                  rdi,
-                  rdx,
-                  rbx,
-                  rcx,
-                  r8,
-                  r9,
-                  true,
-                  call_generic_code);
-  __ IncrementCounter(&Counters::array_function_native, 1);
-  __ movq(rax, rbx);
-  __ ret(2 * kPointerSize);
-
-  // Handle construction of an array from a list of arguments.
-  __ bind(&argc_two_or_more);
-  __ movq(rdx, rax);
-  __ Integer32ToSmi(rdx, rdx);  // Convet argc to a smi.
-  // rax: argc
-  // rdx: array_size (smi)
-  // rdi: constructor
-  // esp[0] : return address
-  // esp[8] : last argument
-  AllocateJSArray(masm,
-                  rdi,
-                  rdx,
-                  rbx,
-                  rcx,
-                  r8,
-                  r9,
-                  false,
-                  call_generic_code);
-  __ IncrementCounter(&Counters::array_function_native, 1);
-
-  // rax: argc
-  // rbx: JSArray
-  // rcx: elements_array
-  // r8: elements_array_end (untagged)
-  // esp[0]: return address
-  // esp[8]: last argument
-
-  // Location of the last argument
-  __ lea(r9, Operand(rsp, kPointerSize));
-
-  // Location of the first array element (Parameter fill_with_holes to
-  // AllocateJSArrayis false, so the FixedArray is returned in rcx).
-  __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
-
-  // rax: argc
-  // rbx: JSArray
-  // rdx: location of the first array element
-  // r9: location of the last argument
-  // esp[0]: return address
-  // esp[8]: last argument
-  Label loop, entry;
-  __ movq(rcx, rax);
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
-  __ movq(Operand(rdx, 0), kScratchRegister);
-  __ addq(rdx, Immediate(kPointerSize));
-  __ bind(&entry);
-  __ decq(rcx);
-  __ j(greater_equal, &loop);
-
-  // Remove caller arguments from the stack and return.
-  // rax: argc
-  // rbx: JSArray
-  // esp[0]: return address
-  // esp[8]: last argument
-  __ pop(rcx);
-  __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
-  __ push(rcx);
-  __ movq(rax, rbx);
-  __ ret(0);
-}
-
-
-void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax : argc
-  //  -- rsp[0] : return address
-  //  -- rsp[8] : last argument
-  // -----------------------------------
-  Label generic_array_code;
-
-  // Get the Array function.
-  GenerateLoadArrayFunction(masm, rdi);
-
-  if (FLAG_debug_code) {
-    // Initial map for the builtin Array function shoud be a map.
-    __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
-    // Will both indicate a NULL and a Smi.
-    ASSERT(kSmiTag == 0);
-    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
-    __ Check(not_smi, "Unexpected initial map for Array function");
-    __ CmpObjectType(rbx, MAP_TYPE, rcx);
-    __ Check(equal, "Unexpected initial map for Array function");
-  }
-
-  // Run the native code for the Array function called as a normal function.
-  ArrayNativeCode(masm, &generic_array_code);
-
-  // Jump to the generic array code in case the specialized code cannot handle
-  // the construction.
-  __ bind(&generic_array_code);
-  Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
-  Handle<Code> array_code(code);
-  __ Jump(array_code, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax : argc
-  //  -- rdi : constructor
-  //  -- rsp[0] : return address
-  //  -- rsp[8] : last argument
-  // -----------------------------------
-  Label generic_constructor;
-
-  if (FLAG_debug_code) {
-    // The array construct code is only set for the builtin Array function which
-    // does always have a map.
-    GenerateLoadArrayFunction(masm, rbx);
-    __ cmpq(rdi, rbx);
-    __ Check(equal, "Unexpected Array function");
-    // Initial map for the builtin Array function should be a map.
-    __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
-    // Will both indicate a NULL and a Smi.
-    ASSERT(kSmiTag == 0);
-    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
-    __ Check(not_smi, "Unexpected initial map for Array function");
-    __ CmpObjectType(rbx, MAP_TYPE, rcx);
-    __ Check(equal, "Unexpected initial map for Array function");
-  }
-
-  // Run the native code for the Array function called as constructor.
-  ArrayNativeCode(masm, &generic_constructor);
-
-  // Jump to the generic construct code in case the specialized code cannot
-  // handle the construction.
-  __ bind(&generic_constructor);
-  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
-  Handle<Code> generic_construct_stub(code);
-  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
-}
-
-
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
-  // TODO(849): implement custom construct stub.
-  // Generate a copy of the generic stub for now.
-  Generate_JSConstructStubGeneric(masm);
-}
-
-
 void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax: number of arguments
@@ -1347,6 +538,854 @@
   __ jmp(rcx);
 }
 
+
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push a copy of the function onto the stack.
+  __ push(rdi);
+
+  __ push(rdi);  // Function is also the parameter to the runtime call.
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
+
+  // Restore function and tear down temporary frame.
+  __ pop(rdi);
+  __ LeaveInternalFrame();
+
+  // Do a tail-call of the compiled function.
+  __ lea(rcx, FieldOperand(rax, Code::kHeaderSize));
+  __ jmp(rcx);
+}
+
+
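The tail call in Generate_LazyRecompile works because a Code object's first
instruction sits at a fixed offset past its tagged object pointer. A minimal
host-side sketch of the same address arithmetic; the constant values here are
illustrative assumptions, not taken from this change:

    #include <cstdint>

    // Illustrative constants only; the real values live in objects.h.
    static const intptr_t kHeapObjectTag = 1;    // tagged pointers are offset by one
    static const intptr_t kCodeHeaderSize = 32;  // fixed header before the instructions

    // Address of the first instruction of a tagged Code object, the same
    // computation as lea(rcx, FieldOperand(rax, Code::kHeaderSize)).
    intptr_t CodeEntryAddress(intptr_t tagged_code_object) {
      return tagged_code_object + kCodeHeaderSize - kHeapObjectTag;
    }
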
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  // Deoptimization support is not implemented on x64 yet; trap if reached.
+  __ int3();
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
+  // On-stack replacement is not implemented on x64 yet; trap if reached.
+  __ int3();
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+  // Stack Layout:
+  // rsp[0]:   Return address
+  // rsp[1]:   Argument n
+  // rsp[2]:   Argument n-1
+  //  ...
+  // rsp[n]:   Argument 1
+  // rsp[n+1]: Receiver (function to call)
+  //
+  // rax contains the number of arguments, n, not counting the receiver.
+  //
+  // 1. Make sure we have at least one argument.
+  { Label done;
+    __ testq(rax, rax);
+    __ j(not_zero, &done);
+    __ pop(rbx);
+    __ Push(Factory::undefined_value());
+    __ push(rbx);
+    __ incq(rax);
+    __ bind(&done);
+  }
+
+  // 2. Get the function to call (passed as receiver) from the stack, check
+  //    if it is a function.
+  Label non_function;
+  // The function to call is at position n+1 on the stack.
+  __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+  __ JumpIfSmi(rdi, &non_function);
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &non_function);
+
+  // 3a. Patch the first argument if necessary when calling a function.
+  Label shift_arguments;
+  { Label convert_to_object, use_global_receiver, patch_receiver;
+    // Change context eagerly in case we need the global receiver.
+    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+    __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
+    __ JumpIfSmi(rbx, &convert_to_object);
+
+    __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+    __ j(equal, &use_global_receiver);
+    __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+    __ j(equal, &use_global_receiver);
+
+    __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
+    __ j(below, &convert_to_object);
+    __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
+    __ j(below_equal, &shift_arguments);
+
+    __ bind(&convert_to_object);
+    __ EnterInternalFrame();  // In order to preserve argument count.
+    __ Integer32ToSmi(rax, rax);
+    __ push(rax);
+
+    __ push(rbx);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ movq(rbx, rax);
+
+    __ pop(rax);
+    __ SmiToInteger32(rax, rax);
+    __ LeaveInternalFrame();
+    // Restore the function to rdi.
+    __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+    __ jmp(&patch_receiver);
+
+    // Use the global receiver object from the called function as the
+    // receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalIndex =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
+    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+    __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
+    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+
+    __ bind(&patch_receiver);
+    __ movq(Operand(rsp, rax, times_pointer_size, 0), rbx);
+
+    __ jmp(&shift_arguments);
+  }
+
+
+  // 3b. Patch the first argument when calling a non-function.  The
+  //     CALL_NON_FUNCTION builtin expects the non-function callee as
+  //     receiver, so overwrite the first argument which will ultimately
+  //     become the receiver.
+  __ bind(&non_function);
+  __ movq(Operand(rsp, rax, times_pointer_size, 0), rdi);
+  __ xor_(rdi, rdi);
+
+  // 4. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  __ bind(&shift_arguments);
+  { Label loop;
+    __ movq(rcx, rax);
+    __ bind(&loop);
+    __ movq(rbx, Operand(rsp, rcx, times_pointer_size, 0));
+    __ movq(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
+    __ decq(rcx);
+    __ j(not_sign, &loop);  // While non-negative (to copy return address).
+    __ pop(rbx);  // Discard copy of return address.
+    __ decq(rax);  // One fewer argument (first argument is new receiver).
+  }
+
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+  { Label function;
+    __ testq(rdi, rdi);
+    __ j(not_zero, &function);
+    __ xor_(rbx, rbx);
+    __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
+    __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+            RelocInfo::CODE_TARGET);
+    __ bind(&function);
+  }
+
+  // 5b. Get the code to call from the function and check that the number of
+  //     expected arguments matches what we're providing.  If so, jump
+  //     (tail-call) to the code in register rdx without checking arguments.
+  __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movsxlq(rbx,
+             FieldOperand(rdx,
+                          SharedFunctionInfo::kFormalParameterCountOffset));
+  __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+  __ cmpq(rax, rbx);
+  __ j(not_equal,
+       Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+       RelocInfo::CODE_TARGET);
+
+  ParameterCount expected(0);
+  __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION);
+}
+
+
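Step 3 of Generate_FunctionCall above is a small decision tree over the
receiver value. A hedged C++ sketch of the same policy; the enum and boolean
flags are invented for illustration, the real code works on tagged values and
map instance types:

    enum ReceiverAction { kUseAsIs, kUseGlobalReceiver, kConvertToObject };

    // Mirrors step 3a: smis and other non-objects are boxed via TO_OBJECT,
    // null and undefined are replaced by the callee's global receiver, and
    // genuine JS objects are used unchanged.
    ReceiverAction ClassifyReceiver(bool is_smi, bool is_null_or_undefined,
                                    bool is_js_object) {
      if (is_smi) return kConvertToObject;
      if (is_null_or_undefined) return kUseGlobalReceiver;
      return is_js_object ? kUseAsIs : kConvertToObject;
    }
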
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+  // Stack at entry:
+  //    rsp: return address
+  //  rsp+8: arguments
+  // rsp+16: receiver ("this")
+  // rsp+24: function
+  __ EnterInternalFrame();
+  // Stack frame:
+  //    rbp: Old base pointer
+  // rbp[1]: return address
+  // rbp[2]: function arguments
+  // rbp[3]: receiver
+  // rbp[4]: function
+  static const int kArgumentsOffset = 2 * kPointerSize;
+  static const int kReceiverOffset = 3 * kPointerSize;
+  static const int kFunctionOffset = 4 * kPointerSize;
+  __ push(Operand(rbp, kFunctionOffset));
+  __ push(Operand(rbp, kArgumentsOffset));
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+  __ movq(rcx, rsp);
+  // Make rcx the space we have left. The stack might already be overflowed
+  // here which will cause rcx to become negative.
+  __ subq(rcx, kScratchRegister);
+  // Make rdx the space we need for the array when it is unrolled onto the
+  // stack.
+  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+  // Check if the arguments will overflow the stack.
+  __ cmpq(rcx, rdx);
+  __ j(greater, &okay);  // Signed comparison.
+
+  // Out of stack space.
+  __ push(Operand(rbp, kFunctionOffset));
+  __ push(rax);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+  __ bind(&okay);
+  // End of stack check.
+
+  // Push current index and limit.
+  const int kLimitOffset =
+      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+  __ push(rax);  // limit
+  __ push(Immediate(0));  // index
+
+  // Change context eagerly to get the right global object if
+  // necessary.
+  __ movq(rdi, Operand(rbp, kFunctionOffset));
+  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Compute the receiver.
+  Label call_to_object, use_global_receiver, push_receiver;
+  __ movq(rbx, Operand(rbp, kReceiverOffset));
+  __ JumpIfSmi(rbx, &call_to_object);
+  __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+  __ j(equal, &use_global_receiver);
+  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &use_global_receiver);
+
+  // If given receiver is already a JavaScript object then there's no
+  // reason for converting it.
+  __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
+  __ j(below, &call_to_object);
+  __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
+  __ j(below_equal, &push_receiver);
+
+  // Convert the receiver to an object.
+  __ bind(&call_to_object);
+  __ push(rbx);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ movq(rbx, rax);
+  __ jmp(&push_receiver);
+
+  // Use the current global receiver object as the receiver.
+  __ bind(&use_global_receiver);
+  const int kGlobalOffset =
+      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+  __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
+  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+
+  // Push the receiver.
+  __ bind(&push_receiver);
+  __ push(rbx);
+
+  // Copy all arguments from the array to the stack.
+  Label entry, loop;
+  __ movq(rax, Operand(rbp, kIndexOffset));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ movq(rdx, Operand(rbp, kArgumentsOffset));  // load arguments
+
+  // Use inline caching to speed up access to arguments.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  // It is important that we do not have a test instruction after the
+  // call.  A test instruction after the call is used to indicate that
+  // we have generated an inline version of the keyed load.  In this
+  // case, we know that we are not generating a test instruction next.
+
+  // Push the nth argument.
+  __ push(rax);
+
+  // Update the index on the stack and in register rax.
+  __ movq(rax, Operand(rbp, kIndexOffset));
+  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+  __ movq(Operand(rbp, kIndexOffset), rax);
+
+  __ bind(&entry);
+  __ cmpq(rax, Operand(rbp, kLimitOffset));
+  __ j(not_equal, &loop);
+
+  // Invoke the function.
+  ParameterCount actual(rax);
+  __ SmiToInteger32(rax, rax);
+  __ movq(rdi, Operand(rbp, kFunctionOffset));
+  __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+
+  __ LeaveInternalFrame();
+  __ ret(3 * kPointerSize);  // remove function, receiver, and arguments
+}
+
+
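The overflow check near the top of Generate_FunctionApply is plain signed
pointer arithmetic: the space left between rsp and the real stack limit must
exceed the unrolled size of the arguments array. The same test as standalone
C++, a sketch assuming kPointerSize is 8 on x64:

    #include <cstdint>

    static const int64_t kPointerSize = 8;  // x64

    // True if n_args more pointers fit on the stack; mirrors the
    // subq/cmpq/j(greater) sequence. The difference is signed because the
    // stack may already have grown past the limit.
    bool ArgumentsFitOnStack(int64_t stack_pointer, int64_t real_stack_limit,
                             int64_t n_args) {
      int64_t space_left = stack_pointer - real_stack_limit;
      return space_left > n_args * kPointerSize;
    }
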
+// Number of empty elements to allocate for an empty array.
+static const int kPreallocatedArrayElements = 4;
+
+
+// Allocate an empty JSArray. The allocated array is put into the result
+// register. If the parameter initial_capacity is larger than zero an elements
+// backing store is allocated with this size and filled with the hole values.
+// Otherwise the elements backing store is set to the empty FixedArray.
+static void AllocateEmptyJSArray(MacroAssembler* masm,
+                                 Register array_function,
+                                 Register result,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Register scratch3,
+                                 int initial_capacity,
+                                 Label* gc_required) {
+  ASSERT(initial_capacity >= 0);
+
+  // Load the initial map from the array function.
+  __ movq(scratch1, FieldOperand(array_function,
+                                 JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Allocate the JSArray object together with space for a fixed array with the
+  // requested elements.
+  int size = JSArray::kSize;
+  if (initial_capacity > 0) {
+    size += FixedArray::SizeFor(initial_capacity);
+  }
+  __ AllocateInNewSpace(size,
+                        result,
+                        scratch2,
+                        scratch3,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // scratch1: initial map
+  // scratch2: start of next object
+  __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
+  __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
+          Factory::empty_fixed_array());
+  // Field JSArray::kElementsOffset is initialized later.
+  __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
+
+  // If no storage is requested for the elements array just set the empty
+  // fixed array.
+  if (initial_capacity == 0) {
+    __ Move(FieldOperand(result, JSArray::kElementsOffset),
+            Factory::empty_fixed_array());
+    return;
+  }
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // scratch2: start of next object
+  __ lea(scratch1, Operand(result, JSArray::kSize));
+  __ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
+
+  // Initialize the FixedArray and fill it with holes. FixedArray length is
+  // stored as a smi.
+  // result: JSObject
+  // scratch1: elements array
+  // scratch2: start of next object
+  __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
+          Factory::fixed_array_map());
+  __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
+          Smi::FromInt(initial_capacity));
+
+  // Fill the FixedArray with the hole value. Inline the code if short.
+  // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
+  static const int kLoopUnfoldLimit = 4;
+  ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
+  __ Move(scratch3, Factory::the_hole_value());
+  if (initial_capacity <= kLoopUnfoldLimit) {
+    // Use a scratch register here to have only one reloc info when unfolding
+    // the loop.
+    for (int i = 0; i < initial_capacity; i++) {
+      __ movq(FieldOperand(scratch1,
+                           FixedArray::kHeaderSize + i * kPointerSize),
+              scratch3);
+    }
+  } else {
+    Label loop, entry;
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ movq(Operand(scratch1, 0), scratch3);
+    __ addq(scratch1, Immediate(kPointerSize));
+    __ bind(&entry);
+    __ cmpq(scratch1, scratch2);
+    __ j(below, &loop);
+  }
+}
+
+
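The size passed to AllocateInNewSpace in AllocateEmptyJSArray is just the
JSArray header plus a FixedArray sized for the requested capacity. The
arithmetic in isolation, with header sizes that are assumptions for
illustration (the real constants come from objects.h):

    static const int kPointerSize = 8;                          // x64
    static const int kJSArraySize = 4 * kPointerSize;           // assumed header
    static const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length

    // Equivalent of JSArray::kSize + FixedArray::SizeFor(initial_capacity).
    int EmptyArrayAllocationSize(int initial_capacity) {
      int size = kJSArraySize;
      if (initial_capacity > 0) {
        size += kFixedArrayHeaderSize + initial_capacity * kPointerSize;
      }
      return size;
    }
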
+// Allocate a JSArray with the number of elements stored in a register. The
+// register array_function holds the built-in Array function and the register
+// array_size holds the size of the array as a smi. The allocated array is put
+// into the result register and beginning and end of the FixedArray elements
+// storage is put into registers elements_array and elements_array_end (see
+// below for when that is not the case). If the parameter fill_with_hole is
+// true the allocated elements backing store is filled with the hole values
+// otherwise it is left uninitialized. When the backing store is filled the
+// register elements_array is scratched.
+static void AllocateJSArray(MacroAssembler* masm,
+                            Register array_function,  // Array function.
+                            Register array_size,  // As a smi.
+                            Register result,
+                            Register elements_array,
+                            Register elements_array_end,
+                            Register scratch,
+                            bool fill_with_hole,
+                            Label* gc_required) {
+  Label not_empty, allocated;
+
+  // Load the initial map from the array function.
+  __ movq(elements_array,
+          FieldOperand(array_function,
+                       JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ testq(array_size, array_size);
+  __ j(not_zero, &not_empty);
+
+  // If an empty array is requested, allocate a small elements array anyway. This
+  // keeps the code below free of special casing for the empty array.
+  int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
+  __ AllocateInNewSpace(size,
+                        result,
+                        elements_array_end,
+                        scratch,
+                        gc_required,
+                        TAG_OBJECT);
+  __ jmp(&allocated);
+
+  // Allocate the JSArray object together with space for a FixedArray with the
+  // requested elements.
+  __ bind(&not_empty);
+  SmiIndex index =
+      masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
+  __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
+                        index.scale,
+                        index.reg,
+                        result,
+                        elements_array_end,
+                        scratch,
+                        gc_required,
+                        TAG_OBJECT);
+
+  // Allocated the JSArray. Now initialize the fields except for the elements
+  // array.
+  // result: JSObject
+  // elements_array: initial map
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  __ bind(&allocated);
+  __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
+  __ Move(elements_array, Factory::empty_fixed_array());
+  __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
+  // Field JSArray::kElementsOffset is initialized later.
+  __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
+
+  // Calculate the location of the elements array and set elements array member
+  // of the JSArray.
+  // result: JSObject
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  __ lea(elements_array, Operand(result, JSArray::kSize));
+  __ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
+
+  // Initialize the fixed array. FixedArray length is stored as a smi.
+  // result: JSObject
+  // elements_array: elements array
+  // elements_array_end: start of next object
+  // array_size: size of array (smi)
+  __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
+          Factory::fixed_array_map());
+  Label not_empty_2, fill_array;
+  __ SmiTest(array_size);
+  __ j(not_zero, &not_empty_2);
+  // Length of the FixedArray is the number of pre-allocated elements even
+  // though the actual JSArray has length 0.
+  __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
+          Smi::FromInt(kPreallocatedArrayElements));
+  __ jmp(&fill_array);
+  __ bind(&not_empty_2);
+  // For non-empty JSArrays the length of the FixedArray and the JSArray is the
+  // same.
+  __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
+
+  // Fill the allocated FixedArray with the hole value if requested.
+  // result: JSObject
+  // elements_array: elements array
+  // elements_array_end: start of next object
+  __ bind(&fill_array);
+  if (fill_with_hole) {
+    Label loop, entry;
+    __ Move(scratch, Factory::the_hole_value());
+    __ lea(elements_array, Operand(elements_array,
+                                   FixedArray::kHeaderSize - kHeapObjectTag));
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ movq(Operand(elements_array, 0), scratch);
+    __ addq(elements_array, Immediate(kPointerSize));
+    __ bind(&entry);
+    __ cmpq(elements_array, elements_array_end);
+    __ j(below, &loop);
+  }
+}
+
+
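The fill_with_hole loop at the end of AllocateJSArray is a bump-pointer fill
from the first element slot up to the end of the allocation. In plain C++
terms, a sketch in which the hole is just a distinguished sentinel word:

    #include <cstdint>

    // Direct analogue of the movq/addq/cmpq loop: store the sentinel into
    // every word of [elements, elements_end).
    void FillWithHole(intptr_t* elements, intptr_t* elements_end,
                      intptr_t the_hole) {
      for (intptr_t* p = elements; p < elements_end; ++p) {
        *p = the_hole;
      }
    }
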
+// Create a new array for the built-in Array function. This function allocates
+// the JSArray object and the FixedArray elements array and initializes these.
+// If the Array cannot be constructed in native code the runtime is called. This
+// function assumes the following state:
+//   rdi: constructor (built-in Array function)
+//   rax: argc
+//   rsp[0]: return address
+//   rsp[8]: last argument
+// This function is used for both construct and normal calls of Array. The only
+// difference between handling a construct call and a normal call is that for a
+// construct call the constructor function in rdi needs to be preserved for
+// entering the generic code. In both cases argc in rax needs to be preserved.
+// Both registers are preserved by this code so no need to differentiate between
+// a construct call and a normal call.
+static void ArrayNativeCode(MacroAssembler* masm,
+                            Label *call_generic_code) {
+  Label argc_one_or_more, argc_two_or_more;
+
+  // Check for array construction with zero arguments.
+  __ testq(rax, rax);
+  __ j(not_zero, &argc_one_or_more);
+
+  // Handle construction of an empty array.
+  AllocateEmptyJSArray(masm,
+                       rdi,
+                       rbx,
+                       rcx,
+                       rdx,
+                       r8,
+                       kPreallocatedArrayElements,
+                       call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+  __ movq(rax, rbx);
+  __ ret(kPointerSize);
+
+  // Check for one argument. Bail out if argument is not a smi or if it is
+  // negative.
+  __ bind(&argc_one_or_more);
+  __ cmpq(rax, Immediate(1));
+  __ j(not_equal, &argc_two_or_more);
+  __ movq(rdx, Operand(rsp, kPointerSize));  // Get the argument from the stack.
+  __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
+
+  // Handle construction of an empty array of a certain size. Bail out if size
+  // is too large to actually allocate an elements array.
+  __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
+  __ j(greater_equal, call_generic_code);
+
+  // rax: argc
+  // rdx: array_size (smi)
+  // rdi: constructor
+  // rsp[0]: return address
+  // rsp[8]: argument
+  AllocateJSArray(masm,
+                  rdi,
+                  rdx,
+                  rbx,
+                  rcx,
+                  r8,
+                  r9,
+                  true,
+                  call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+  __ movq(rax, rbx);
+  __ ret(2 * kPointerSize);
+
+  // Handle construction of an array from a list of arguments.
+  __ bind(&argc_two_or_more);
+  __ movq(rdx, rax);
+  __ Integer32ToSmi(rdx, rdx);  // Convert argc to a smi.
+  // rax: argc
+  // rdx: array_size (smi)
+  // rdi: constructor
+  // rsp[0] : return address
+  // rsp[8] : last argument
+  AllocateJSArray(masm,
+                  rdi,
+                  rdx,
+                  rbx,
+                  rcx,
+                  r8,
+                  r9,
+                  false,
+                  call_generic_code);
+  __ IncrementCounter(&Counters::array_function_native, 1);
+
+  // rax: argc
+  // rbx: JSArray
+  // rcx: elements_array
+  // r8: elements_array_end (untagged)
+  // rsp[0]: return address
+  // rsp[8]: last argument
+
+  // Location of the last argument
+  __ lea(r9, Operand(rsp, kPointerSize));
+
+  // Location of the first array element (the parameter fill_with_hole to
+  // AllocateJSArray is false, so the FixedArray is returned in rcx).
+  __ lea(rdx, Operand(rcx, FixedArray::kHeaderSize - kHeapObjectTag));
+
+  // rax: argc
+  // rbx: JSArray
+  // rdx: location of the first array element
+  // r9: location of the last argument
+  // rsp[0]: return address
+  // rsp[8]: last argument
+  Label loop, entry;
+  __ movq(rcx, rax);
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
+  __ movq(Operand(rdx, 0), kScratchRegister);
+  __ addq(rdx, Immediate(kPointerSize));
+  __ bind(&entry);
+  __ decq(rcx);
+  __ j(greater_equal, &loop);
+
+  // Remove caller arguments from the stack and return.
+  // rax: argc
+  // rbx: JSArray
+  // rsp[0]: return address
+  // rsp[8]: last argument
+  __ pop(rcx);
+  __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+  __ push(rcx);
+  __ movq(rax, rbx);
+  __ ret(0);
+}
+
+
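Seen from JavaScript, the three code paths in ArrayNativeCode correspond to
the three call shapes of the Array function. A behavioral sketch; all names
and the limit parameter are invented for illustration:

    #include <cstdint>

    enum ArrayCallKind { kEmptyArray, kPresizedArray, kArrayFromArgs, kGeneric };

    // Zero arguments yield an empty array; a single non-negative smi below
    // the fast-elements limit yields a presized array of holes; two or more
    // arguments build the array from the argument list. Anything else bails
    // out to the generic (runtime) code.
    ArrayCallKind ClassifyArrayCall(int argc, bool arg0_is_nonneg_smi,
                                    int64_t arg0, int64_t max_fast_elements) {
      if (argc == 0) return kEmptyArray;
      if (argc == 1) {
        if (!arg0_is_nonneg_smi || arg0 >= max_fast_elements) return kGeneric;
        return kPresizedArray;
      }
      return kArrayFromArgs;
    }
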
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : argc
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : last argument
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the Array function.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rdi);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array function should be a map.
+    __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // Will catch both a NULL pointer and a Smi.
+    ASSERT(kSmiTag == 0);
+    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+    __ Check(not_smi, "Unexpected initial map for Array function");
+    __ CmpObjectType(rbx, MAP_TYPE, rcx);
+    __ Check(equal, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  ArrayNativeCode(masm, &generic_array_code);
+
+  // Jump to the generic array code in case the specialized code cannot handle
+  // the construction.
+  __ bind(&generic_array_code);
+  Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+  Handle<Code> array_code(code);
+  __ Jump(array_code, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : argc
+  //  -- rdi : constructor
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : last argument
+  // -----------------------------------
+  Label generic_constructor;
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the builtin Array function,
+    // which always has a map.
+    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rbx);
+    __ cmpq(rdi, rbx);
+    __ Check(equal, "Unexpected Array function");
+    // Initial map for the builtin Array function should be a map.
+    __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // Will catch both a NULL pointer and a Smi.
+    ASSERT(kSmiTag == 0);
+    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+    __ Check(not_smi, "Unexpected initial map for Array function");
+    __ CmpObjectType(rbx, MAP_TYPE, rcx);
+    __ Check(equal, "Unexpected initial map for Array function");
+  }
+
+  // Run the native code for the Array function called as constructor.
+  ArrayNativeCode(masm, &generic_constructor);
+
+  // Jump to the generic construct code in case the specialized code cannot
+  // handle the construction.
+  __ bind(&generic_constructor);
+  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+  Handle<Code> generic_construct_stub(code);
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+  // TODO(849): implement custom construct stub.
+  // Generate a copy of the generic stub for now.
+  Generate_JSConstructStubGeneric(masm);
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ push(rbp);
+  __ movq(rbp, rsp);
+
+  // Store the arguments adaptor context sentinel.
+  __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+
+  // Push the function on the stack.
+  __ push(rdi);
+
+  // Preserve the number of arguments on the stack. Must preserve both
+  // rax and rbx because these registers are used when copying the
+  // arguments and the receiver.
+  __ Integer32ToSmi(rcx, rax);
+  __ push(rcx);
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // Retrieve the number of arguments from the stack. Number is a Smi.
+  __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  // Leave the frame.
+  __ movq(rsp, rbp);
+  __ pop(rbp);
+
+  // Remove caller arguments from the stack.
+  __ pop(rcx);
+  SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+  __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+  __ push(rcx);
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : actual number of arguments
+  //  -- rbx : expected number of arguments
+  //  -- rdx : code entry to call
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments;
+  __ IncrementCounter(&Counters::arguments_adaptors, 1);
+
+  Label enough, too_few;
+  __ cmpq(rax, rbx);
+  __ j(less, &too_few);
+  __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  __ j(equal, &dont_adapt_arguments);
+
+  {  // Enough parameters: Actual >= expected.
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Copy receiver and all expected arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
+    __ movq(rcx, Immediate(-1));  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ incq(rcx);
+    __ push(Operand(rax, 0));
+    __ subq(rax, Immediate(kPointerSize));
+    __ cmpq(rcx, rbx);
+    __ j(less, &copy);
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected.
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Copy receiver and all actual arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
+    __ movq(rcx, Immediate(-1));  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ incq(rcx);
+    __ push(Operand(rdi, 0));
+    __ subq(rdi, Immediate(kPointerSize));
+    __ cmpq(rcx, rax);
+    __ j(less, &copy);
+
+    // Fill remaining expected arguments with undefined values.
+    Label fill;
+    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+    __ bind(&fill);
+    __ incq(rcx);
+    __ push(kScratchRegister);
+    __ cmpq(rcx, rbx);
+    __ j(less, &fill);
+
+    // Restore function pointer.
+    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ call(rdx);
+
+  // Leave frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ ret(0);
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ jmp(rdx);
+}
+
+
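The two copy loops in the arguments adaptor reduce to one rule: keep the
receiver plus min(actual, expected) real arguments, then pad with undefined
until the callee sees its expected count. A vector-based sketch, with
undefined modeled as a sentinel word:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // args holds the receiver followed by the actual arguments.
    std::vector<intptr_t> AdaptArguments(const std::vector<intptr_t>& args,
                                         size_t actual, size_t expected,
                                         intptr_t undefined) {
      // Copy the receiver plus min(actual, expected) arguments, as the
      // "enough" and "too few" paths of the trampoline do.
      size_t copied = (actual < expected ? actual : expected) + 1;
      std::vector<intptr_t> adapted(args.begin(), args.begin() + copied);
      // Fill the remaining expected argument slots with undefined.
      while (adapted.size() < expected + 1) adapted.push_back(undefined);
      return adapted;
    }
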
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  // On-stack replacement is not implemented on x64 yet; trap if reached.
+  __ int3();
+}
+
+
+#undef __
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 14e3527..c3eb5bf 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -57,12 +57,14 @@
   // write barrier because the allocated object is in new space.
   __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
   __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
   __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
   __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
   __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
   __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
   __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
   __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
+  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdi);
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
@@ -983,6 +985,14 @@
 }
 
 
+Handle<Code> GetTypeRecordingBinaryOpStub(int key,
+    TRBinaryOpIC::TypeInfo type_info,
+    TRBinaryOpIC::TypeInfo result_type_info) {
+  UNIMPLEMENTED();
+  return Handle<Code>::null();
+}
+
+
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // Input on stack:
   // rsp[8]: argument (should be number).
@@ -1107,6 +1117,7 @@
     // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
+    case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
       return Runtime::kAbort;
@@ -1121,73 +1132,76 @@
   // rcx: Pointer to cache entry. Must be preserved.
   // st(0): Input double
   Label done;
-  ASSERT(type_ == TranscendentalCache::SIN ||
-         type_ == TranscendentalCache::COS);
-  // More transcendental types can be added later.
+  if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
+    // Both fsin and fcos require arguments in the range +/-2^63 and
+    // return NaN for infinities and NaN. They can share all code except
+    // the actual fsin/fcos operation.
+    Label in_range;
+    // If argument is outside the range -2^63..2^63, fsin/cos doesn't
+    // work. We must reduce it to the appropriate range.
+    __ movq(rdi, rbx);
+    // Move exponent and sign bits to low bits.
+    __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
+    // Remove sign bit.
+    __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
+    int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
+    __ cmpl(rdi, Immediate(supported_exponent_limit));
+    __ j(below, &in_range);
+    // Check for infinity and NaN. Both return NaN for sin.
+    __ cmpl(rdi, Immediate(0x7ff));
+    __ j(equal, on_nan_result);
 
-  // Both fsin and fcos require arguments in the range +/-2^63 and
-  // return NaN for infinities and NaN. They can share all code except
-  // the actual fsin/fcos operation.
-  Label in_range;
-  // If argument is outside the range -2^63..2^63, fsin/cos doesn't
-  // work. We must reduce it to the appropriate range.
-  __ movq(rdi, rbx);
-  // Move exponent and sign bits to low bits.
-  __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
-  // Remove sign bit.
-  __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
-  int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
-  __ cmpl(rdi, Immediate(supported_exponent_limit));
-  __ j(below, &in_range);
-  // Check for infinity and NaN. Both return NaN for sin.
-  __ cmpl(rdi, Immediate(0x7ff));
-  __ j(equal, on_nan_result);
+    // Use fpmod to restrict argument to the range +/-2*PI.
+    __ fldpi();
+    __ fadd(0);
+    __ fld(1);
+    // FPU Stack: input, 2*pi, input.
+    {
+      Label no_exceptions;
+      __ fwait();
+      __ fnstsw_ax();
+      // Clear if Illegal Operand or Zero Division exceptions are set.
+      __ testl(rax, Immediate(5));  // #IO and #ZD flags of FPU status word.
+      __ j(zero, &no_exceptions);
+      __ fnclex();
+      __ bind(&no_exceptions);
+    }
 
-  // Use fpmod to restrict argument to the range +/-2*PI.
-  __ fldpi();
-  __ fadd(0);
-  __ fld(1);
-  // FPU Stack: input, 2*pi, input.
-  {
-    Label no_exceptions;
-    __ fwait();
-    __ fnstsw_ax();
-    // Clear if Illegal Operand or Zero Division exceptions are set.
-    __ testl(rax, Immediate(5));  // #IO and #ZD flags of FPU status word.
-    __ j(zero, &no_exceptions);
-    __ fnclex();
-    __ bind(&no_exceptions);
+    // Compute st(0) % st(1)
+    {
+      NearLabel partial_remainder_loop;
+      __ bind(&partial_remainder_loop);
+      __ fprem1();
+      __ fwait();
+      __ fnstsw_ax();
+      __ testl(rax, Immediate(0x400));  // Check C2 bit of FPU status word.
+      // If C2 is set, computation only has partial result. Loop to
+      // continue computation.
+      __ j(not_zero, &partial_remainder_loop);
   }
-
-  // Compute st(0) % st(1)
-  {
-    NearLabel partial_remainder_loop;
-    __ bind(&partial_remainder_loop);
-    __ fprem1();
-    __ fwait();
-    __ fnstsw_ax();
-    __ testl(rax, Immediate(0x400));  // Check C2 bit of FPU status word.
-    // If C2 is set, computation only has partial result. Loop to
-    // continue computation.
-    __ j(not_zero, &partial_remainder_loop);
+    // FPU Stack: input, 2*pi, input % 2*pi
+    __ fstp(2);
+    // FPU Stack: input % 2*pi, 2*pi,
+    __ fstp(0);
+    // FPU Stack: input % 2*pi
+    __ bind(&in_range);
+    switch (type_) {
+      case TranscendentalCache::SIN:
+        __ fsin();
+        break;
+      case TranscendentalCache::COS:
+        __ fcos();
+        break;
+      default:
+        UNREACHABLE();
+    }
+    __ bind(&done);
+  } else {
+    ASSERT(type_ == TranscendentalCache::LOG);
+    __ fldln2();
+    __ fxch();
+    __ fyl2x();
   }
-  // FPU Stack: input, 2*pi, input % 2*pi
-  __ fstp(2);
-  // FPU Stack: input % 2*pi, 2*pi,
-  __ fstp(0);
-  // FPU Stack: input % 2*pi
-  __ bind(&in_range);
-  switch (type_) {
-    case TranscendentalCache::SIN:
-      __ fsin();
-      break;
-    case TranscendentalCache::COS:
-      __ fcos();
-      break;
-    default:
-      UNREACHABLE();
-  }
-  __ bind(&done);
 }
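
The range check in the restructured GenerateOperation reads a double's biased
exponent straight out of its bit pattern. The same test in portable C++,
using the IEEE-754 layout the shr/andl/cmpl sequence assumes (52 mantissa
bits, 11 exponent bits, bias 1023):

    #include <cstdint>
    #include <cstring>

    // True if |x| < 2^63, i.e. safe for fsin/fcos. NaNs and infinities have
    // the maximal exponent 0x7ff and therefore fail the comparison.
    bool FitsInFsinFcosRange(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));  // type-pun without UB
      uint32_t exponent = static_cast<uint32_t>(bits >> 52) & 0x7ff;
      return exponent < 63 + 1023;  // supported_exponent_limit from the stub
    }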
 
 
@@ -1999,6 +2013,90 @@
 }
 
 
+void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
+  const int kMaxInlineLength = 100;
+  Label slowcase;
+  Label done;
+  __ movq(r8, Operand(rsp, kPointerSize * 3));
+  __ JumpIfNotSmi(r8, &slowcase);
+  __ SmiToInteger32(rbx, r8);
+  __ cmpl(rbx, Immediate(kMaxInlineLength));
+  __ j(above, &slowcase);
+  // Smi-tagging is equivalent to multiplying by 2.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  // Allocate RegExpResult followed by FixedArray with size in rbx.
+  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
+  // Elements:  [Map][Length][..elements..]
+  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
+                        times_pointer_size,
+                        rbx,  // In: Number of elements.
+                        rax,  // Out: Start of allocation (tagged).
+                        rcx,  // Out: End of allocation.
+                        rdx,  // Scratch register
+                        &slowcase,
+                        TAG_OBJECT);
+  // rax: Start of allocated area, object-tagged.
+  // rbx: Number of array elements as int32.
+  // r8: Number of array elements as smi.
+
+  // Set JSArray map to global.regexp_result_map().
+  __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
+  __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+  __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
+  __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
+
+  // Set empty properties FixedArray.
+  __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
+          Factory::empty_fixed_array());
+
+  // Set elements to point to FixedArray allocated right after the JSArray.
+  __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
+  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+
+  // Set input, index and length fields from arguments.
+  __ movq(r8, Operand(rsp, kPointerSize * 1));
+  __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
+  __ movq(r8, Operand(rsp, kPointerSize * 2));
+  __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
+  __ movq(r8, Operand(rsp, kPointerSize * 3));
+  __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
+
+  // Fill out the elements FixedArray.
+  // rax: JSArray.
+  // rcx: FixedArray.
+  // rbx: Number of elements in array as int32.
+
+  // Set map.
+  __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
+          Factory::fixed_array_map());
+  // Set length.
+  __ Integer32ToSmi(rdx, rbx);
+  __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
+  // Fill contents of fixed-array with the-hole.
+  __ Move(rdx, Factory::the_hole_value());
+  __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
+  // Fill fixed array elements with hole.
+  // rax: JSArray.
+  // rbx: Number of elements in array that remains to be filled, as int32.
+  // rcx: Start of elements in FixedArray.
+  // rdx: the hole.
+  Label loop;
+  __ testl(rbx, rbx);
+  __ bind(&loop);
+  __ j(less_equal, &done);  // Jump if rbx is negative or zero.
+  __ subl(rbx, Immediate(1));
+  __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
+  __ jmp(&loop);
+
+  __ bind(&done);
+  __ ret(3 * kPointerSize);
+
+  __ bind(&slowcase);
+  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
+}
+
+
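The stub lays the JSRegExpResult and its elements FixedArray out as one
contiguous new-space block, which is why the elements pointer is computed
with a simple lea from the result. The size arithmetic as a sketch, with the
JSRegExpResult header size an assumption for illustration:

    static const int kPointerSize = 8;                        // x64
    static const int kJSRegExpResultSize = 6 * kPointerSize;  // assumed: JSArray + index + input
    static const int kFixedArrayHeaderSize = 2 * kPointerSize;

    // Total allocation for a result with n elements, matching
    // AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
    //                    times_pointer_size, n, ...); the FixedArray starts
    // immediately after the JSRegExpResult header.
    int RegExpResultAllocationSize(int n) {
      return kJSRegExpResultSize + kFixedArrayHeaderSize + n * kPointerSize;
    }
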
 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                          Register object,
                                                          Register result,
@@ -3986,6 +4084,25 @@
   __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+  UNIMPLEMENTED();
+}
+
+
+void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
+  UNIMPLEMENTED();
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+  UNIMPLEMENTED();
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+  UNIMPLEMENTED();
+}
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 18213b9..eb7ad26 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -149,7 +149,7 @@
   class ArgsReversedBits: public BitField<bool, 10, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
   class StaticTypeInfoBits: public BitField<int, 12, 3> {};
-  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 2> {};
+  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 3> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index e9f29f0..9a25572 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -104,12 +104,12 @@
 }
 
 
-void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
 
 
-void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveInternalFrame();
 }
 
@@ -6490,94 +6490,13 @@
 
 
 void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  // No stub. This code only occurs a few times in regexp.js.
-  const int kMaxInlineLength = 100;
   ASSERT_EQ(3, args->length());
   Load(args->at(0));  // Size of array, smi.
   Load(args->at(1));  // "index" property value.
   Load(args->at(2));  // "input" property value.
-  {
-    VirtualFrame::SpilledScope spilled_scope;
-
-    Label slowcase;
-    Label done;
-    __ movq(r8, Operand(rsp, kPointerSize * 2));
-    __ JumpIfNotSmi(r8, &slowcase);
-    __ SmiToInteger32(rbx, r8);
-    __ cmpl(rbx, Immediate(kMaxInlineLength));
-    __ j(above, &slowcase);
-    // Smi-tagging is equivalent to multiplying by 2.
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    // Allocate RegExpResult followed by FixedArray with size in ebx.
-    // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
-    // Elements:  [Map][Length][..elements..]
-    __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
-                          times_pointer_size,
-                          rbx,  // In: Number of elements.
-                          rax,  // Out: Start of allocation (tagged).
-                          rcx,  // Out: End of allocation.
-                          rdx,  // Scratch register
-                          &slowcase,
-                          TAG_OBJECT);
-    // rax: Start of allocated area, object-tagged.
-    // rbx: Number of array elements as int32.
-    // r8: Number of array elements as smi.
-
-    // Set JSArray map to global.regexp_result_map().
-    __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
-    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
-    __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
-    __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
-
-    // Set empty properties FixedArray.
-    __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
-            Factory::empty_fixed_array());
-
-    // Set elements to point to FixedArray allocated right after the JSArray.
-    __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
-    __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
-
-    // Set input, index and length fields from arguments.
-    __ pop(FieldOperand(rax, JSRegExpResult::kInputOffset));
-    __ pop(FieldOperand(rax, JSRegExpResult::kIndexOffset));
-    __ lea(rsp, Operand(rsp, kPointerSize));
-    __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
-
-    // Fill out the elements FixedArray.
-    // rax: JSArray.
-    // rcx: FixedArray.
-    // rbx: Number of elements in array as int32.
-
-    // Set map.
-    __ Move(FieldOperand(rcx, HeapObject::kMapOffset),
-            Factory::fixed_array_map());
-    // Set length.
-    __ Integer32ToSmi(rdx, rbx);
-    __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
-    // Fill contents of fixed-array with the-hole.
-    __ Move(rdx, Factory::the_hole_value());
-    __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
-    // Fill fixed array elements with hole.
-    // rax: JSArray.
-    // rbx: Number of elements in array that remains to be filled, as int32.
-    // rcx: Start of elements in FixedArray.
-    // rdx: the hole.
-    Label loop;
-    __ testl(rbx, rbx);
-    __ bind(&loop);
-    __ j(less_equal, &done);  // Jump if ecx is negative or zero.
-    __ subl(rbx, Immediate(1));
-    __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
-    __ jmp(&loop);
-
-    __ bind(&slowcase);
-    __ CallRuntime(Runtime::kRegExpConstructResult, 3);
-
-    __ bind(&done);
-  }
-  frame_->Forget(3);
-  frame_->Push(rax);
+  RegExpConstructResultStub stub;
+  Result result = frame_->CallStub(&stub, 3);
+  frame_->Push(&result);
 }
 
 
@@ -6865,9 +6784,9 @@
 
   // Check that both indices are valid.
   __ movq(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
-  __ cmpl(tmp2.reg(), index1.reg());
+  __ SmiCompare(tmp2.reg(), index1.reg());
   deferred->Branch(below_equal);
-  __ cmpl(tmp2.reg(), index2.reg());
+  __ SmiCompare(tmp2.reg(), index2.reg());
   deferred->Branch(below_equal);
 
   // Bring addresses into index1 and index2.
@@ -7118,6 +7037,15 @@
 }
 
 
+void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+  Load(args->at(0));
+  TranscendentalCacheStub stub(TranscendentalCache::LOG);
+  Result result = frame_->CallStub(&stub, 1);
+  frame_->Push(&result);
+}
+
+
 // Generates the Math.sqrt method. Please note - this function assumes that
 // the callsite has executed ToNumber on the argument.
 void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
@@ -7946,7 +7874,7 @@
     case Token::INSTANCEOF: {
       Load(left);
       Load(right);
-      InstanceofStub stub;
+      InstanceofStub stub(InstanceofStub::kNoFlags);
       Result answer = frame_->CallStub(&stub, 2);
       answer.ToRegister();
       __ testq(answer.reg(), answer.reg());
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 1a5e7df..b308f64 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -308,6 +308,9 @@
                                        Code::Flags flags,
                                        CompilationInfo* info);
 
+  // Print the code after compiling it.
+  static void PrintCode(Handle<Code> code, CompilationInfo* info);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -370,8 +373,9 @@
   // Node visitors.
   void VisitStatements(ZoneList<Statement*>* statements);
 
-#define DEF_VISIT(type) \
-  void Visit##type(type* node);
+  virtual void VisitSlot(Slot* node);
+#define DEF_VISIT(type)                         \
+  virtual void Visit##type(type* node);
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
@@ -664,14 +668,16 @@
   void GenerateMathSin(ZoneList<Expression*>* args);
   void GenerateMathCos(ZoneList<Expression*>* args);
   void GenerateMathSqrt(ZoneList<Expression*>* args);
+  void GenerateMathLog(ZoneList<Expression*>* args);
 
+  // Check whether two RegExps are equivalent.
   void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
 
   void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
   void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
   void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
 
-// Simple condition analysis.
+  // Simple condition analysis.
   enum ConditionAnalysis {
     ALWAYS_TRUE,
     ALWAYS_FALSE,
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index a43a02b..30134bf 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -42,7 +42,7 @@
 namespace internal {
 
 void CPU::Setup() {
-  CpuFeatures::Probe();
+  CpuFeatures::Probe(true);
 }
 
 
diff --git a/test/mjsunit/regress/regress-1146.js b/src/x64/deoptimizer-x64.cc
similarity index 60%
copy from test/mjsunit/regress/regress-1146.js
copy to src/x64/deoptimizer-x64.cc
index e8028ce..4e890cd 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/src/x64/deoptimizer-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,53 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+#include "v8.h"
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+namespace v8 {
+namespace internal {
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+
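+// Size in bytes of each entry in the deoptimization jump table.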
+int Deoptimizer::table_entry_size_ = 10;
+
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+                                      Code* replacement_code) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::DoComputeOsrOutputFrame() {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+                                 int frame_index) {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::EntryGenerator::Generate() {
+  UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+  UNIMPLEMENTED();
+}
+
+} }  // namespace v8::internal
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 4213912..7502d61 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -906,7 +906,9 @@
             case 0xE4: mnem = "ftst"; break;
             case 0xE8: mnem = "fld1"; break;
             case 0xEB: mnem = "fldpi"; break;
+            case 0xED: mnem = "fldln2"; break;
             case 0xEE: mnem = "fldz"; break;
+            case 0xF1: mnem = "fyl2x"; break;
             case 0xF5: mnem = "fprem1"; break;
             case 0xF7: mnem = "fincstp"; break;
             case 0xF8: mnem = "fprem"; break;
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 9991981..fbbf176 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -43,6 +43,12 @@
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
+// Number of registers for which space is reserved in safepoints.
+// TODO(x64): This should not be 0.
+static const int kNumSafepointRegisters = 0;
+
+// ----------------------------------------------------
+
 class StackHandlerConstants : public AllStatic {
  public:
   static const int kNextOffset  = 0 * kPointerSize;
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index a1aa976..dd28d4d 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -170,7 +170,12 @@
     }
   }
 
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
   { Comment cmnt(masm_, "[ Stack check");
+    PrepareForBailout(info->function(), NO_REGISTERS);
     NearLabel ok;
     __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
     __ j(above_equal, &ok);
@@ -179,10 +184,6 @@
     __ bind(&ok);
   }
 
-  if (FLAG_trace) {
-    __ CallRuntime(Runtime::kTraceEnter, 0);
-  }
-
   { Comment cmnt(masm_, "[ Body");
     ASSERT(loop_depth() == 0);
     VisitStatements(function()->body());
@@ -202,6 +203,20 @@
 }
 
 
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+  Comment cmnt(masm_, "[ Stack check");
+  NearLabel ok;
+  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+  __ j(above_equal, &ok);
+  StackCheckStub stub;
+  __ CallStub(&stub);
+  __ bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+  RecordStackCheck(stmt->OsrEntryId());
+}
+
+
 void FullCodeGenerator::EmitReturnSequence() {
   Comment cmnt(masm_, "[ Return sequence");
   if (return_label_.is_bound()) {
@@ -266,6 +281,7 @@
 
 void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
   codegen()->Move(result_register(), slot);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
@@ -287,12 +303,16 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   if (index == Heap::kUndefinedValueRootIndex ||
       index == Heap::kNullValueRootIndex ||
       index == Heap::kFalseValueRootIndex) {
-    __ jmp(false_label_);
+    if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (index == Heap::kTrueValueRootIndex) {
-    __ jmp(true_label_);
+    if (true_label_ != fall_through_) __ jmp(true_label_);
   } else {
     __ LoadRoot(result_register(), index);
     codegen()->DoTest(true_label_, false_label_, fall_through_);
@@ -316,22 +336,26 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
+                                          true,
+                                          true_label_,
+                                          false_label_);
   ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
-    __ jmp(false_label_);
+    if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
-    __ jmp(true_label_);
+    if (true_label_ != fall_through_) __ jmp(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
-      __ jmp(false_label_);
+      if (false_label_ != fall_through_) __ jmp(false_label_);
     } else {
-      __ jmp(true_label_);
+      if (true_label_ != fall_through_) __ jmp(true_label_);
     }
   } else if (lit->IsSmi()) {
     if (Smi::cast(*lit)->value() == 0) {
-      __ jmp(false_label_);
+      if (false_label_ != fall_through_) __ jmp(false_label_);
     } else {
-      __ jmp(true_label_);
+      if (true_label_ != fall_through_) __ jmp(true_label_);
     }
   } else {
     // For simplicity we always test the accumulator register.
@@ -371,13 +395,14 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(true_label_, false_label_, fall_through_);
 }
 
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  ASSERT_EQ(materialize_true, materialize_false);
+  ASSERT(materialize_true == materialize_false);
   __ bind(materialize_true);
 }
 
@@ -410,8 +435,8 @@
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  ASSERT(materialize_false == false_label_);
   ASSERT(materialize_true == true_label_);
+  ASSERT(materialize_false == false_label_);
 }
 
 
@@ -434,6 +459,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   if (flag) {
     if (true_label_ != fall_through_) __ jmp(true_label_);
   } else {
@@ -525,6 +551,13 @@
 }
 
 
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+                                                     bool should_normalize,
+                                                     Label* if_true,
+                                                     Label* if_false) {
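+  // Nothing to do yet: bailout support is not implemented on x64.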
+}
+
+
 void FullCodeGenerator::EmitDeclaration(Variable* variable,
                                         Variable::Mode mode,
                                         FunctionLiteral* function) {
@@ -811,26 +844,20 @@
   __ bind(&update_each);
   __ movq(result_register(), rbx);
   // Perform the assignment as if via '='.
-  EmitAssignment(stmt->each());
+  { EffectContext context(this);
+    EmitAssignment(stmt->each(), stmt->AssignmentId());
+  }
 
   // Generate code for the body of the loop.
-  Label stack_limit_hit, stack_check_done;
   Visit(stmt->body());
 
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_done);
-
   // Generate code for going to the next element by incrementing the
   // index (smi) stored on top of the stack.
   __ bind(loop_statement.continue_target());
   __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
-  __ jmp(&loop);
 
-  // Slow case for the stack limit check.
-  StackCheckStub stack_check_stub;
-  __ bind(&stack_limit_hit);
-  __ CallStub(&stack_check_stub);
-  __ jmp(&stack_check_done);
+  EmitStackCheck(stmt);
+  __ jmp(&loop);
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_target());
@@ -1393,6 +1420,7 @@
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
                              expr->op());
+      context()->Plug(rax);
       break;
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
@@ -1501,7 +1529,7 @@
 }
 
 
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_id) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
@@ -1549,6 +1577,7 @@
       break;
     }
   }
+  context()->Plug(rax);
 }
 
 
@@ -1621,8 +1650,6 @@
     }
     __ bind(&done);
   }
-
-  context()->Plug(rax);
 }
 
 
@@ -1659,10 +1686,9 @@
     __ push(Operand(rsp, kPointerSize));  // Receiver is under value.
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(rax);
-    context()->DropAndPlug(1, rax);
-  } else {
-    context()->Plug(rax);
+    __ Drop(1);
   }
+  context()->Plug(rax);
 }
 
 
@@ -1711,13 +1737,14 @@
   if (key->IsPropertyName()) {
     VisitForAccumulatorValue(expr->obj());
     EmitNamedPropertyLoad(expr);
+    context()->Plug(rax);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
     __ pop(rdx);
     EmitKeyedPropertyLoad(expr);
+    context()->Plug(rax);
   }
-  context()->Plug(rax);
 }
 
 
@@ -1727,14 +1754,14 @@
   // Code common for calls using the IC.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
-  { PreserveStatementPositionScope scope(masm()->positions_recorder());
+  { PreservePositionScope scope(masm()->positions_recorder());
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
     }
     __ Move(rcx, name);
   }
   // Record source position for debugger.
-  SetSourcePosition(expr->position(), FORCED_POSITION);
+  SetSourcePosition(expr->position());
   // Call the IC initialization code.
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = StubCache::ComputeCallInitialize(arg_count, in_loop);
@@ -1760,13 +1787,13 @@
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
-  { PreserveStatementPositionScope scope(masm()->positions_recorder());
+  { PreservePositionScope scope(masm()->positions_recorder());
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
     }
   }
   // Record source position for debugger.
-  SetSourcePosition(expr->position(), FORCED_POSITION);
+  SetSourcePosition(expr->position());
   // Call the IC initialization code.
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arg_count, in_loop);
@@ -1782,13 +1809,13 @@
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
-  { PreserveStatementPositionScope scope(masm()->positions_recorder());
+  { PreservePositionScope scope(masm()->positions_recorder());
     for (int i = 0; i < arg_count; i++) {
       VisitForStackValue(args->at(i));
     }
   }
   // Record source position for debugger.
-  SetSourcePosition(expr->position(), FORCED_POSITION);
+  SetSourcePosition(expr->position());
   InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
   CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
   __ CallStub(&stub);
@@ -1811,7 +1838,7 @@
     // arguments.
     ZoneList<Expression*>* args = expr->arguments();
     int arg_count = args->length();
-    { PreserveStatementPositionScope pos_scope(masm()->positions_recorder());
+    { PreservePositionScope pos_scope(masm()->positions_recorder());
       VisitForStackValue(fun);
       __ PushRoot(Heap::kUndefinedValueRootIndex);  // Reserved receiver slot.
 
@@ -1840,7 +1867,7 @@
       __ movq(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
     }
     // Record source position for debugger.
-    SetSourcePosition(expr->position(), FORCED_POSITION);
+    SetSourcePosition(expr->position());
     InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
     CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
     __ CallStub(&stub);
@@ -1857,7 +1884,7 @@
     // Call to a lookup slot (dynamically introduced variable).
     Label slow, done;
 
-    { PreserveStatementPositionScope scope(masm()->positions_recorder());
+    { PreservePositionScope scope(masm()->positions_recorder());
       // Generate code for loading from variables potentially shadowed
       // by eval-introduced variables.
       EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
@@ -1898,7 +1925,7 @@
     Literal* key = prop->key()->AsLiteral();
     if (key != NULL && key->handle()->IsSymbol()) {
       // Call to a named property, use call IC.
-      { PreserveStatementPositionScope scope(masm()->positions_recorder());
+      { PreservePositionScope scope(masm()->positions_recorder());
         VisitForStackValue(prop->obj());
       }
       EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
@@ -1906,16 +1933,16 @@
       // Call to a keyed property.
       // For a synthetic property use keyed load IC followed by function call,
       // for a regular property use KeyedCallIC.
-      { PreserveStatementPositionScope scope(masm()->positions_recorder());
+      { PreservePositionScope scope(masm()->positions_recorder());
         VisitForStackValue(prop->obj());
       }
       if (prop->is_synthetic()) {
-        { PreserveStatementPositionScope scope(masm()->positions_recorder());
+        { PreservePositionScope scope(masm()->positions_recorder());
           VisitForAccumulatorValue(prop->key());
           __ movq(rdx, Operand(rsp, 0));
         }
         // Record source code position for IC call.
-        SetSourcePosition(prop->position(), FORCED_POSITION);
+        SetSourcePosition(prop->position());
         Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
         EmitCallIC(ic, RelocInfo::CODE_TARGET);
         // Pop receiver.
@@ -1940,7 +1967,7 @@
         loop_depth() == 0) {
       lit->set_try_full_codegen(true);
     }
-    { PreserveStatementPositionScope scope(masm()->positions_recorder());
+    { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(fun);
     }
     // Load global receiver object.
@@ -2628,6 +2655,16 @@
 }
 
 
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::LOG);
+  ASSERT(args->length() == 1);
+  VisitForStackValue(args->at(0));
+  __ CallStub(&stub);
+  context()->Plug(rax);
+}
+
+
 void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the runtime function.
   ASSERT(args->length() == 1);
@@ -2657,11 +2694,12 @@
 
 
 void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+  RegExpConstructResultStub stub;
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
-  __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+  __ CallStub(&stub);
   context()->Plug(rax);
 }
 
@@ -2923,7 +2961,7 @@
     case Token::ADD: {
       Comment cmt(masm_, "[ UnaryOperation (ADD)");
       VisitForAccumulatorValue(expr->expression());
-      NearLabel no_conversion;
+      Label no_conversion;
       Condition is_smi = masm_->CheckSmi(result_register());
       __ j(is_smi, &no_conversion);
       __ push(result_register());
@@ -3076,6 +3114,10 @@
       __ SmiAddConstant(rax, rax, Smi::FromInt(1));
     }
   }
+
+  // Record position before stub call.
+  SetSourcePosition(expr->position());
+
   // Call stub for +1/-1.
   GenericBinaryOpStub stub(expr->binary_op(),
                            NO_OVERWRITE,
@@ -3091,6 +3133,7 @@
         { EffectContext context(this);
           EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                  Token::ASSIGN);
+          context.Plug(rax);
         }
         // For all contexts except kEffect: We have the result on
         // top of the stack.
@@ -3101,6 +3144,7 @@
         // Perform the assignment as if via '='.
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
                                Token::ASSIGN);
+        context()->Plug(rax);
       }
       break;
     case NAMED_PROPERTY: {
@@ -3292,7 +3336,7 @@
 
     case Token::INSTANCEOF: {
       VisitForStackValue(expr->right());
-      InstanceofStub stub;
+      InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
       __ testq(rax, rax);
        // The stub returns 0 for true.
@@ -3413,6 +3457,9 @@
          mode == RelocInfo::CODE_TARGET_CONTEXT);
   __ call(ic, mode);
 
+  // Crankshaft doesn't need patching of inlined loads and stores.
+  if (V8::UseCrankshaft()) return;
+
   // If we're calling a (keyed) load or store stub, we have to mark
   // the call as containing no inlined code so we will not attempt to
   // patch it.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 204dd6a..aff778a 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -108,9 +108,6 @@
                                            Register name,
                                            Register r0,
                                            Register r1) {
-  // Assert that name contains a string.
-  if (FLAG_debug_code) __ AbortIfNotString(name);
-
   // Compute the capacity mask.
   const int kCapacityOffset =
       StringDictionary::kHeaderSize +
@@ -386,6 +383,8 @@
 
 
 static bool PatchInlinedMapCheck(Address address, Object* map) {
+  if (V8::UseCrankshaft()) return false;
+
   // Arguments are address of start of call sequence that called
   // the IC,
   Address test_instruction_address =
@@ -751,7 +750,7 @@
   char_at_generator.GenerateFast(masm);
   __ ret(0);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
@@ -1590,13 +1589,7 @@
   // rsp[(argc + 1) * 8]      : argument 0 = receiver
   // -----------------------------------
 
-  // Check if the name is a string.
-  Label miss;
-  __ JumpIfSmi(rcx, &miss);
-  Condition cond = masm->IsObjectStringType(rcx, rax, rax);
-  __ j(NegateCondition(cond), &miss);
   GenerateCallNormal(masm, argc);
-  __ bind(&miss);
   GenerateMiss(masm, argc);
 }
 
@@ -1708,6 +1701,8 @@
 
 
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1750,6 +1745,8 @@
 
 
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
+  if (V8::UseCrankshaft()) return false;
+
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1908,9 +1905,77 @@
 }
 
 
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
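+  // Move receiver, name, and value beneath the return address.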
+  __ pop(rbx);
+  __ push(rdx);
+  __ push(rcx);
+  __ push(rax);
+  __ push(rbx);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+}
+
+
 #undef __
 
 
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return less;
+    case Token::LTE:
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return greater_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+  HandleScope scope;
+  Handle<Code> rewritten;
+  State previous_state = GetState();
+  State state = TargetState(previous_state, false, x, y);
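+  // Once the IC has generalized to GENERIC, use the ordinary CompareStub.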
+  if (state == GENERIC) {
+    CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
+    rewritten = stub.GetCode();
+  } else {
+    ICCompareStub stub(op_, state);
+    rewritten = stub.GetCode();
+  }
+  set_target(*rewritten);
+
+#ifdef DEBUG
+  if (FLAG_trace_ic) {
+    PrintF("[CompareIC (%s->%s)#%s]\n",
+           GetStateName(previous_state),
+           GetStateName(state),
+           Token::Name(op_));
+  }
+#endif
+}
+
+void PatchInlinedSmiCode(Address address) {
+  UNIMPLEMENTED();
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
diff --git a/test/mjsunit/regress/regress-1146.js b/src/x64/lithium-codegen-x64.h
similarity index 61%
copy from test/mjsunit/regress/regress-1146.js
copy to src/x64/lithium-codegen-x64.h
index e8028ce..cd1f08d 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/src/x64/lithium-codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,38 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+#ifndef V8_X64_LITHIUM_CODEGEN_X64_H_
+#define V8_X64_LITHIUM_CODEGEN_X64_H_
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+#include "x64/lithium-x64.h"
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+#include "deoptimizer.h"
+#include "safepoint-table.h"
+#include "scopes.h"
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+
+class LCodeGen BASE_EMBEDDED {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode() {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_LITHIUM_CODEGEN_X64_H_
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
new file mode 100644
index 0000000..f66ec16
--- /dev/null
+++ b/src/x64/lithium-x64.h
@@ -0,0 +1,251 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_LITHIUM_X64_H_
+#define V8_X64_LITHIUM_X64_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+class LEnvironment;
+class Translation;
+
+class LInstruction: public ZoneObject {
+ public:
+  LInstruction() { }
+  virtual ~LInstruction() { }
+
+  // Predicates should be generated by a macro, as in lithium-ia32.h.
+  virtual bool IsLabel() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+  virtual bool IsOsrEntry() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  LPointerMap* pointer_map() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  bool HasPointerMap() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
+};
+
+
+class LParallelMove : public ZoneObject {
+ public:
+  LParallelMove() { }
+
+  void AddMove(LOperand* from, LOperand* to) {
+    UNIMPLEMENTED();
+  }
+
+  const ZoneList<LMoveOperands>* move_operands() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+};
+
+
+class LGap: public LInstruction {
+ public:
+  explicit LGap(HBasicBlock* block) { }
+
+  HBasicBlock* block() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos)  {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+};
+
+
+class LLabel: public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block) : LGap(block) { }
+};
+
+
+class LOsrEntry: public LInstruction {
+ public:
+  // Function could be generated by a macro as in lithium-ia32.h.
+  static LOsrEntry* cast(LInstruction* instr) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LOperand** SpilledRegisterArray() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+  LOperand** SpilledDoubleRegisterArray() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
+    UNIMPLEMENTED();
+  }
+  void MarkSpilledDoubleRegister(int allocation_index,
+                                 LOperand* spill_operand) {
+    UNIMPLEMENTED();
+  }
+};
+
+
+class LPointerMap: public ZoneObject {
+ public:
+  explicit LPointerMap(int position) { }
+
+  int lithium_position() const {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  void RecordPointer(LOperand* op) { UNIMPLEMENTED(); }
+};
+
+
+class LChunk: public ZoneObject {
+ public:
+  explicit LChunk(HGraph* graph) { }
+
+  HGraph* graph() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  const ZoneList<LPointerMap*>* pointer_maps() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LOperand* GetNextSpillSlot(bool double_slot) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LConstantOperand* DefineConstantOperand(HConstant* constant) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LLabel* GetLabel(int block_id) const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  const ZoneList<LInstruction*>* instructions() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  int GetParameterStackSlot(int index) const {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
+
+  LGap* GetGapAt(int index) const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  bool IsGapAt(int index) const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  int NearestGapPos(int index) const {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  void MarkEmptyBlocks() { UNIMPLEMENTED(); }
+
+#ifdef DEBUG
+  void Verify() { UNIMPLEMENTED(); }
+#endif
+};
+
+
+class LChunkBuilder BASE_EMBEDDED {
+ public:
+  LChunkBuilder(HGraph* graph, LAllocator* allocator) { }
+
+  // Build the sequence for the graph.
+  LChunk* Build() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
+    UNIMPLEMENTED(); \
+    return NULL; \
+  }
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_LITHIUM_X64_H_
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 5ea480f..1df9b47 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -74,12 +74,6 @@
 }
 
 
-void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
-  CompareRoot(rsp, Heap::kStackLimitRootIndex);
-  j(below, on_stack_overflow);
-}
-
-
 void MacroAssembler::RecordWriteHelper(Register object,
                                        Register addr,
                                        Register scratch) {
@@ -1504,17 +1498,6 @@
 }
 
 
-void MacroAssembler::AbortIfNotString(Register object) {
-  testb(object, Immediate(kSmiTagMask));
-  Assert(not_equal, "Operand is not a string");
-  push(object);
-  movq(object, FieldOperand(object, HeapObject::kMapOffset));
-  CmpInstanceType(object, FIRST_NONSTRING_TYPE);
-  pop(object);
-  Assert(below, "Operand is not a string");
-}
-
-
 void MacroAssembler::AbortIfNotRootValue(Register src,
                                          Heap::RootListIndex root_value_index,
                                          const char* message) {
@@ -2267,6 +2250,31 @@
 }
 
 
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+  // Load the global or builtins object from the current context.
+  movq(function, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the global context from the global or builtins object.
+  movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
+  // Load the function from the global context.
+  movq(function, Operand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+                                                  Register map) {
+  // Load the initial map.  The global functions all have initial maps.
+  movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+  if (FLAG_debug_code) {
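+    // Check that we really loaded a map: a map's own map is the meta map.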
+    Label ok, fail;
+    CheckMap(map, Factory::meta_map(), &fail, false);
+    jmp(&ok);
+    bind(&fail);
+    Abort("Global functions must have initial map");
+    bind(&ok);
+  }
+}
+
+
 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
   // On Windows 64 stack slots are reserved by the caller for all arguments
   // including the ones passed in registers, and space is always allocated for
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 219ae4f..d8f2fba 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -137,12 +137,6 @@
 #endif
 
   // ---------------------------------------------------------------------------
-  // Stack limit support
-
-  // Do simple test for stack overflow. This doesn't handle an overflow.
-  void StackLimitCheck(Label* on_stack_limit_hit);
-
-  // ---------------------------------------------------------------------------
   // Activation frames
 
   void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
@@ -173,6 +167,14 @@
   // register rax (untouched).
   void LeaveApiExitFrame();
 
+  // Push and pop the registers that can hold pointers.
+  void PushSafepointRegisters() { UNIMPLEMENTED(); }
+  void PopSafepointRegisters() { UNIMPLEMENTED(); }
+  static int SafepointRegisterStackIndex(int reg_code) {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
@@ -629,9 +631,6 @@
   // Abort execution if argument is not a smi. Used in debug code.
   void AbortIfNotSmi(Register object);
 
-  // Abort execution if argument is a string. Used in debug code.
-  void AbortIfNotString(Register object);
-
   // Abort execution if argument is not the root value with the given index.
   void AbortIfNotRootValue(Register src,
                            Heap::RootListIndex root_value_index,
@@ -773,6 +772,13 @@
   // Find the function context up the context chain.
   void LoadContext(Register dst, int context_chain_length);
 
+  // Load the global function with the given index.
+  void LoadGlobalFunction(int index, Register function);
+
+  // Load the initial map from the global function. The registers
+  // function and map can be the same.
+  void LoadGlobalFunctionInitialMap(Register function, Register map);
+
   // ---------------------------------------------------------------------------
   // Runtime calls
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 7ba482c..63e9769 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -923,22 +923,20 @@
 
 
 MaybeObject* CallStubCompiler::GenerateMissBranch() {
+  MaybeObject* maybe_obj =
+      StubCache::ComputeCallMiss(arguments().immediate(), kind_);
   Object* obj;
-  { MaybeObject* maybe_obj =
-        StubCache::ComputeCallMiss(arguments().immediate(), kind_);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
   return obj;
 }
 
 
-MaybeObject* CallStubCompiler::CompileCallConstant(
-    Object* object,
-    JSObject* holder,
-    JSFunction* function,
-    String* name,
-    StubCompiler::CheckType check) {
+MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
+                                                   JSObject* holder,
+                                                   JSFunction* function,
+                                                   String* name,
+                                                   CheckType check) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -950,8 +948,8 @@
   // -----------------------------------
 
   SharedFunctionInfo* function_info = function->shared();
-  if (function_info->HasCustomCallGenerator()) {
-    const int id = function_info->custom_call_generator_id();
+  if (function_info->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function_info->builtin_function_id();
     MaybeObject* maybe_result = CompileCustomCall(
         id, object, holder,  NULL, function, name);
     Object* result;
@@ -1467,7 +1465,7 @@
   char_at_generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1539,7 +1537,7 @@
   char_code_at_generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_code_at_generator.GenerateSlow(masm(), call_helper);
 
   __ bind(&index_out_of_range);
@@ -1608,7 +1606,7 @@
   char_from_code_generator.GenerateFast(masm());
   __ ret(2 * kPointerSize);
 
-  ICRuntimeCallHelper call_helper;
+  StubRuntimeCallHelper call_helper;
   char_from_code_generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
@@ -1832,8 +1830,8 @@
   // -----------------------------------
 
   SharedFunctionInfo* function_info = function->shared();
-  if (function_info->HasCustomCallGenerator()) {
-    const int id = function_info->custom_call_generator_id();
+  if (function_info->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = function_info->builtin_function_id();
     MaybeObject* maybe_result = CompileCustomCall(
         id, object, holder, cell, function, name);
     Object* result;
@@ -2249,6 +2247,52 @@
 }
 
 
+MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- rax    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(rdx, &miss);
+
+  // Check that the map matches.
+  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+         Handle<Map>(receiver->map()));
+  __ j(not_equal, &miss);
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rax, &miss);
+
+  // Get the elements array.
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ AssertFastElements(rcx);
+
+  // Check that the key is within bounds.
+  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ j(above_equal, &miss);
+
+  // Load the result and make sure it's not the hole.
+  SmiIndex index = masm()->SmiToIndex(rbx, rax, kPointerSizeLog2);
+  __ movq(rbx, FieldOperand(rcx,
+                            index.reg,
+                            index.scale,
+                            FixedArray::kHeaderSize));
+  __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
+  __ j(equal, &miss);
+  __ movq(rax, rbx);
+  __ ret(0);
+
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
 MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
                                                      AccessorInfo* callback,
                                                      String* name) {
@@ -2477,6 +2521,63 @@
 }
 
 
+MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
+    JSObject* receiver) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : key
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  Label miss;
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(rdx, &miss);
+
+  // Check that the map matches.
+  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+         Handle<Map>(receiver->map()));
+  __ j(not_equal, &miss);
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rcx, &miss);
+
+  // Get the elements array and make sure it is a fast array, not copy-on-write.
+  __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
+         Factory::fixed_array_map());
+  __ j(not_equal, &miss);
+
+  // Check that the key is within bounds.
+  if (receiver->IsJSArray()) {
+    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+    __ j(above_equal, &miss);
+  } else {
+    __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+    __ j(above_equal, &miss);
+  }
+
+  // Do the store and update the write barrier. Make sure to preserve
+  // the value in register rax.
+  __ movq(rdx, rax);
+  __ SmiToInteger32(rcx, rcx);
+  __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+          rax);
+  __ RecordWrite(rdi, 0, rdx, rcx);
+
+  // Done.
+  __ ret(0);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+  __ jmp(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, NULL);
+}
+
+
 void StubCompiler::GenerateLoadInterceptor(JSObject* object,
                                            JSObject* interceptor_holder,
                                            LookupResult* lookup,
diff --git a/src/zone.h b/src/zone.h
index 3397356..dde722f 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -169,9 +169,19 @@
   // always zero. The capacity must be non-negative.
   explicit ZoneList(int capacity)
       : List<T, ZoneListAllocationPolicy>(capacity) { }
+
+  // Construct a new ZoneList by copying the elements of the given ZoneList.
+  explicit ZoneList(const ZoneList<T>& other)
+      : List<T, ZoneListAllocationPolicy>(other.length()) {
+    AddAll(other);
+  }
 };
 
 
+// Introduce a convenience type for zone lists of map handles.
+typedef ZoneList<Handle<Map> > ZoneMapList;
+
+
 // ZoneScopes keep track of the current parsing and compilation
 // nesting and cleans up generated ASTs in the Zone when exiting the
 // outer-most scope.
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index ba3466d..7038137 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -50,6 +50,7 @@
     'test-dataflow.cc',
     'test-debug.cc',
     'test-decls.cc',
+    'test-deoptimization.cc',
     'test-diy-fp.cc',
     'test-double.cc',
     'test-dtoa.cc',
@@ -69,6 +70,7 @@
     'test-parsing.cc',
     'test-profile-generator.cc',
     'test-regexp.cc',
+    'test-reloc-info.cc',
     'test-serialize.cc',
     'test-sockets.cc',
     'test-spaces.cc',
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
new file mode 100644
index 0000000..aa2b355
--- /dev/null
+++ b/test/cctest/cctest.gyp
@@ -0,0 +1,160 @@
+# Copyright 2010 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+  'target_defaults': {
+    'conditions': [
+      ['OS!="mac"', {
+        # TODO(sgjesse): This is currently copied from v8.gyp and should
+        # probably be refactored.
+        'conditions': [
+          ['v8_target_arch=="arm"', {
+            'defines': [
+              'V8_TARGET_ARCH_ARM',
+            ],
+          }],
+          ['v8_target_arch=="ia32"', {
+            'defines': [
+              'V8_TARGET_ARCH_IA32',
+            ],
+          }],
+          ['v8_target_arch=="x64"', {
+            'defines': [
+              'V8_TARGET_ARCH_X64',
+            ],
+          }],
+        ],
+      }],
+    ],
+  },
+  'targets': [
+    {
+      'target_name': 'cctest',
+      'type': 'executable',
+      'dependencies': [
+        '../../tools/gyp/v8.gyp:v8',
+      ],
+      'include_dirs': [
+        '../../src',
+      ],
+      'sources': [
+        'cctest.cc',
+        'gay-fixed.cc',
+        'gay-precision.cc',
+        'gay-shortest.cc',
+        'test-accessors.cc',
+        'test-alloc.cc',
+        'test-api.cc',
+        'test-ast.cc',
+        'test-bignum.cc',
+        'test-bignum-dtoa.cc',
+        'test-circular-queue.cc',
+        'test-compiler.cc',
+        'test-conversions.cc',
+        'test-cpu-profiler.cc',
+        'test-dataflow.cc',
+        'test-debug.cc',
+        'test-decls.cc',
+        'test-deoptimization.cc',
+        'test-diy-fp.cc',
+        'test-double.cc',
+        'test-dtoa.cc',
+        'test-fast-dtoa.cc',
+        'test-fixed-dtoa.cc',
+        'test-flags.cc',
+        'test-func-name-inference.cc',
+        'test-hashmap.cc',
+        'test-heap.cc',
+        'test-heap-profiler.cc',
+        'test-list.cc',
+        'test-liveedit.cc',
+        'test-lock.cc',
+        'test-log.cc',
+        'test-log-utils.cc',
+        'test-mark-compact.cc',
+        'test-parsing.cc',
+        'test-profile-generator.cc',
+        'test-regexp.cc',
+        'test-reloc-info.cc',
+        'test-serialize.cc',
+        'test-sockets.cc',
+        'test-spaces.cc',
+        'test-strings.cc',
+        'test-strtod.cc',
+        'test-thread-termination.cc',
+        'test-threads.cc',
+        'test-type-info.cc',
+        'test-unbound-queue.cc',
+        'test-utils.cc',
+        'test-version.cc'
+      ],
+      'conditions': [
+        ['v8_target_arch=="ia32"', {
+          'sources': [
+            'test-assembler-ia32.cc',
+            'test-disasm-ia32.cc',
+            'test-log-stack-tracer.cc'
+          ],
+        }],
+        ['v8_target_arch=="x64"', {
+          'sources': [
+            'test-assembler-x64.cc',
+            'test-macro-assembler-x64.cc',
+            'test-log-stack-tracer.cc'
+          ],
+        }],
+        ['v8_target_arch=="arm"', {
+          'sources': [
+            'test-assembler-arm.cc',
+            'test-disasm-arm.cc'
+          ],
+        }],
+        ['v8_target_arch=="mips"', {
+          'sources': [
+            'test-assembler-mips.cc',
+            'test-mips.cc',
+          ],
+        }],
+        [ 'OS=="linux"', {
+          'sources': [
+            'test-platform-linux.cc',
+          ],
+        }],
+        [ 'OS=="mac"', {
+          'sources': [
+            'test-platform-macos.cc',
+          ],
+        }],
+        [ 'OS=="win"', {
+          'sources': [
+            'test-platform-win32.cc',
+          ],
+        }],
+      ],
+    },
+  ],
+}
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 895e245..4dfe51a 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -29,19 +29,40 @@
 
 test-api/Bug*: FAIL
 
+
+##############################################################################
 # BUG(281): This test fails on some Linuxes.
 test-debug/DebuggerAgent: PASS, (PASS || FAIL) if $system == linux
 
 # BUG(382): Weird test. Can't guarantee that it never times out.
 test-api/ApplyInterruption: PASS || TIMEOUT
 
+# BUG(484): This test, which we thought had been fixed in r5236, is failing
+# again. Disabled until the bug in the test is fixed. It only fails when the
+# snapshot is on, so it is marked PASS || FAIL.
+test-heap-profiler/HeapSnapshotsDiff: PASS || FAIL
+
 # These tests always fail.  They are here to test test.py.  If
 # they don't fail then test.py has failed.
 test-serialize/TestThatAlwaysFails: FAIL
 test-serialize/DependentTestThatAlwaysFails: FAIL
 
+
+##############################################################################
+[ $arch == x64 ]
+
+# Optimization is currently not working with Crankshaft on x64 and ARM.
+test-heap/TestInternalWeakLists: PASS || FAIL
+test-heap/TestInternalWeakListsTraverseWithGC: PASS || FAIL
+
+
+##############################################################################
 [ $arch == arm ]
 
+# Optimization is currently not working with Crankshaft on x64 and ARM.
+test-heap/TestInternalWeakLists: PASS || FAIL
+test-heap/TestInternalWeakListsTraverseWithGC: PASS || FAIL
+
 # We cannot assume that we can throw OutOfMemory exceptions in all situations.
 # Apparently our ARM box is in such a state. Skip the test as it also runs for
 # a long time.
@@ -51,6 +72,23 @@
 # BUG(355): Test crashes on ARM.
 test-log/ProfLazyMode: SKIP
 
+# BUG(945): Socket connect fails on ARM
+test-debug/DebuggerAgentProtocolOverflowHeader: SKIP
+test-sockets/Socket: SKIP
+
+
+##############################################################################
+[ $arch == arm && $crankshaft ]
+
+# Tests that fail with crankshaft.
+test-deoptimization/DeoptimizeBinaryOperationMOD: FAIL
+
+# Tests that time out with crankshaft.
+test-debug/ThreadedDebugging: SKIP
+test-debug/DebugBreakLoop: SKIP
+
+
+##############################################################################
 [ $arch == mips ]
 test-accessors: SKIP
 test-alloc: SKIP
diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc
index fcf2ce4..d2a28d7 100644
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -133,7 +133,7 @@
   // Force the creation of an initial map and set the code to
   // something empty.
   Factory::NewJSObject(function);
-  function->set_code(Builtins::builtin(Builtins::EmptyFunction));
+  function->ReplaceCode(Builtins::builtin(Builtins::EmptyFunction));
   // Patch the map to have an accessor for "get".
   Handle<Map> map(function->initial_map());
   Handle<DescriptorArray> instance_descriptors(map->instance_descriptors());
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 4af0c52..9539973 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -814,79 +814,6 @@
 }
 
 
-static void* expected_ptr;
-static v8::Handle<v8::Value> callback(const v8::Arguments& args) {
-  void* ptr = v8::External::Unwrap(args.Data());
-  CHECK_EQ(expected_ptr, ptr);
-  return v8::Boolean::New(true);
-}
-
-
-static void TestExternalPointerWrapping() {
-  v8::HandleScope scope;
-  LocalContext env;
-
-  v8::Handle<v8::Value> data = v8::External::Wrap(expected_ptr);
-
-  v8::Handle<v8::Object> obj = v8::Object::New();
-  obj->Set(v8_str("func"),
-           v8::FunctionTemplate::New(callback, data)->GetFunction());
-  env->Global()->Set(v8_str("obj"), obj);
-
-  CHECK(CompileRun(
-        "function foo() {\n"
-        "  for (var i = 0; i < 13; i++) obj.func();\n"
-        "}\n"
-        "foo(), true")->BooleanValue());
-}
-
-
-THREADED_TEST(ExternalWrap) {
-  // Check heap allocated object.
-  int* ptr = new int;
-  expected_ptr = ptr;
-  TestExternalPointerWrapping();
-  delete ptr;
-
-  // Check stack allocated object.
-  int foo;
-  expected_ptr = &foo;
-  TestExternalPointerWrapping();
-
-  // Check not aligned addresses.
-  const int n = 100;
-  char* s = new char[n];
-  for (int i = 0; i < n; i++) {
-    expected_ptr = s + i;
-    TestExternalPointerWrapping();
-  }
-
-  delete[] s;
-
-  // Check several invalid addresses.
-  expected_ptr = reinterpret_cast<void*>(1);
-  TestExternalPointerWrapping();
-
-  expected_ptr = reinterpret_cast<void*>(0xdeadbeef);
-  TestExternalPointerWrapping();
-
-  expected_ptr = reinterpret_cast<void*>(0xdeadbeef + 1);
-  TestExternalPointerWrapping();
-
-#if defined(V8_HOST_ARCH_X64)
-  // Check a value with a leading 1 bit in x64 Smi encoding.
-  expected_ptr = reinterpret_cast<void*>(0x400000000);
-  TestExternalPointerWrapping();
-
-  expected_ptr = reinterpret_cast<void*>(0xdeadbeefdeadbeef);
-  TestExternalPointerWrapping();
-
-  expected_ptr = reinterpret_cast<void*>(0xdeadbeefdeadbeef + 1);
-  TestExternalPointerWrapping();
-#endif
-}
-
-
 THREADED_TEST(FindInstanceInPrototypeChain) {
   v8::HandleScope scope;
   LocalContext env;
@@ -4417,47 +4344,167 @@
 }
 
 
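+// Compares two NUL-terminated UTF-16 strings; returns 0 if they are equal
+// and the difference of the first differing code units otherwise.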
+static int StrCmp16(uint16_t* a, uint16_t* b) {
+  while (true) {
+    if (*a == 0 && *b == 0) return 0;
+    if (*a != *b) return 0 + *a - *b;
+    a++;
+    b++;
+  }
+}
+
+
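+// Compares at most n code units of two UTF-16 strings; returns 0 if no
+// difference is found within the first n units.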
+static int StrNCmp16(uint16_t* a, uint16_t* b, int n) {
+  while (true) {
+    if (n-- == 0) return 0;
+    if (*a == 0 && *b == 0) return 0;
+    if (*a != *b) return 0 + *a - *b;
+    a++;
+    b++;
+  }
+}
+
+
 THREADED_TEST(StringWrite) {
   v8::HandleScope scope;
   v8::Handle<String> str = v8_str("abcde");
+  // abc<Icelandic eth><Unicode snowman>.
+  v8::Handle<String> str2 = v8_str("abc\303\260\342\230\203");
+
+  CHECK_EQ(5, str2->Length());
 
   char buf[100];
+  char utf8buf[100];
+  uint16_t wbuf[100];
   int len;
+  int charlen;
+
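+  // str2 encodes to 8 UTF-8 bytes plus a NUL terminator: 'a', 'b', 'c'
+  // (1 byte each), U+00F0 (2 bytes) and U+2603 (3 bytes), so the capped
+  // writes below can end mid-character.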
+  memset(utf8buf, 0x1, sizeof(utf8buf));
+  len = str2->WriteUtf8(utf8buf, sizeof(utf8buf), &charlen);
+  CHECK_EQ(len, 9);
+  CHECK_EQ(charlen, 5);
+  CHECK_EQ(strcmp(utf8buf, "abc\303\260\342\230\203"), 0);
+
+  memset(utf8buf, 0x1, sizeof(utf8buf));
+  len = str2->WriteUtf8(utf8buf, 8, &charlen);
+  CHECK_EQ(len, 8);
+  CHECK_EQ(charlen, 5);
+  CHECK_EQ(strncmp(utf8buf, "abc\303\260\342\230\203\1", 9), 0);
+
+  memset(utf8buf, 0x1, sizeof(utf8buf));
+  len = str2->WriteUtf8(utf8buf, 7, &charlen);
+  CHECK_EQ(len, 5);
+  CHECK_EQ(charlen, 4);
+  CHECK_EQ(strncmp(utf8buf, "abc\303\260\1", 5), 0);
+
+  memset(utf8buf, 0x1, sizeof(utf8buf));
+  len = str2->WriteUtf8(utf8buf, 6, &charlen);
+  CHECK_EQ(len, 5);
+  CHECK_EQ(charlen, 4);
+  CHECK_EQ(strncmp(utf8buf, "abc\303\260\1", 5), 0);
+
+  memset(utf8buf, 0x1, sizeof(utf8buf));
+  len = str2->WriteUtf8(utf8buf, 5, &charlen);
+  CHECK_EQ(len, 5);
+  CHECK_EQ(charlen, 4);
+  CHECK_EQ(strncmp(utf8buf, "abc\303\260\1", 5), 0);
+
+  memset(utf8buf, 0x1, sizeof(utf8buf));
+  len = str2->WriteUtf8(utf8buf, 4, &charlen);
+  CHECK_EQ(len, 3);
+  CHECK_EQ(charlen, 3);
+  CHECK_EQ(strncmp(utf8buf, "abc\1", 4), 0);
+
+  memset(utf8buf, 0x1, sizeof(utf8buf));
+  len = str2->WriteUtf8(utf8buf, 3, &charlen);
+  CHECK_EQ(len, 3);
+  CHECK_EQ(charlen, 3);
+  CHECK_EQ(strncmp(utf8buf, "abc\1", 4), 0);
+
+  memset(utf8buf, 0x1, sizeof(utf8buf));
+  len = str2->WriteUtf8(utf8buf, 2, &charlen);
+  CHECK_EQ(len, 2);
+  CHECK_EQ(charlen, 2);
+  CHECK_EQ(strncmp(utf8buf, "ab\1", 3), 0);
 
   memset(buf, 0x1, sizeof(buf));
+  memset(wbuf, 0x1, sizeof(wbuf));
   len = str->WriteAscii(buf);
   CHECK_EQ(len, 5);
-  CHECK_EQ(strncmp("abcde\0", buf, 6), 0);
+  len = str->Write(wbuf);
+  CHECK_EQ(len, 5);
+  CHECK_EQ(strcmp("abcde", buf), 0);
+  uint16_t answer1[] = {'a', 'b', 'c', 'd', 'e', '\0'};
+  CHECK_EQ(StrCmp16(answer1, wbuf), 0);
 
   memset(buf, 0x1, sizeof(buf));
+  memset(wbuf, 0x1, sizeof(wbuf));
   len = str->WriteAscii(buf, 0, 4);
   CHECK_EQ(len, 4);
+  len = str->Write(wbuf, 0, 4);
+  CHECK_EQ(len, 4);
   CHECK_EQ(strncmp("abcd\1", buf, 5), 0);
+  uint16_t answer2[] = {'a', 'b', 'c', 'd', 0x101};
+  CHECK_EQ(StrNCmp16(answer2, wbuf, 5), 0);
 
   memset(buf, 0x1, sizeof(buf));
+  memset(wbuf, 0x1, sizeof(wbuf));
   len = str->WriteAscii(buf, 0, 5);
   CHECK_EQ(len, 5);
+  len = str->Write(wbuf, 0, 5);
+  CHECK_EQ(len, 5);
   CHECK_EQ(strncmp("abcde\1", buf, 6), 0);
+  uint16_t answer3[] = {'a', 'b', 'c', 'd', 'e', 0x101};
+  CHECK_EQ(StrNCmp16(answer3, wbuf, 6), 0);
 
   memset(buf, 0x1, sizeof(buf));
+  memset(wbuf, 0x1, sizeof(wbuf));
   len = str->WriteAscii(buf, 0, 6);
   CHECK_EQ(len, 5);
-  CHECK_EQ(strncmp("abcde\0", buf, 6), 0);
+  len = str->Write(wbuf, 0, 6);
+  CHECK_EQ(len, 5);
+  CHECK_EQ(strcmp("abcde", buf), 0);
+  uint16_t answer4[] = {'a', 'b', 'c', 'd', 'e', '\0'};
+  CHECK_EQ(StrCmp16(answer4, wbuf), 0);
 
   memset(buf, 0x1, sizeof(buf));
+  memset(wbuf, 0x1, sizeof(wbuf));
   len = str->WriteAscii(buf, 4, -1);
   CHECK_EQ(len, 1);
-  CHECK_EQ(strncmp("e\0", buf, 2), 0);
+  len = str->Write(wbuf, 4, -1);
+  CHECK_EQ(len, 1);
+  CHECK_EQ(strcmp("e", buf), 0);
+  uint16_t answer5[] = {'e', '\0'};
+  CHECK_EQ(StrCmp16(answer5, wbuf), 0);
 
   memset(buf, 0x1, sizeof(buf));
+  memset(wbuf, 0x1, sizeof(wbuf));
   len = str->WriteAscii(buf, 4, 6);
   CHECK_EQ(len, 1);
-  CHECK_EQ(strncmp("e\0", buf, 2), 0);
+  len = str->Write(wbuf, 4, 6);
+  CHECK_EQ(len, 1);
+  CHECK_EQ(strcmp("e", buf), 0);
+  CHECK_EQ(StrCmp16(answer5, wbuf), 0);
 
   memset(buf, 0x1, sizeof(buf));
+  memset(wbuf, 0x1, sizeof(wbuf));
   len = str->WriteAscii(buf, 4, 1);
   CHECK_EQ(len, 1);
+  len = str->Write(wbuf, 4, 1);
+  CHECK_EQ(len, 1);
   CHECK_EQ(strncmp("e\1", buf, 2), 0);
+  uint16_t answer6[] = {'e', 0x101};
+  CHECK_EQ(StrNCmp16(answer6, wbuf, 2), 0);
+
+  memset(buf, 0x1, sizeof(buf));
+  memset(wbuf, 0x1, sizeof(wbuf));
+  len = str->WriteAscii(buf, 3, 1);
+  CHECK_EQ(len, 1);
+  len = str->Write(wbuf, 3, 1);
+  CHECK_EQ(len, 1);
+  CHECK_EQ(strncmp("d\1", buf, 2), 0);
+  uint16_t answer7[] = {'d', 0x101};
+  CHECK_EQ(StrNCmp16(answer7, wbuf, 2), 0);
 }
 
 
@@ -5133,13 +5180,11 @@
 }
 
 
-static bool allowed_access_type[v8::ACCESS_KEYS + 1] = { false };
 static bool NamedAccessBlocker(Local<v8::Object> global,
                                Local<Value> name,
                                v8::AccessType type,
                                Local<Value> data) {
-  return Context::GetCurrent()->Global()->Equals(global) ||
-      allowed_access_type[type];
+  return Context::GetCurrent()->Global()->Equals(global);
 }
 
 
@@ -5147,8 +5192,7 @@
                                  uint32_t key,
                                  v8::AccessType type,
                                  Local<Value> data) {
-  return Context::GetCurrent()->Global()->Equals(global) ||
-      allowed_access_type[type];
+  return Context::GetCurrent()->Global()->Equals(global);
 }
 
 
@@ -5180,7 +5224,7 @@
 }
 
 
-TEST(AccessControl) {
+THREADED_TEST(AccessControl) {
   v8::HandleScope handle_scope;
   v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
 
@@ -5206,27 +5250,6 @@
 
   v8::Handle<v8::Object> global0 = context0->Global();
 
-  // Define a property with JS getter and setter.
-  CompileRun(
-      "function getter() { return 'getter'; };\n"
-      "function setter() { return 'setter'; }\n"
-      "Object.defineProperty(this, 'js_accessor_p', {get:getter, set:setter})");
-
-  Local<Value> getter = global0->Get(v8_str("getter"));
-  Local<Value> setter = global0->Get(v8_str("setter"));
-
-  // And define normal element.
-  global0->Set(239, v8_str("239"));
-
-  // Define an element with JS getter and setter.
-  CompileRun(
-      "function el_getter() { return 'el_getter'; };\n"
-      "function el_setter() { return 'el_setter'; };\n"
-      "Object.defineProperty(this, '42', {get: el_getter, set: el_setter});");
-
-  Local<Value> el_getter = global0->Get(v8_str("el_getter"));
-  Local<Value> el_setter = global0->Get(v8_str("el_setter"));
-
   v8::HandleScope scope1;
 
   v8::Persistent<Context> context1 = Context::New();
@@ -5235,187 +5258,40 @@
   v8::Handle<v8::Object> global1 = context1->Global();
   global1->Set(v8_str("other"), global0);
 
-  // Access blocked property.
-  CompileRun("other.blocked_prop = 1");
-
-  ExpectUndefined("other.blocked_prop");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'blocked_prop')");
-  ExpectFalse("propertyIsEnumerable.call(other, 'blocked_prop')");
-
-  // Enable ACCESS_HAS
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  ExpectUndefined("other.blocked_prop");
-  // ... and now we can get the descriptor...
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'blocked_prop').value");
-  // ... and enumerate the property.
-  ExpectTrue("propertyIsEnumerable.call(other, 'blocked_prop')");
-  allowed_access_type[v8::ACCESS_HAS] = false;
-
-  // Access blocked element.
-  CompileRun("other[239] = 1");
-
-  ExpectUndefined("other[239]");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '239')");
-  ExpectFalse("propertyIsEnumerable.call(other, '239')");
-
-  // Enable ACCESS_HAS
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  ExpectUndefined("other[239]");
-  // ... and now we can get the descriptor...
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '239').value");
-  // ... and enumerate the property.
-  ExpectTrue("propertyIsEnumerable.call(other, '239')");
-  allowed_access_type[v8::ACCESS_HAS] = false;
-
-  // Access a property with JS accessor.
-  CompileRun("other.js_accessor_p = 2");
-
-  ExpectUndefined("other.js_accessor_p");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p')");
-
-  // Enable ACCESS_HAS.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  ExpectUndefined("other.js_accessor_p");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
-  allowed_access_type[v8::ACCESS_HAS] = false;
-
-  // Enable both ACCESS_HAS and ACCESS_GET.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  allowed_access_type[v8::ACCESS_GET] = true;
-
-  ExpectString("other.js_accessor_p", "getter");
-  ExpectObject(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get", getter);
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
-
-  allowed_access_type[v8::ACCESS_GET] = false;
-  allowed_access_type[v8::ACCESS_HAS] = false;
-
-  // Enable both ACCESS_HAS and ACCESS_SET.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  allowed_access_type[v8::ACCESS_SET] = true;
-
-  ExpectUndefined("other.js_accessor_p");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get");
-  ExpectObject(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set", setter);
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
-
-  allowed_access_type[v8::ACCESS_SET] = false;
-  allowed_access_type[v8::ACCESS_HAS] = false;
-
-  // Enable both ACCESS_HAS, ACCESS_GET and ACCESS_SET.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  allowed_access_type[v8::ACCESS_GET] = true;
-  allowed_access_type[v8::ACCESS_SET] = true;
-
-  ExpectString("other.js_accessor_p", "getter");
-  ExpectObject(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get", getter);
-  ExpectObject(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set", setter);
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
-
-  allowed_access_type[v8::ACCESS_SET] = false;
-  allowed_access_type[v8::ACCESS_GET] = false;
-  allowed_access_type[v8::ACCESS_HAS] = false;
-
-  // Access an element with JS accessor.
-  CompileRun("other[42] = 2");
-
-  ExpectUndefined("other[42]");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42')");
-
-  // Enable ACCESS_HAS.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  ExpectUndefined("other[42]");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').get");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').set");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
-  allowed_access_type[v8::ACCESS_HAS] = false;
-
-  // Enable both ACCESS_HAS and ACCESS_GET.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  allowed_access_type[v8::ACCESS_GET] = true;
-
-  ExpectString("other[42]", "el_getter");
-  ExpectObject("Object.getOwnPropertyDescriptor(other, '42').get", el_getter);
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').set");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
-
-  allowed_access_type[v8::ACCESS_GET] = false;
-  allowed_access_type[v8::ACCESS_HAS] = false;
-
-  // Enable both ACCESS_HAS and ACCESS_SET.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  allowed_access_type[v8::ACCESS_SET] = true;
-
-  ExpectUndefined("other[42]");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').get");
-  ExpectObject("Object.getOwnPropertyDescriptor(other, '42').set", el_setter);
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
-
-  allowed_access_type[v8::ACCESS_SET] = false;
-  allowed_access_type[v8::ACCESS_HAS] = false;
-
-  // Enable both ACCESS_HAS, ACCESS_GET and ACCESS_SET.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  allowed_access_type[v8::ACCESS_GET] = true;
-  allowed_access_type[v8::ACCESS_SET] = true;
-
-  ExpectString("other[42]", "el_getter");
-  ExpectObject("Object.getOwnPropertyDescriptor(other, '42').get", el_getter);
-  ExpectObject("Object.getOwnPropertyDescriptor(other, '42').set", el_setter);
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
-
-  allowed_access_type[v8::ACCESS_SET] = false;
-  allowed_access_type[v8::ACCESS_GET] = false;
-  allowed_access_type[v8::ACCESS_HAS] = false;
-
   v8::Handle<Value> value;
 
+  // Access blocked property
+  value = v8_compile("other.blocked_prop = 1")->Run();
+  value = v8_compile("other.blocked_prop")->Run();
+  CHECK(value->IsUndefined());
+
+  value = v8_compile("propertyIsEnumerable.call(other, 'blocked_prop')")->Run();
+  CHECK(value->IsFalse());
+
   // Access accessible property
-  value = CompileRun("other.accessible_prop = 3");
+  value = v8_compile("other.accessible_prop = 3")->Run();
   CHECK(value->IsNumber());
   CHECK_EQ(3, value->Int32Value());
   CHECK_EQ(3, g_echo_value);
 
-  value = CompileRun("other.accessible_prop");
+  value = v8_compile("other.accessible_prop")->Run();
   CHECK(value->IsNumber());
   CHECK_EQ(3, value->Int32Value());
 
-  value = CompileRun(
-      "Object.getOwnPropertyDescriptor(other, 'accessible_prop').value");
-  CHECK(value->IsNumber());
-  CHECK_EQ(3, value->Int32Value());
-
-  value = CompileRun("propertyIsEnumerable.call(other, 'accessible_prop')");
+  value =
+    v8_compile("propertyIsEnumerable.call(other, 'accessible_prop')")->Run();
   CHECK(value->IsTrue());
 
   // Enumeration doesn't enumerate accessors from inaccessible objects in
   // the prototype chain even if the accessors are in themselves accessible.
-  value =
+  Local<Value> result =
       CompileRun("(function(){var obj = {'__proto__':other};"
                  "for (var p in obj)"
                  "   if (p == 'accessible_prop' || p == 'blocked_prop') {"
                  "     return false;"
                  "   }"
                  "return true;})()");
-  CHECK(value->IsTrue());
+  CHECK(result->IsTrue());
 
   context1->Exit();
   context0->Exit();
@@ -5922,6 +5798,22 @@
   instance_template->Set(v8_str("f"),
                          v8::FunctionTemplate::New(InstanceFunctionCallback));
 
+  // The script checks how Crankshaft compiles invocations of a missing
+  // global function.  Function g is not defined and should throw when called.
+  const char* script =
+      "function wrapper(call) {"
+      "  var x = 0, y = 1;"
+      "  for (var i = 0; i < 1000; i++) {"
+      "    x += i * 100;"
+      "    y += i * 100;"
+      "  }"
+      "  if (call) g();"
+      "}"
+      "for (var i = 0; i < 17; i++) wrapper(false);"
+      "var thrown = 0;"
+      "try { wrapper(true); } catch (e) { thrown = 1; };"
+      "thrown";
+
   {
     LocalContext env(NULL, instance_template);
     // Hold on to the global object so it can be used again in another
@@ -5932,6 +5824,8 @@
     CHECK_EQ(42, value->Int32Value());
     value = Script::Compile(v8_str("f()"))->Run();
     CHECK_EQ(12, value->Int32Value());
+    value = Script::Compile(v8_str(script))->Run();
+    CHECK_EQ(1, value->Int32Value());
   }
 
   {
@@ -5941,6 +5835,48 @@
     CHECK_EQ(42, value->Int32Value());
     value = Script::Compile(v8_str("f()"))->Run();
     CHECK_EQ(12, value->Int32Value());
+    value = Script::Compile(v8_str(script))->Run();
+    CHECK_EQ(1, value->Int32Value());
+  }
+}
+
+
+THREADED_TEST(CallKnownGlobalReceiver) {
+  v8::HandleScope handle_scope;
+
+  Local<Value> global_object;
+
+  Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
+  Local<ObjectTemplate> instance_template = t->InstanceTemplate();
+
+  // The script checks that we leave the global object, not the global
+  // object proxy, on the stack when we deoptimize from inside arguments
+  // evaluation.  To provoke the error we need both to force deoptimization
+  // from arguments evaluation and to force the CallIC to take the
+  // CallIC_Miss code path, which can't cope with the global proxy.
+  const char* script =
+      "function bar(x, y) { try { } finally { } }"
+      "function baz(x) { try { } finally { } }"
+      "function bom(x) { try { } finally { } }"
+      "function foo(x) { bar([x], bom(2)); }"
+      "for (var i = 0; i < 10000; i++) foo(1);"
+      "foo";
+
+  Local<Value> foo;
+  {
+    LocalContext env(NULL, instance_template);
+    // Hold on to the global object so it can be used again in another
+    // environment initialization.
+    global_object = env->Global();
+    foo = Script::Compile(v8_str(script))->Run();
+  }
+
+  {
+    // Create new environment reusing the global object.
+    LocalContext env(NULL, instance_template, global_object);
+    env->Global()->Set(v8_str("foo"), foo);
+    Local<Value> value = Script::Compile(v8_str("foo()"))->Run();
   }
 }
 
@@ -8915,6 +8851,105 @@
 }
 
 
+v8::Handle<v8::String> a;
+v8::Handle<v8::String> h;
+
+static bool NamedGetAccessBlockAandH(Local<v8::Object> obj,
+                                       Local<Value> name,
+                                       v8::AccessType type,
+                                       Local<Value> data) {
+  return !(name->Equals(a) || name->Equals(h));
+}
+
+
+THREADED_TEST(TurnOnAccessCheckAndRecompile) {
+  v8::HandleScope handle_scope;
+
+  // Create an environment with the access check to the global object
+  // disabled by default.  Once the check is turned on, the registered access
+  // checker will block access to property a and function h.
+  a = v8_str("a");
+  h = v8_str("h");
+  v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+  global_template->SetAccessCheckCallbacks(NamedGetAccessBlockAandH,
+                                           IndexedGetAccessBlocker,
+                                           v8::Handle<v8::Value>(),
+                                           false);
+  v8::Persistent<Context> context = Context::New(NULL, global_template);
+  Context::Scope context_scope(context);
+
+  // Set up a property and a number of functions.
+  context->Global()->Set(v8_str("a"), v8_num(1));
+  static const char* source = "function f1() {return a;}"
+                              "function f2() {return a;}"
+                              "function g1() {return h();}"
+                              "function g2() {return h();}"
+                              "function h() {return 1;}";
+
+  CompileRun(source);
+  Local<Function> f1;
+  Local<Function> f2;
+  Local<Function> g1;
+  Local<Function> g2;
+  Local<Function> h;
+  f1 = Local<Function>::Cast(context->Global()->Get(v8_str("f1")));
+  f2 = Local<Function>::Cast(context->Global()->Get(v8_str("f2")));
+  g1 = Local<Function>::Cast(context->Global()->Get(v8_str("g1")));
+  g2 = Local<Function>::Cast(context->Global()->Get(v8_str("g2")));
+  h =  Local<Function>::Cast(context->Global()->Get(v8_str("h")));
+
+  // Get the global object.
+  v8::Handle<v8::Object> global = context->Global();
+
+  // Call f1 once and f2 a number of times.  This ensures that f1 still uses
+  // the runtime system to retrieve property a, whereas f2 uses the global
+  // load inline cache.
+  CHECK(f1->Call(global, 0, NULL)->Equals(v8_num(1)));
+  for (int i = 0; i < 4; i++) {
+    CHECK(f2->Call(global, 0, NULL)->Equals(v8_num(1)));
+  }
+
+  // Same for g1 and g2.
+  CHECK(g1->Call(global, 0, NULL)->Equals(v8_num(1)));
+  for (int i = 0; i < 4; i++) {
+    CHECK(g2->Call(global, 0, NULL)->Equals(v8_num(1)));
+  }
+
+  // Detach the global and turn on the access check, which now blocks access
+  // to property a and function h.
+  context->DetachGlobal();
+  context->Global()->TurnOnAccessCheck();
+
+  // A failing access check on a property get results in undefined.
+  CHECK(f1->Call(global, 0, NULL)->IsUndefined());
+  CHECK(f2->Call(global, 0, NULL)->IsUndefined());
+
+  // A failing access check on a function call results in an exception.
+  CHECK(g1->Call(global, 0, NULL).IsEmpty());
+  CHECK(g2->Call(global, 0, NULL).IsEmpty());
+
+  // No failing access check when just returning a constant.
+  CHECK(h->Call(global, 0, NULL)->Equals(v8_num(1)));
+
+  // Now compile the source again and get the newly compiled functions, except
+  // for h, for which access is blocked.
+  CompileRun(source);
+  f1 = Local<Function>::Cast(context->Global()->Get(v8_str("f1")));
+  f2 = Local<Function>::Cast(context->Global()->Get(v8_str("f2")));
+  g1 = Local<Function>::Cast(context->Global()->Get(v8_str("g1")));
+  g2 = Local<Function>::Cast(context->Global()->Get(v8_str("g2")));
+  CHECK(context->Global()->Get(v8_str("h"))->IsUndefined());
+
+  // A failing access check on a property get results in undefined.
+  CHECK(f1->Call(global, 0, NULL)->IsUndefined());
+  CHECK(f2->Call(global, 0, NULL)->IsUndefined());
+
+  // A failing access check on a function call results in an exception.
+  CHECK(g1->Call(global, 0, NULL).IsEmpty());
+  CHECK(g2->Call(global, 0, NULL).IsEmpty());
+}
+
+
 // This test verifies that pre-compilation (aka preparsing) can be called
 // without initializing the whole VM. Thus we cannot run this test in a
 // multi-threaded setup.
@@ -10766,7 +10801,9 @@
 
 
 // Tests the C++ StackTrace API.
-THREADED_TEST(CaptureStackTrace) {
+// TODO(3074796): Reenable this as a THREADED_TEST once it passes.
+// THREADED_TEST(CaptureStackTrace) {
+TEST(CaptureStackTrace) {
   v8::HandleScope scope;
   v8::Handle<v8::String> origin = v8::String::New("capture-stack-trace-test");
   Local<ObjectTemplate> templ = ObjectTemplate::New();
diff --git a/test/cctest/test-dataflow.cc b/test/cctest/test-dataflow.cc
index 003ac66..5894de2 100644
--- a/test/cctest/test-dataflow.cc
+++ b/test/cctest/test-dataflow.cc
@@ -52,6 +52,24 @@
   }
 
   {
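+    // Check that the iterator visits set bits on both sides of the first
+    // 32-bit word boundary of the vector (bit 33 lies in the second word).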
+    BitVector v(64);
+    v.Add(27);
+    v.Add(30);
+    v.Add(31);
+    v.Add(33);
+    BitVector::Iterator iter(&v);
+    CHECK_EQ(27, iter.Current());
+    iter.Advance();
+    CHECK_EQ(30, iter.Current());
+    iter.Advance();
+    CHECK_EQ(31, iter.Current());
+    iter.Advance();
+    CHECK_EQ(33, iter.Current());
+    iter.Advance();
+    CHECK(iter.Done());
+  }
+
+  {
     BitVector v(15);
     v.Add(0);
     BitVector w(15);
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 7791185..87f9cab 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,11 +32,13 @@
 #include "v8.h"
 
 #include "api.h"
+#include "cctest.h"
 #include "compilation-cache.h"
 #include "debug.h"
+#include "deoptimizer.h"
 #include "platform.h"
 #include "stub-cache.h"
-#include "cctest.h"
+#include "utils.h"
 
 
 using ::v8::internal::EmbeddedVector;
@@ -515,16 +517,52 @@
 // ---
 
 
-// Source for The JavaScript function which picks out the function name of the
-// top frame.
+// Source for the JavaScript function which picks out the function
+// name of a frame.
 const char* frame_function_name_source =
-    "function frame_function_name(exec_state) {"
-    "  return exec_state.frame(0).func().name();"
+    "function frame_function_name(exec_state, frame_number) {"
+    "  return exec_state.frame(frame_number).func().name();"
     "}";
 v8::Local<v8::Function> frame_function_name;
 
 
-// Source for The JavaScript function which picks out the source line for the
+// Source for the JavaScript function which picks out the name of the
+// first argument of a frame.
+const char* frame_argument_name_source =
+    "function frame_argument_name(exec_state, frame_number) {"
+    "  return exec_state.frame(frame_number).argumentName(0);"
+    "}";
+v8::Local<v8::Function> frame_argument_name;
+
+
+// Source for the JavaScript function which picks out the value of the
+// first argument of a frame.
+const char* frame_argument_value_source =
+    "function frame_argument_value(exec_state, frame_number) {"
+    "  return exec_state.frame(frame_number).argumentValue(0).value_;"
+    "}";
+v8::Local<v8::Function> frame_argument_value;
+
+
+// Source for the JavaScript function which picks out the name of the
+// first local variable of a frame.
+const char* frame_local_name_source =
+    "function frame_local_name(exec_state, frame_number) {"
+    "  return exec_state.frame(frame_number).localName(0);"
+    "}";
+v8::Local<v8::Function> frame_local_name;
+
+
+// Source for the JavaScript function which picks out the value of the
+// first local variable of a frame.
+const char* frame_local_value_source =
+    "function frame_local_value(exec_state, frame_number) {"
+    "  return exec_state.frame(frame_number).localValue(0).value_;"
+    "}";
+v8::Local<v8::Function> frame_local_value;
+
+
+// Source for the JavaScript function which picks out the source line for the
 // top frame.
 const char* frame_source_line_source =
     "function frame_source_line(exec_state) {"
@@ -533,7 +571,7 @@
 v8::Local<v8::Function> frame_source_line;
 
 
-// Source for The JavaScript function which picks out the source column for the
+// Source for the JavaScript function which picks out the source column for the
 // top frame.
 const char* frame_source_column_source =
     "function frame_source_column(exec_state) {"
@@ -542,7 +580,7 @@
 v8::Local<v8::Function> frame_source_column;
 
 
-// Source for The JavaScript function which picks out the script name for the
+// Source for the JavaScript function which picks out the script name for the
 // top frame.
 const char* frame_script_name_source =
     "function frame_script_name(exec_state) {"
@@ -551,7 +589,7 @@
 v8::Local<v8::Function> frame_script_name;
 
 
-// Source for The JavaScript function which picks out the script data for the
+// Source for the JavaScript function which picks out the script data for the
 // top frame.
 const char* frame_script_data_source =
     "function frame_script_data(exec_state) {"
@@ -560,7 +598,7 @@
 v8::Local<v8::Function> frame_script_data;
 
 
-// Source for The JavaScript function which picks out the script data from
+// Source for the JavaScript function which picks out the script data from
 // AfterCompile event
 const char* compiled_script_data_source =
     "function compiled_script_data(event_data) {"
@@ -569,7 +607,7 @@
 v8::Local<v8::Function> compiled_script_data;
 
 
-// Source for The JavaScript function which returns the number of frames.
+// Source for the JavaScript function which returns the number of frames.
 static const char* frame_count_source =
     "function frame_count(exec_state) {"
     "  return exec_state.frameCount();"
@@ -603,8 +641,8 @@
     break_point_hit_count++;
     if (!frame_function_name.IsEmpty()) {
       // Get the name of the function.
-      const int argc = 1;
-      v8::Handle<v8::Value> argv[argc] = { exec_state };
+      const int argc = 2;
+      v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(0) };
       v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
                                                                argc, argv);
       if (result->IsUndefined()) {
@@ -834,8 +872,8 @@
     // Check that the current function is the expected.
     CHECK(break_point_hit_count <
           StrLength(expected_step_sequence));
-    const int argc = 1;
-    v8::Handle<v8::Value> argv[argc] = { exec_state };
+    const int argc = 2;
+    v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(0) };
     v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
                                                              argc, argv);
     CHECK(result->IsString());
@@ -2586,6 +2624,10 @@
   v8::Local<v8::Function> foo = CompileFunction(&env,
                                                 "function foo(){a=1;b=1;c=1;}",
                                                 "foo");
+
+  // Run foo to allow it to get optimized.
+  CompileRun("a=0; b=0; c=0; foo();");
+
   SetBreakPoint(foo, 3);
 
   // Register a debug event listener which steps and counts.
@@ -2635,7 +2677,8 @@
       "    y = 1;\n"
       "    x = a[i];\n"
       "  }\n"
-      "}\n",
+      "}\n"
+      "y=0\n",
       "foo");
 
   // Create array [0,1,2,3,4,5,6,7,8,9]
@@ -2681,7 +2724,8 @@
       "    y = 1;\n"
       "    a[i] = 42;\n"
       "  }\n"
-      "}\n",
+      "}\n"
+      "y=0\n",
       "foo");
 
   // Create array [0,1,2,3,4,5,6,7,8,9]
@@ -2753,15 +2797,12 @@
 }
 
 
-static void DoDebugStepNamedStoreLoop(int expected, bool full_compiler = true) {
+static void DoDebugStepNamedStoreLoop(int expected) {
   v8::HandleScope scope;
   DebugLocalContext env;
 
-  // Register a debug event listener which steps and counts before compiling the
-  // function to ensure the full compiler is used.
-  if (full_compiler) {
-    v8::Debug::SetDebugEventListener(DebugEventStep);
-  }
+  // Register a debug event listener which steps and counts.
+  v8::Debug::SetDebugEventListener(DebugEventStep);
 
   // Create a function for testing stepping of named store.
   v8::Local<v8::Function> foo = CompileFunction(
@@ -2777,12 +2818,6 @@
   // Call function without any break points to ensure inlining is in place.
   foo->Call(env->Global(), 0, NULL);
 
-  // Register a debug event listener which steps and counts after compiling the
-  // function to ensure the optimizing compiler is used.
-  if (!full_compiler) {
-    v8::Debug::SetDebugEventListener(DebugEventStep);
-  }
-
   // Setup break point and step through the function.
   SetBreakPoint(foo, 3);
   step_action = StepNext;
@@ -2798,20 +2833,11 @@
 
 
 // Test of the stepping mechanism for named load in a loop.
-TEST(DebugStepNamedStoreLoopFull) {
-  // With the full compiler it is possible to break on the for statement.
+TEST(DebugStepNamedStoreLoop) {
   DoDebugStepNamedStoreLoop(22);
 }
 
 
-// Test of the stepping mechanism for named load in a loop.
-TEST(DebugStepNamedStoreLoopOptimizing) {
-  // With the optimizing compiler it is not possible to break on the for
-  // statement as it uses a local variable thus no IC's.
-  DoDebugStepNamedStoreLoop(11, false);
-}
-
-
 // Test the stepping mechanism with different ICs.
 TEST(DebugStepLinearMixedICs) {
   v8::HandleScope scope;
@@ -2828,6 +2854,10 @@
       "  var index='name';"
       "  var y = {};"
       "  a=1;b=2;x=a;y[index]=3;x=y[index];bar();}", "foo");
+
+  // Run functions to allow them to get optimized.
+  CompileRun("a=0; b=0; bar(); foo();");
+
   SetBreakPoint(foo, 0);
 
   step_action = StepIn;
@@ -2862,15 +2892,18 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const char* src = "function foo() { "
                     "  var a;"
                     "  var b = 1;"
                     "  var c = foo;"
                     "  var d = Math.floor;"
                     "  var e = b + d(1.2);"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+
   SetBreakPoint(foo, 0);
 
   // Stepping through the declarations.
@@ -2892,15 +2925,18 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const char* src = "function foo() { "
                     "  var a,b;"
                     "  a = 1;"
                     "  b = a + 2;"
                     "  b = 1 + 2 + 3;"
                     "  a = Math.floor(b);"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+
   SetBreakPoint(foo, 0);
 
   // Stepping through the declarations.
@@ -2922,7 +2958,8 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  a = 1;"
@@ -2932,7 +2969,8 @@
                     "    c = 1;"
                     "    d = 1;"
                     "  }"
-                    "}";
+                    "}"
+                    "a=0; b=0; c=0; d=0; foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   SetBreakPoint(foo, 0);
 
@@ -2963,7 +3001,8 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  a = 1;"
@@ -2979,7 +3018,8 @@
                     "      f = 1;"
                     "      break;"
                     "  }"
-                    "}";
+                    "}"
+                    "a=0; b=0; c=0; d=0; e=0; f=0; foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   SetBreakPoint(foo, 0);
 
@@ -3017,14 +3057,16 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  var a = 0;"
                     "  while (a < x) {"
                     "    a++;"
                     "  }"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   SetBreakPoint(foo, 8);  // "var a = 0;"
 
@@ -3033,14 +3075,14 @@
   break_point_hit_count = 0;
   v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(10) };
   foo->Call(env->Global(), argc, argv_10);
-  CHECK_EQ(23, break_point_hit_count);
+  CHECK_EQ(22, break_point_hit_count);
 
   // Looping 100 times.
   step_action = StepIn;
   break_point_hit_count = 0;
   v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(100) };
   foo->Call(env->Global(), argc, argv_100);
-  CHECK_EQ(203, break_point_hit_count);
+  CHECK_EQ(202, break_point_hit_count);
 
   // Get rid of the debug event listener.
   v8::Debug::SetDebugEventListener(NULL);
@@ -3055,14 +3097,16 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  var a = 0;"
                     "  do {"
                     "    a++;"
                     "  } while (a < x)"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   SetBreakPoint(foo, 8);  // "var a = 0;"
 
@@ -3093,15 +3137,18 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  a = 1;"
                     "  for (i = 0; i < x; i++) {"
                     "    b = 1;"
                     "  }"
-                    "}";
+                    "}"
+                    "a=0; b=0; i=0; foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
+
   SetBreakPoint(foo, 8);  // "a = 1;"
 
   // Looping 10 times.
@@ -3131,7 +3178,8 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  var a = 0;"
@@ -3144,7 +3192,8 @@
                     "    c++;"
                     "  }"
                     "  return b;"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   v8::Handle<v8::Value> result;
   SetBreakPoint(foo, 8);  // "var a = 0;"
@@ -3180,7 +3229,8 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const int argc = 1;
   const char* src = "function foo(x) { "
                     "  var a = 0;"
@@ -3193,7 +3243,8 @@
                     "    c++;"
                     "  }"
                     "  return b;"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   v8::Handle<v8::Value> result;
   SetBreakPoint(foo, 8);  // "var a = 0;"
@@ -3230,13 +3281,16 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   v8::Local<v8::Function> foo;
   const char* src_1 = "function foo() { "
                       "  var a = [1, 2];"
                       "  for (x in a) {"
                       "    b = 0;"
                       "  }"
-                      "}";
+                      "}"
+                      "foo()";
   foo = CompileFunction(&env, src_1, "foo");
   SetBreakPoint(foo, 0);  // "var a = ..."
 
@@ -3245,12 +3299,15 @@
   foo->Call(env->Global(), 0, NULL);
   CHECK_EQ(6, break_point_hit_count);
 
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const char* src_2 = "function foo() { "
                       "  var a = {a:[1, 2, 3]};"
                       "  for (x in a.a) {"
                       "    b = 0;"
                       "  }"
-                      "}";
+                      "}"
+                      "foo()";
   foo = CompileFunction(&env, src_2, "foo");
   SetBreakPoint(foo, 0);  // "var a = ..."
 
@@ -3272,12 +3329,14 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const char* src = "function foo(x) { "
                     "  var a = {};"
                     "  with (a) {}"
                     "  with (b) {}"
-                    "}";
+                    "}"
+                    "foo()";
   env->Global()->Set(v8::String::New("b"), v8::Object::New());
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   v8::Handle<v8::Value> result;
@@ -3301,12 +3360,14 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStep);
 
-  // Create a function for testing stepping.
+  // Create a function for testing stepping. Run it to allow it to get
+  // optimized.
   const char* src = "function foo(x) { "
                     "  var a;"
                     "  a = x ? 1 : 2;"
                     "  return a;"
-                    "}";
+                    "}"
+                    "foo()";
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   SetBreakPoint(foo, 0);  // "var a;"
 
@@ -3340,10 +3401,12 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStepSequence);
 
-  // Create functions for testing stepping.
+  // Create functions for testing stepping. Run them to allow them to get
+  // optimized.
   const char* src = "function a() {b();c();}; "
                     "function b() {c();}; "
-                    "function c() {}; ";
+                    "function c() {}; "
+                    "a(); b(); c()";
   v8::Local<v8::Function> a = CompileFunction(&env, src, "a");
   SetBreakPoint(a, 0);
 
@@ -3389,11 +3452,13 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStepSequence);
 
-  // Create functions for testing stepping.
+  // Create functions for testing stepping. Run them to allow them to get
+  // optimized.
   const char* src = "function a() {b(c(d()),d());c(d());d()}; "
                     "function b(x,y) {c();}; "
                     "function c(x) {}; "
-                    "function d() {}; ";
+                    "function d() {}; "
+                    "a(); b(); c(); d()";
   v8::Local<v8::Function> a = CompileFunction(&env, src, "a");
   SetBreakPoint(a, 0);
 
@@ -3439,10 +3504,12 @@
   // Register a debug event listener which steps and counts.
   v8::Debug::SetDebugEventListener(DebugEventStepSequence);
 
-  // Create functions for testing stepping.
+  // Create functions for testing stepping. Run them to allow them to get
+  // optimized.
   const char* src = "function a() {b(false);c();}; "
                     "function b(x) {if(x){c();};}; "
-                    "function c() {}; ";
+                    "function c() {}; "
+                    "a(); b(); c()";
   v8::Local<v8::Function> a = CompileFunction(&env, src, "a");
   SetBreakPoint(a, 0);
 
@@ -6116,8 +6183,8 @@
     // Get the name of the top frame function.
     if (!frame_function_name.IsEmpty()) {
       // Get the name of the function.
-      const int argc = 1;
-      v8::Handle<v8::Value> argv[argc] = { exec_state };
+      const int argc = 2;
+      v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(0) };
       v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
                                                                argc, argv);
       if (result->IsUndefined()) {
@@ -6846,6 +6913,7 @@
   }
 }
 
+
 // Check that event details contain context where debug event occured.
 TEST(DebugEventBreakData) {
   v8::HandleScope scope;
@@ -6898,6 +6966,156 @@
   CheckDebuggerUnloaded();
 }
 
+static bool debug_event_break_deoptimize_done = false;
+
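+// Debug event listener which requests another debug break after every break
+// event, and deoptimizes all optimized code when the break happens inside
+// function bar.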
+static void DebugEventBreakDeoptimize(v8::DebugEvent event,
+                                      v8::Handle<v8::Object> exec_state,
+                                      v8::Handle<v8::Object> event_data,
+                                      v8::Handle<v8::Value> data) {
+  if (event == v8::Break) {
+    if (!frame_function_name.IsEmpty()) {
+      // Get the name of the function.
+      const int argc = 2;
+      v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(0) };
+      v8::Handle<v8::Value> result =
+          frame_function_name->Call(exec_state, argc, argv);
+      if (!result->IsUndefined()) {
+        char fn[80];
+        CHECK(result->IsString());
+        v8::Handle<v8::String> function_name(result->ToString());
+        function_name->WriteAscii(fn);
+        if (strcmp(fn, "bar") == 0) {
+          i::Deoptimizer::DeoptimizeAll();
+          debug_event_break_deoptimize_done = true;
+        }
+      }
+    }
+
+    v8::Debug::DebugBreak();
+  }
+}
+
+
+// Test deoptimization when execution is broken using the debug break stack
+// check interrupt.
+TEST(DeoptimizeDuringDebugBreak) {
+  v8::HandleScope scope;
+  DebugLocalContext env;
+  env.ExposeDebug();
+
+  // Create a function for checking the name of the function at a break point.
+  frame_function_name = CompileFunction(&env,
+                                        frame_function_name_source,
+                                        "frame_function_name");
+
+  // Set a debug event listener which will keep interrupting execution until
+  // debug break. When inside function bar it will deoptimize all functions.
+  // This tests the lazy deoptimization bailout for the stack check, as the
+  // first break in function bar when using debug break without break points
+  // will be at the initial stack check.
+  v8::Debug::SetDebugEventListener(DebugEventBreakDeoptimize,
+                                   v8::Undefined());
+
+  // Compile and run function bar; depending on the flag settings this will
+  // optimize it.
+  v8::Script::Compile(v8::String::New("function bar(){}; bar()"))->Run();
+
+  // Set debug break and call bar again.
+  v8::Debug::DebugBreak();
+  v8::Script::Compile(v8::String::New("bar()"))->Run();
+
+  CHECK(debug_event_break_deoptimize_done);
+
+  v8::Debug::SetDebugEventListener(NULL);
+}
+
+
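+// Debug event listener which inspects the function name, arguments and locals
+// of the two top frames (both activations of 'loop') when the break is hit.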
+static void DebugEventBreakWithOptimizedStack(v8::DebugEvent event,
+                                              v8::Handle<v8::Object> exec_state,
+                                              v8::Handle<v8::Object> event_data,
+                                              v8::Handle<v8::Value> data) {
+  if (event == v8::Break) {
+    if (!frame_function_name.IsEmpty()) {
+      for (int i = 0; i < 2; i++) {
+        const int argc = 2;
+        v8::Handle<v8::Value> argv[argc] = { exec_state, v8::Integer::New(i) };
+        // Get the name of the function in frame i.
+        v8::Handle<v8::Value> result =
+            frame_function_name->Call(exec_state, argc, argv);
+        CHECK(result->IsString());
+        v8::Handle<v8::String> function_name(result->ToString());
+        CHECK(function_name->Equals(v8::String::New("loop")));
+        // Get the name of the first argument in frame i.
+        result = frame_argument_name->Call(exec_state, argc, argv);
+        CHECK(result->IsString());
+        v8::Handle<v8::String> argument_name(result->ToString());
+        CHECK(argument_name->Equals(v8::String::New("count")));
+        // Get the value of the first argument in frame i. If the
+        // function is optimized the value will be undefined, otherwise
+        // the value will be '1 - i'.
+        //
+        // TODO(3141533): We should be able to get the real value for
+        // optimized frames.
+        result = frame_argument_value->Call(exec_state, argc, argv);
+        CHECK(result->IsUndefined() || (result->Int32Value() == 1 - i));
+        // Get the name of the first local variable.
+        result = frame_local_name->Call(exec_state, argc, argv);
+        CHECK(result->IsString());
+        v8::Handle<v8::String> local_name(result->ToString());
+        CHECK(local_name->Equals(v8::String::New("local")));
+        // Get the value of the first local variable. If the function
+        // is optimized the value will be undefined, otherwise it will
+        // be 42.
+        //
+        // TODO(3141533): We should be able to get the real value for
+        // optimized frames.
+        result = frame_local_value->Call(exec_state, argc, argv);
+        CHECK(result->IsUndefined() || (result->Int32Value() == 42));
+      }
+    }
+  }
+}
+
+
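+// Native callback installed as 'scheduleBreak' in the test below; it
+// registers the stack-inspecting listener above and requests a debug break.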
+static v8::Handle<v8::Value> ScheduleBreak(const v8::Arguments& args) {
+  v8::Debug::SetDebugEventListener(DebugEventBreakWithOptimizedStack,
+                                   v8::Undefined());
+  v8::Debug::DebugBreak();
+  return v8::Undefined();
+}
+
+
+TEST(DebugBreakStackInspection) {
+  v8::HandleScope scope;
+  DebugLocalContext env;
+
+  frame_function_name =
+      CompileFunction(&env, frame_function_name_source, "frame_function_name");
+  frame_argument_name =
+      CompileFunction(&env, frame_argument_name_source, "frame_argument_name");
+  frame_argument_value = CompileFunction(&env,
+                                         frame_argument_value_source,
+                                         "frame_argument_value");
+  frame_local_name =
+      CompileFunction(&env, frame_local_name_source, "frame_local_name");
+  frame_local_value =
+      CompileFunction(&env, frame_local_value_source, "frame_local_value");
+
+  v8::Handle<v8::FunctionTemplate> schedule_break_template =
+      v8::FunctionTemplate::New(ScheduleBreak);
+  v8::Handle<v8::Function> schedule_break =
+      schedule_break_template->GetFunction();
+  env->Global()->Set(v8_str("scheduleBreak"), schedule_break);
+
+  const char* src =
+      "function loop(count) {"
+      "  var local = 42;"
+      "  if (count < 1) { scheduleBreak(); loop(count + 1); }"
+      "}"
+      "loop(0);";
+  v8::Script::Compile(v8::String::New(src))->Run();
+}
+
 
 // Test that setting the terminate execution flag during debug break processing.
 static void TestDebugBreakInLoop(const char* loop_head,
diff --git a/test/cctest/test-deoptimization.cc b/test/cctest/test-deoptimization.cc
new file mode 100644
index 0000000..1745355
--- /dev/null
+++ b/test/cctest/test-deoptimization.cc
@@ -0,0 +1,714 @@
+// Copyright 2007-2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "compilation-cache.h"
+#include "debug.h"
+#include "deoptimizer.h"
+#include "platform.h"
+#include "stub-cache.h"
+#include "cctest.h"
+
+
+using ::v8::internal::Handle;
+using ::v8::internal::Object;
+using ::v8::internal::JSFunction;
+using ::v8::internal::Deoptimizer;
+using ::v8::internal::EmbeddedVector;
+using ::v8::internal::OS;
+
+// Size of temp buffer for formatting small strings.
+#define SMALL_STRING_BUFFER_SIZE 80
+
+// Utility class that sets --allow-natives-syntax, --always-opt and
+// --nouse-inlining when constructed and restores the previous flag values
+// when destroyed.
+class AlwaysOptimizeAllowNativesSyntaxNoInlining {
+ public:
+  AlwaysOptimizeAllowNativesSyntaxNoInlining()
+      : always_opt_(i::FLAG_always_opt),
+        allow_natives_syntax_(i::FLAG_allow_natives_syntax),
+        use_inlining_(i::FLAG_use_inlining) {
+    i::FLAG_always_opt = true;
+    i::FLAG_allow_natives_syntax = true;
+    i::FLAG_use_inlining = false;
+  }
+
+  ~AlwaysOptimizeAllowNativesSyntaxNoInlining() {
+    i::FLAG_allow_natives_syntax = allow_natives_syntax_;
+    i::FLAG_always_opt = always_opt_;
+    i::FLAG_use_inlining = use_inlining_;
+  }
+
+ private:
+  bool always_opt_;
+  bool allow_natives_syntax_;
+  bool use_inlining_;
+};
+
+
+// Utility class that sets --allow-natives-syntax and --nouse-inlining when
+// constructed and restores the previous flag values when destroyed.
+class AllowNativesSyntaxNoInlining {
+ public:
+  AllowNativesSyntaxNoInlining()
+      : allow_natives_syntax_(i::FLAG_allow_natives_syntax),
+        use_inlining_(i::FLAG_use_inlining) {
+    i::FLAG_allow_natives_syntax = true;
+    i::FLAG_use_inlining = false;
+  }
+
+  ~AllowNativesSyntaxNoInlining() {
+    i::FLAG_allow_natives_syntax = allow_natives_syntax_;
+    i::FLAG_use_inlining = use_inlining_;
+  }
+
+ private:
+  bool allow_natives_syntax_;
+  bool use_inlining_;
+};
+
+
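+// Returns the internal JSFunction handle for a function property of the
+// given object.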
+Handle<JSFunction> GetJSFunction(v8::Handle<v8::Object> obj,
+                                 const char* property_name) {
+  v8::Local<v8::Function> fun =
+      v8::Local<v8::Function>::Cast(obj->Get(v8_str(property_name)));
+  return v8::Utils::OpenHandle(*fun);
+}
+
+
+TEST(DeoptimizeSimple) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  // Test lazy deoptimization of a simple function.
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "function h() { %DeoptimizeFunction(f); }"
+        "function g() { count++; h(); }"
+        "function f() { g(); };"
+        "f();"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+
+  // Test lazy deoptimization of a simple function. Call the function after the
+  // deoptimization while it is still activated further down the stack.
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "function g() { count++; %DeoptimizeFunction(f); f(false); }"
+        "function f(x) { if (x) { g(); } else { return } };"
+        "f(true);"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeSimpleWithArguments) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  // Test lazy deoptimization of a simple function with some arguments.
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "function h(x) { %DeoptimizeFunction(f); }"
+        "function g(x, y) { count++; h(x); }"
+        "function f(x, y, z) { g(1,x); y+z; };"
+        "f(1, \"2\", false);"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+
+  // Test lazy deoptimization of a simple function with some arguments. Call the
+  // function after the deoptimization while it is still activated further down
+  // the stack.
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "function g(x, y) { count++; %DeoptimizeFunction(f); f(false, 1, y); }"
+        "function f(x, y, z) { if (x) { g(x, y); } else { return y + z; } };"
+        "f(true, 1, \"2\");"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeSimpleNested) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  // Test lazy deoptimization of a simple function. Have a nested function call
+  // do the deoptimization.
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "var result = 0;"
+        "function h(x, y, z) { return x + y + z; }"
+        "function g(z) { count++; %DeoptimizeFunction(f); return z;}"
+        "function f(x,y,z) { return h(x, y, g(z)); };"
+        "result = f(1, 2, 3);"
+        "gc(); gc()");
+
+    CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+    CHECK_EQ(6, env->Global()->Get(v8_str("result"))->Int32Value());
+    CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+    CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+  }
+}
+
+
+TEST(DeoptimizeRecursive) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  {
+    // Test lazy deoptimization of a simple function called recursively. Call
+    // the function recursively a number of times before deoptimizing it.
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "var calls = 0;"
+        "function g() { count++; %DeoptimizeFunction(f); }"
+        "function f(x) { calls++; if (x > 0) { f(x - 1); } else { g(); } };"
+        "f(10); gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(11, env->Global()->Get(v8_str("calls"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+
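+  // Resolve the internal handle of f after the deoptimization.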
+  v8::Local<v8::Function> fun =
+      v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+  Handle<v8::internal::JSFunction> f = v8::Utils::OpenHandle(*fun);
+}
+
+
+TEST(DeoptimizeMultiple) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "var result = 0;"
+        "function g() { count++;"
+        "               %DeoptimizeFunction(f1);"
+        "               %DeoptimizeFunction(f2);"
+        "               %DeoptimizeFunction(f3);"
+        "               %DeoptimizeFunction(f4);}"
+        "function f4(x) { g(); };"
+        "function f3(x, y, z) { f4(); return x + y + z; };"
+        "function f2(x, y) { return x + f3(y + 1, y + 1, y + 1) + y; };"
+        "function f1(x) { return f2(x + 1, x + 1) + x; };"
+        "result = f1(1);"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeConstructor) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "function g() { count++;"
+        "               %DeoptimizeFunction(f); }"
+        "function f() {  g(); };"
+        "result = new f() instanceof f;"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK(env->Global()->Get(v8_str("result"))->IsTrue());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "var result = 0;"
+        "function g() { count++;"
+        "               %DeoptimizeFunction(f); }"
+        "function f(x, y) { this.x = x; g(); this.y = y; };"
+        "result = new f(1, 2);"
+        "result = result.x + result.y;"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(3, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeConstructorMultiple) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  {
+    AlwaysOptimizeAllowNativesSyntaxNoInlining options;
+    CompileRun(
+        "var count = 0;"
+        "var result = 0;"
+        "function g() { count++;"
+        "               %DeoptimizeFunction(f1);"
+        "               %DeoptimizeFunction(f2);"
+        "               %DeoptimizeFunction(f3);"
+        "               %DeoptimizeFunction(f4);}"
+        "function f4(x) { this.result = x; g(); };"
+        "function f3(x, y, z) { this.result = new f4(x + y + z).result; };"
+        "function f2(x, y) {"
+        "    this.result = x + new f3(y + 1, y + 1, y + 1).result + y; };"
+        "function f1(x) { this.result = new f2(x + 1, x + 1).result + x; };"
+        "result = new f1(1).result;"
+        "gc(); gc()");
+  }
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeBinaryOperationADDString) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  const char* f_source = "function f(x, y) { return x + y; };";
+
+  {
+    AllowNativesSyntaxNoInlining options;
+    // Compile function f and collect type feedback to insert a binary op
+    // stub call in the optimized code.
+    i::FLAG_prepare_always_opt = true;
+    CompileRun("var count = 0;"
+               "var result = 0;"
+               "var deopt = false;"
+               "function X() { };"
+               "X.prototype.toString = function () {"
+               "  if (deopt) { count++; %DeoptimizeFunction(f); } return 'an X'"
+               "};");
+    CompileRun(f_source);
+    CompileRun("for (var i = 0; i < 5; i++) {"
+               "  f('a+', new X());"
+               "};");
+
+    // Compile an optimized version of f.
+    i::FLAG_always_opt = true;
+    CompileRun(f_source);
+    CompileRun("f('a+', new X());");
+    CHECK(!i::V8::UseCrankshaft() ||
+          GetJSFunction(env->Global(), "f")->IsOptimized());
+
+    // Call f and force deoptimization while processing the binary operation.
+    CompileRun("deopt = true;"
+               "var result = f('a+', new X());"
+               "gc(); gc();");
+  }
+
+  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  v8::Handle<v8::Value> result = env->Global()->Get(v8_str("result"));
+  CHECK(result->IsString());
+  v8::String::AsciiValue ascii(result);
+  CHECK_EQ("a+an X", *ascii);
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
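+// Compiles a constructor X whose valueOf returns 8 and deoptimizes the
+// global function f when the 'deopt' flag is set.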
+static void CompileConstructorWithDeoptimizingValueOf() {
+  CompileRun("var count = 0;"
+             "var result = 0;"
+             "var deopt = false;"
+             "function X() { };"
+             "X.prototype.valueOf = function () {"
+             "  if (deopt) { count++; %DeoptimizeFunction(f); } return 8"
+             "};");
+}
+
+
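+// Shared driver for the binary operation deoptimization tests. Compiles f
+// with the given operator, optimizes it, and forces deoptimization from
+// within X.prototype.valueOf while the binary operation is processed.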
+static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
+                                         const char* binary_op) {
+  EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> f_source_buffer;
+  OS::SNPrintF(f_source_buffer,
+               "function f(x, y) { return x %s y; };",
+               binary_op);
+  char* f_source = f_source_buffer.start();
+
+  AllowNativesSyntaxNoInlining options;
+  // Compile function f and collect type feedback to insert a binary op
+  // stub call in the optimized code.
+  i::FLAG_prepare_always_opt = true;
+  CompileConstructorWithDeoptimizingValueOf();
+  CompileRun(f_source);
+  CompileRun("for (var i = 0; i < 5; i++) {"
+             "  f(8, new X());"
+             "};");
+
+  // Compile an optimized version of f.
+  i::FLAG_always_opt = true;
+  CompileRun(f_source);
+  CompileRun("f(7, new X());");
+  CHECK(!i::V8::UseCrankshaft() ||
+        GetJSFunction((*env)->Global(), "f")->IsOptimized());
+
+  // Call f and force deoptimization while processing the binary operation.
+  CompileRun("deopt = true;"
+             "var result = f(7, new X());"
+             "gc(); gc();");
+
+  CHECK(!GetJSFunction((*env)->Global(), "f")->IsOptimized());
+}
+
+
+TEST(DeoptimizeBinaryOperationADD) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  TestDeoptimizeBinaryOpHelper(&env, "+");
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(15, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeBinaryOperationSUB) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  TestDeoptimizeBinaryOpHelper(&env, "-");
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(-1, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeBinaryOperationMUL) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  TestDeoptimizeBinaryOpHelper(&env, "*");
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(56, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeBinaryOperationDIV) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  TestDeoptimizeBinaryOpHelper(&env, "/");
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(0, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeBinaryOperationMOD) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  TestDeoptimizeBinaryOpHelper(&env, "%");
+
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(7, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeCompare) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  const char* f_source = "function f(x, y) { return x < y; };";
+
+  {
+    AllowNativesSyntaxNoInlining options;
+    // Compile function f and collect type feedback to insert a compare IC
+    // call in the optimized code.
+    i::FLAG_prepare_always_opt = true;
+    CompileRun("var count = 0;"
+               "var result = 0;"
+               "var deopt = false;"
+               "function X() { };"
+               "X.prototype.toString = function () {"
+               "  if (deopt) { count++; %DeoptimizeFunction(f); } return 'b'"
+               "};");
+    CompileRun(f_source);
+    CompileRun("for (var i = 0; i < 5; i++) {"
+               "  f('a', new X());"
+               "};");
+
+    // Compile an optimized version of f.
+    i::FLAG_always_opt = true;
+    CompileRun(f_source);
+    CompileRun("f('a', new X());");
+    CHECK(!i::V8::UseCrankshaft() ||
+          GetJSFunction(env->Global(), "f")->IsOptimized());
+
+    // Call f and force deoptimization while processing the comparison.
+    CompileRun("deopt = true;"
+               "var result = f('a', new X());"
+               "gc(); gc();");
+  }
+
+  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(true, env->Global()->Get(v8_str("result"))->BooleanValue());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeLoadICStoreIC) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  // Functions to generate load/store/keyed load/keyed store IC calls.
+  const char* f1_source = "function f1(x) { return x.y; };";
+  const char* g1_source = "function g1(x) { x.y = 1; };";
+  const char* f2_source = "function f2(x, y) { return x[y]; };";
+  const char* g2_source = "function g2(x, y) { x[y] = 1; };";
+
+  {
+    AllowNativesSyntaxNoInlining options;
+    // Compile functions and collect type feedback to insert IC
+    // calls in the optimized code.
+    i::FLAG_prepare_always_opt = true;
+    CompileRun("var count = 0;"
+               "var result = 0;"
+               "var deopt = false;"
+               "function X() { };"
+               "X.prototype.__defineGetter__('y', function () {"
+               "  if (deopt) { count++; %DeoptimizeFunction(f1); };"
+               "  return 13;"
+               "});"
+               "X.prototype.__defineSetter__('y', function () {"
+               "  if (deopt) { count++; %DeoptimizeFunction(g1); };"
+               "});"
+               "X.prototype.__defineGetter__('z', function () {"
+               "  if (deopt) { count++; %DeoptimizeFunction(f2); };"
+               "  return 13;"
+               "});"
+               "X.prototype.__defineSetter__('z', function () {"
+               "  if (deopt) { count++; %DeoptimizeFunction(g2); };"
+               "});");
+    CompileRun(f1_source);
+    CompileRun(g1_source);
+    CompileRun(f2_source);
+    CompileRun(g2_source);
+    CompileRun("for (var i = 0; i < 5; i++) {"
+               "  f1(new X());"
+               "  g1(new X());"
+               "  f2(new X(), 'z');"
+               "  g2(new X(), 'z');"
+               "};");
+
+    // Compile an optimized version of the functions.
+    i::FLAG_always_opt = true;
+    CompileRun(f1_source);
+    CompileRun(g1_source);
+    CompileRun(f2_source);
+    CompileRun(g2_source);
+    CompileRun("f1(new X());");
+    CompileRun("g1(new X());");
+    CompileRun("f2(new X(), 'z');");
+    CompileRun("g2(new X(), 'z');");
+    if (i::V8::UseCrankshaft()) {
+      CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
+      CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
+      CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
+      CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+    }
+
+    // Call the functions and force deoptimization while processing the ICs.
+    CompileRun("deopt = true;"
+               "var result = f1(new X());"
+               "g1(new X());"
+               "f2(new X(), 'z');"
+               "g2(new X(), 'z');"
+               "gc(); gc();");
+  }
+
+  CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
+  CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
+  CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
+  CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
+  CHECK_EQ(4, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
+
+
+TEST(DeoptimizeLoadICStoreICNested) {
+  v8::HandleScope scope;
+  const char* extension_list[] = { "v8/gc" };
+  v8::ExtensionConfiguration extensions(1, extension_list);
+  LocalContext env(&extensions);
+
+  // Functions to generate load/store/keyed load/keyed store IC calls.
+  const char* f1_source = "function f1(x) { return x.y; };";
+  const char* g1_source = "function g1(x) { x.y = 1; };";
+  const char* f2_source = "function f2(x, y) { return x[y]; };";
+  const char* g2_source = "function g2(x, y) { x[y] = 1; };";
+
+  {
+    AllowNativesSyntaxNoInlining options;
+    // Compile functions and collect type feedback to insert IC
+    // calls in the optimized code.
+    i::FLAG_prepare_always_opt = true;
+    CompileRun("var count = 0;"
+               "var result = 0;"
+               "var deopt = false;"
+               "function X() { };"
+               "X.prototype.__defineGetter__('y', function () {"
+               "  g1(this);"
+               "  return 13;"
+               "});"
+               "X.prototype.__defineSetter__('y', function () {"
+               "  f2(this, 'z');"
+               "});"
+               "X.prototype.__defineGetter__('z', function () {"
+               "  g2(this, 'z');"
+               "});"
+               "X.prototype.__defineSetter__('z', function () {"
+               "  if (deopt) {"
+               "    count++;"
+               "    %DeoptimizeFunction(f1);"
+               "    %DeoptimizeFunction(g1);"
+               "    %DeoptimizeFunction(f2);"
+               "    %DeoptimizeFunction(g2); };"
+               "});");
+    CompileRun(f1_source);
+    CompileRun(g1_source);
+    CompileRun(f2_source);
+    CompileRun(g2_source);
+    CompileRun("for (var i = 0; i < 5; i++) {"
+               "  f1(new X());"
+               "  g1(new X());"
+               "  f2(new X(), 'z');"
+               "  g2(new X(), 'z');"
+               "};");
+
+    // Compile an optimized version of the functions.
+    i::FLAG_always_opt = true;
+    CompileRun(f1_source);
+    CompileRun(g1_source);
+    CompileRun(f2_source);
+    CompileRun(g2_source);
+    CompileRun("f1(new X());");
+    CompileRun("g1(new X());");
+    CompileRun("f2(new X(), 'z');");
+    CompileRun("g2(new X(), 'z');");
+    if (i::V8::UseCrankshaft()) {
+      CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
+      CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
+      CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
+      CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+    }
+
+    // Call the functions and force deoptimization while processing the ICs.
+    CompileRun("deopt = true;"
+               "var result = f1(new X());"
+               "gc(); gc();");
+  }
+
+  CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
+  CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
+  CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
+  CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
+  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+  CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
+  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount());
+}
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 95314d7..ad242fe 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -1193,4 +1193,69 @@
   CHECK_EQ(1, stream.eos_signaled());
 }
 
+
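+// Test that heap snapshot nodes can be looked up by their ids.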
+TEST(HeapSnapshotGetNodeById) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8::String::New("id"));
+  const v8::HeapGraphNode* root = snapshot->GetRoot();
+  CHECK_EQ(root, snapshot->GetNodeById(root->GetId()));
+  for (int i = 0, count = root->GetChildrenCount(); i < count; ++i) {
+    const v8::HeapGraphEdge* prop = root->GetChild(i);
+    CHECK_EQ(
+        prop->GetToNode(), snapshot->GetNodeById(prop->GetToNode()->GetId()));
+  }
+  // Check a big id, which should not exist yet.
+  CHECK_EQ(NULL, snapshot->GetNodeById(0x1000000UL));
+}
+
+
+namespace {
+
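+// Activity control that records the last reported progress and requests an
+// abort after a configurable number of progress reports.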
+class TestActivityControl : public v8::ActivityControl {
+ public:
+  explicit TestActivityControl(int abort_count)
+      : done_(0), total_(0), abort_count_(abort_count) {}
+  ControlOption ReportProgressValue(int done, int total) {
+    done_ = done;
+    total_ = total;
+    return --abort_count_ != 0 ? kContinue : kAbort;
+  }
+  int done() { return done_; }
+  int total() { return total_; }
+
+ private:
+  int done_;
+  int total_;
+  int abort_count_;
+};
+}  // namespace
+
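+// Test that an aborting activity control cancels snapshot generation while
+// a non-aborting one lets it run to completion.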
+TEST(TakeHeapSnapshotAborting) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  const int snapshots_count = v8::HeapProfiler::GetSnapshotsCount();
+  TestActivityControl aborting_control(3);
+  const v8::HeapSnapshot* no_snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8::String::New("abort"),
+                                     v8::HeapSnapshot::kFull,
+                                     &aborting_control);
+  CHECK_EQ(NULL, no_snapshot);
+  CHECK_EQ(snapshots_count, v8::HeapProfiler::GetSnapshotsCount());
+  CHECK_GT(aborting_control.total(), aborting_control.done());
+
+  TestActivityControl control(-1);  // Don't abort.
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8::String::New("full"),
+                                     v8::HeapSnapshot::kFull,
+                                     &control);
+  CHECK_NE(NULL, snapshot);
+  CHECK_EQ(snapshots_count + 1, v8::HeapProfiler::GetSnapshotsCount());
+  CHECK_EQ(control.total(), control.done());
+  CHECK_GT(control.total(), 0);
+}
+
 #endif  // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index fbe66ec..a23ee17 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -511,7 +511,7 @@
     if (!maybe_a->ToObject(&a)) continue;
     CHECK(a->IsSymbol());
     Object* b;
-    MaybeObject* maybe_b = Heap::LookupAsciiSymbol(string);
+    MaybeObject *maybe_b = Heap::LookupAsciiSymbol(string);
     if (!maybe_b->ToObject(&b)) continue;
     CHECK_EQ(b, a);
     CHECK(String::cast(b)->IsEqualTo(CStrVector(string)));
@@ -978,7 +978,9 @@
   Handle<String> foo_name = Factory::LookupAsciiSymbol("foo");
 
   // This compile will add the code to the compilation cache.
-  CompileRun(source);
+  { v8::HandleScope scope;
+    CompileRun(source);
+  }
 
   // Check function is compiled.
   Object* func_value =
@@ -1000,8 +1002,8 @@
   Heap::CollectAllGarbage(true);
 
   // foo should no longer be in the compilation cache
-  CHECK(!function->shared()->is_compiled());
-  CHECK(!function->is_compiled());
+  CHECK(!function->shared()->is_compiled() || function->IsOptimized());
+  CHECK(!function->is_compiled() || function->IsOptimized());
   // Call foo to get it recompiled.
   CompileRun("foo()");
   CHECK(function->shared()->is_compiled());
@@ -1021,6 +1023,20 @@
 }
 
 
+// Count the number of user functions in the weak list of optimized
+// functions attached to a global context.
+static int CountOptimizedUserFunctions(v8::Handle<v8::Context> context) {
+  int count = 0;
+  Handle<Context> icontext = v8::Utils::OpenHandle(*context);
+  Object* object = icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+  while (object->IsJSFunction() && !JSFunction::cast(object)->IsBuiltin()) {
+    count++;
+    object = JSFunction::cast(object)->next_function_link();
+  }
+  return count;
+}
+
+
 TEST(TestInternalWeakLists) {
   static const int kNumTestContexts = 10;
 
@@ -1032,9 +1048,63 @@
   // Create a number of global contexts which get linked together.
   for (int i = 0; i < kNumTestContexts; i++) {
     ctx[i] = v8::Context::New();
+
+    bool opt = (FLAG_always_opt && i::V8::UseCrankshaft());
+
     CHECK_EQ(i + 1, CountGlobalContexts());
 
     ctx[i]->Enter();
+
+    // Create a handle scope so no function objects get stuck in the
+    // outer handle scope.
+    v8::HandleScope scope;
+    const char* source = "function f1() { };"
+                         "function f2() { };"
+                         "function f3() { };"
+                         "function f4() { };"
+                         "function f5() { };";
+    CompileRun(source);
+    CHECK_EQ(0, CountOptimizedUserFunctions(ctx[i]));
+    CompileRun("f1()");
+    CHECK_EQ(opt ? 1 : 0, CountOptimizedUserFunctions(ctx[i]));
+    CompileRun("f2()");
+    CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctions(ctx[i]));
+    CompileRun("f3()");
+    CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
+    CompileRun("f4()");
+    CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
+    CompileRun("f5()");
+    CHECK_EQ(opt ? 5 : 0, CountOptimizedUserFunctions(ctx[i]));
+
+    // Remove function f1.
+    CompileRun("f1=null");
+
+    // Scavenge treats these references as strong.
+    for (int j = 0; j < 10; j++) {
+      Heap::PerformScavenge();
+      CHECK_EQ(opt ? 5 : 0, CountOptimizedUserFunctions(ctx[i]));
+    }
+
+    // Mark compact handles the weak references.
+    Heap::CollectAllGarbage(true);
+    CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
+
+    // Get rid of f3 and f5 in the same way.
+    CompileRun("f3=null");
+    for (int j = 0; j < 10; j++) {
+      Heap::PerformScavenge();
+      CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
+    }
+    Heap::CollectAllGarbage(true);
+    CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
+    CompileRun("f5=null");
+    for (int j = 0; j < 10; j++) {
+      Heap::PerformScavenge();
+      CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
+    }
+    Heap::CollectAllGarbage(true);
+    CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctions(ctx[i]));
+
     ctx[i]->Exit();
   }
 
@@ -1076,6 +1146,25 @@
 }
 
 
+// Count the number of user functions in the weak list of optimized
+// functions attached to a global context, forcing a GC after the
+// specified number of elements have been visited.
+static int CountOptimizedUserFunctionsWithGC(v8::Handle<v8::Context> context,
+                                             int n) {
+  int count = 0;
+  Handle<Context> icontext = v8::Utils::OpenHandle(*context);
+  Handle<Object> object(icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST));
+  while (object->IsJSFunction() &&
+         !Handle<JSFunction>::cast(object)->IsBuiltin()) {
+    count++;
+    if (count == n) Heap::CollectAllGarbage(true);
+    object = Handle<Object>(
+        Object::cast(JSFunction::cast(*object)->next_function_link()));
+  }
+  return count;
+}
+
+
 TEST(TestInternalWeakListsTraverseWithGC) {
   static const int kNumTestContexts = 10;
 
@@ -1090,17 +1179,44 @@
     ctx[i] = v8::Context::New();
     CHECK_EQ(i + 1, CountGlobalContexts());
     CHECK_EQ(i + 1, CountGlobalContextsWithGC(i / 2 + 1));
-
-    ctx[i]->Enter();
-    ctx[i]->Exit();
   }
+
+  bool opt = (FLAG_always_opt && i::V8::UseCrankshaft());
+
+  // Compile a number of functions and check the length of the weak list of
+  // optimized functions, both with and without GCs while iterating the list.
+  ctx[0]->Enter();
+  const char* source = "function f1() { };"
+                       "function f2() { };"
+                       "function f3() { };"
+                       "function f4() { };"
+                       "function f5() { };";
+  CompileRun(source);
+  CHECK_EQ(0, CountOptimizedUserFunctions(ctx[0]));
+  CompileRun("f1()");
+  CHECK_EQ(opt ? 1 : 0, CountOptimizedUserFunctions(ctx[0]));
+  CHECK_EQ(opt ? 1 : 0, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
+  CompileRun("f2()");
+  CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctions(ctx[0]));
+  CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
+  CompileRun("f3()");
+  CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[0]));
+  CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctionsWithGC(ctx[0], 1));
+  CompileRun("f4()");
+  CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[0]));
+  CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctionsWithGC(ctx[0], 2));
+  CompileRun("f5()");
+  CHECK_EQ(opt ? 5 : 0, CountOptimizedUserFunctions(ctx[0]));
+  CHECK_EQ(opt ? 5 : 0, CountOptimizedUserFunctionsWithGC(ctx[0], 4));
+
+  ctx[0]->Exit();
 }
 
 
 TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
   InitializeVM();
   intptr_t size_of_objects_1 = Heap::SizeOfObjects();
-  HeapIterator iterator(HeapIterator::kPreciseFiltering);
+  HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
   intptr_t size_of_objects_2 = 0;
   for (HeapObject* obj = iterator.next();
        obj != NULL;
@@ -1124,3 +1240,65 @@
     CHECK_GT(size_of_objects_2 / 100, delta);
   }
 }
+
+
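+// Iterates the heap with the given filtering mode and records whether the
+// two target objects were encountered.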
+class HeapIteratorTestHelper {
+ public:
+  HeapIteratorTestHelper(Object* a, Object* b)
+      : a_(a), b_(b), a_found_(false), b_found_(false) {}
+  bool a_found() { return a_found_; }
+  bool b_found() { return b_found_; }
+  void IterateHeap(HeapIterator::HeapObjectsFiltering mode) {
+    HeapIterator iterator(mode);
+    for (HeapObject* obj = iterator.next();
+         obj != NULL;
+         obj = iterator.next()) {
+      if (obj == a_)
+        a_found_ = true;
+      else if (obj == b_)
+        b_found_ = true;
+    }
+  }
+ private:
+  Object* a_;
+  Object* b_;
+  bool a_found_;
+  bool b_found_;
+};
+
+TEST(HeapIteratorFilterUnreachable) {
+  InitializeVM();
+  v8::HandleScope scope;
+  CompileRun("a = {}; b = {};");
+  v8::Handle<Object> a(Top::context()->global()->GetProperty(
+      *Factory::LookupAsciiSymbol("a"))->ToObjectChecked());
+  v8::Handle<Object> b(Top::context()->global()->GetProperty(
+      *Factory::LookupAsciiSymbol("b"))->ToObjectChecked());
+  CHECK_NE(*a, *b);
+  {
+    HeapIteratorTestHelper helper(*a, *b);
+    helper.IterateHeap(HeapIterator::kFilterUnreachable);
+    CHECK(helper.a_found());
+    CHECK(helper.b_found());
+  }
+  CHECK(Top::context()->global()->DeleteProperty(
+      *Factory::LookupAsciiSymbol("a"), JSObject::FORCE_DELETION));
+  // We ensure that GC will not happen, so our raw pointer stays valid.
+  AssertNoAllocation no_alloc;
+  Object* a_saved = *a;
+  a.Clear();
+  // Verify that the "a" object still resides in the heap...
+  {
+    HeapIteratorTestHelper helper(a_saved, *b);
+    helper.IterateHeap(HeapIterator::kNoFiltering);
+    CHECK(helper.a_found());
+    CHECK(helper.b_found());
+  }
+  // ...but is now unreachable.
+  {
+    HeapIteratorTestHelper helper(a_saved, *b);
+    helper.IterateHeap(HeapIterator::kFilterUnreachable);
+    CHECK(!helper.a_found());
+    CHECK(helper.b_found());
+  }
+}
diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc
index 65be6bd..c85f6c0 100644
--- a/test/cctest/test-log-stack-tracer.cc
+++ b/test/cctest/test-log-stack-tracer.cc
@@ -39,6 +39,7 @@
 #include "cctest.h"
 #include "disassembler.h"
 #include "register-allocator-inl.h"
+#include "vm-state-inl.h"
 
 using v8::Function;
 using v8::Local;
@@ -200,6 +201,7 @@
 
 
 static void CheckJSFunctionAtAddress(const char* func_name, Address addr) {
+  CHECK(i::Heap::Contains(addr));
   i::Object* obj = i::HeapObject::FromAddress(addr);
   CHECK(obj->IsJSFunction());
   CHECK(JSFunction::cast(obj)->shared()->name()->IsString());
@@ -298,10 +300,17 @@
   //       trace(EBP) [native (extension)]
   //         DoTrace(EBP) [native]
   //           StackTracer::Trace
-  CHECK_GT(sample.frames_count, 1);
+
+  // The VM state tracking keeps track of external callbacks and puts
+  // them at the top of the sample stack.
+  int base = 0;
+  CHECK(sample.stack[0] == FUNCTION_ADDR(TraceExtension::Trace));
+  base++;
+
   // Stack tracing will start from the first JS function, i.e. "JSFuncDoTrace"
-  CheckJSFunctionAtAddress("JSFuncDoTrace", sample.stack[0]);
-  CheckJSFunctionAtAddress("JSTrace", sample.stack[1]);
+  CHECK_GT(sample.frames_count, base + 1);
+  CheckJSFunctionAtAddress("JSFuncDoTrace", sample.stack[base + 0]);
+  CheckJSFunctionAtAddress("JSTrace", sample.stack[base + 1]);
 }
 
 
@@ -311,6 +320,10 @@
 // Top::c_entry_fp value. In this case, StackTracer uses passed frame
 // pointer value as a starting point for stack walking.
 TEST(PureJSStackTrace) {
+  // This test does not pass with inlining enabled since inlined functions
+  // don't appear in the stack trace.
+  i::FLAG_use_inlining = false;
+
   TickSample sample;
   InitTraceEnv(&sample);
 
@@ -341,10 +354,17 @@
   // The last JS function called. It is only visible through
   // sample.function, as its return address is above captured EBP value.
   CheckJSFunctionAtAddress("JSFuncDoTrace", sample.function);
-  CHECK_GT(sample.frames_count, 1);
+
+  // The VM state tracking keeps track of external callbacks and puts
+  // them at the top of the sample stack.
+  int base = 0;
+  CHECK(sample.stack[0] == FUNCTION_ADDR(TraceExtension::JSTrace));
+  base++;
+
   // Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace"
-  CheckJSFunctionAtAddress("JSTrace", sample.stack[0]);
-  CheckJSFunctionAtAddress("OuterJSTrace", sample.stack[1]);
+  CHECK_GT(sample.frames_count, base + 1);
+  CheckJSFunctionAtAddress("JSTrace", sample.stack[base + 0]);
+  CheckJSFunctionAtAddress("OuterJSTrace", sample.stack[base + 1]);
 }
 
 
diff --git a/test/cctest/test-log-utils.cc b/test/cctest/test-log-utils.cc
index c99d770..861be12 100644
--- a/test/cctest/test-log-utils.cc
+++ b/test/cctest/test-log-utils.cc
@@ -12,7 +12,6 @@
 using v8::internal::CStrVector;
 using v8::internal::EmbeddedVector;
 using v8::internal::LogDynamicBuffer;
-using v8::internal::LogRecordCompressor;
 using v8::internal::MutableCStrVector;
 using v8::internal::ScopedVector;
 using v8::internal::Vector;
@@ -138,173 +137,4 @@
   CHECK_EQ(0, ReadData(&dynabuf, 100 + seal_size, &buf));
 }
 
-
-TEST(CompressorStore) {
-  LogRecordCompressor comp(2);
-  const Vector<const char> empty = CStrVector("");
-  CHECK(comp.Store(empty));
-  CHECK(!comp.Store(empty));
-  CHECK(!comp.Store(empty));
-  const Vector<const char> aaa = CStrVector("aaa");
-  CHECK(comp.Store(aaa));
-  CHECK(!comp.Store(aaa));
-  CHECK(!comp.Store(aaa));
-  CHECK(comp.Store(empty));
-  CHECK(!comp.Store(empty));
-  CHECK(!comp.Store(empty));
-}
-
-
-void CheckCompression(LogRecordCompressor* comp,
-                      const Vector<const char>& after) {
-  EmbeddedVector<char, 100> result;
-  CHECK(comp->RetrievePreviousCompressed(&result));
-  CHECK_EQ(after, result);
-}
-
-
-void CheckCompression(LogRecordCompressor* comp,
-                      const char* after) {
-  CheckCompression(comp, CStrVector(after));
-}
-
-
-TEST(CompressorNonCompressed) {
-  LogRecordCompressor comp(0);
-  CHECK(!comp.RetrievePreviousCompressed(NULL));
-  const Vector<const char> empty = CStrVector("");
-  CHECK(comp.Store(empty));
-  CHECK(!comp.RetrievePreviousCompressed(NULL));
-  const Vector<const char> a_x_20 = CStrVector("aaaaaaaaaaaaaaaaaaaa");
-  CHECK(comp.Store(a_x_20));
-  CheckCompression(&comp, empty);
-  CheckCompression(&comp, empty);
-  CHECK(comp.Store(empty));
-  CheckCompression(&comp, a_x_20);
-  CheckCompression(&comp, a_x_20);
-}
-
-
-TEST(CompressorSingleLine) {
-  LogRecordCompressor comp(1);
-  const Vector<const char> string_1 = CStrVector("eee,ddd,ccc,bbb,aaa");
-  CHECK(comp.Store(string_1));
-  const Vector<const char> string_2 = CStrVector("fff,ddd,ccc,bbb,aaa");
-  CHECK(comp.Store(string_2));
-  // string_1 hasn't been compressed.
-  CheckCompression(&comp, string_1);
-  CheckCompression(&comp, string_1);
-  const Vector<const char> string_3 = CStrVector("hhh,ggg,ccc,bbb,aaa");
-  CHECK(comp.Store(string_3));
-  // string_2 compressed using string_1.
-  CheckCompression(&comp, "fff#1:3");
-  CheckCompression(&comp, "fff#1:3");
-  CHECK(!comp.Store(string_3));
-  // Expecting no changes.
-  CheckCompression(&comp, "fff#1:3");
-  CHECK(!comp.Store(string_3));
-  // Expecting no changes.
-  CheckCompression(&comp, "fff#1:3");
-  const Vector<const char> string_4 = CStrVector("iii,hhh,ggg,ccc,bbb,aaa");
-  CHECK(comp.Store(string_4));
-  // string_3 compressed using string_2.
-  CheckCompression(&comp, "hhh,ggg#1:7");
-  const Vector<const char> string_5 = CStrVector("nnn,mmm,lll,kkk,jjj");
-  CHECK(comp.Store(string_5));
-  // string_4 compressed using string_3.
-  CheckCompression(&comp, "iii,#1");
-  const Vector<const char> string_6 = CStrVector("nnn,mmmmmm,lll,kkk,jjj");
-  CHECK(comp.Store(string_6));
-  // string_5 hasn't been compressed.
-  CheckCompression(&comp, string_5);
-  CHECK(comp.Store(string_5));
-  // string_6 compressed using string_5.
-  CheckCompression(&comp, "nnn,mmm#1:4");
-  const Vector<const char> string_7 = CStrVector("nnnnnn,mmm,lll,kkk,jjj");
-  CHECK(comp.Store(string_7));
-  // string_5 compressed using string_6.
-  CheckCompression(&comp, "nnn,#1:7");
-  const Vector<const char> string_8 = CStrVector("xxn,mmm,lll,kkk,jjj");
-  CHECK(comp.Store(string_8));
-  // string_7 compressed using string_5.
-  CheckCompression(&comp, "nnn#1");
-  const Vector<const char> string_9 =
-      CStrVector("aaaaaaaaaaaaa,bbbbbbbbbbbbbbbbb");
-  CHECK(comp.Store(string_9));
-  // string_8 compressed using string_7.
-  CheckCompression(&comp, "xx#1:5");
-  const Vector<const char> string_10 =
-      CStrVector("aaaaaaaaaaaaa,cccccccbbbbbbbbbb");
-  CHECK(comp.Store(string_10));
-  // string_9 hasn't been compressed.
-  CheckCompression(&comp, string_9);
-  CHECK(comp.Store(string_1));
-  // string_10 compressed using string_9.
-  CheckCompression(&comp, "aaaaaaaaaaaaa,ccccccc#1:21");
-}
-
-
-
-TEST(CompressorMultiLines) {
-  const int kWindowSize = 3;
-  LogRecordCompressor comp(kWindowSize);
-  const Vector<const char> string_1 = CStrVector("eee,ddd,ccc,bbb,aaa");
-  CHECK(comp.Store(string_1));
-  const Vector<const char> string_2 = CStrVector("iii,hhh,ggg,fff,aaa");
-  CHECK(comp.Store(string_2));
-  const Vector<const char> string_3 = CStrVector("mmm,lll,kkk,jjj,aaa");
-  CHECK(comp.Store(string_3));
-  const Vector<const char> string_4 = CStrVector("nnn,hhh,ggg,fff,aaa");
-  CHECK(comp.Store(string_4));
-  const Vector<const char> string_5 = CStrVector("ooo,lll,kkk,jjj,aaa");
-  CHECK(comp.Store(string_5));
-  // string_4 compressed using string_2.
-  CheckCompression(&comp, "nnn#2:3");
-  CHECK(comp.Store(string_1));
-  // string_5 compressed using string_3.
-  CheckCompression(&comp, "ooo#2:3");
-  CHECK(comp.Store(string_4));
-  // string_1 is out of buffer by now, so it shouldn't be compressed.
-  CHECK_GE(3, kWindowSize);
-  CheckCompression(&comp, string_1);
-  CHECK(comp.Store(string_2));
-  // string_4 compressed using itself.
-  CheckCompression(&comp, "#3");
-}
-
-
-TEST(CompressorBestSelection) {
-  LogRecordCompressor comp(3);
-  const Vector<const char> string_1 = CStrVector("eee,ddd,ccc,bbb,aaa");
-  CHECK(comp.Store(string_1));
-  const Vector<const char> string_2 = CStrVector("ddd,ccc,bbb,aaa");
-  CHECK(comp.Store(string_2));
-  const Vector<const char> string_3 = CStrVector("fff,eee,ddd,ccc,bbb,aaa");
-  CHECK(comp.Store(string_3));
-  // string_2 compressed using string_1.
-  CheckCompression(&comp, "#1:4");
-  const Vector<const char> string_4 = CStrVector("nnn,hhh,ggg,fff,aaa");
-  CHECK(comp.Store(string_4));
-  // Compressing string_3 using string_1 gives a better compression than
-  // using string_2.
-  CheckCompression(&comp, "fff,#2");
-}
-
-
-TEST(CompressorCompressibility) {
-  LogRecordCompressor comp(2);
-  const Vector<const char> string_1 = CStrVector("eee,ddd,ccc,bbb,aaa");
-  CHECK(comp.Store(string_1));
-  const Vector<const char> string_2 = CStrVector("ccc,bbb,aaa");
-  CHECK(comp.Store(string_2));
-  const Vector<const char> string_3 = CStrVector("aaa");
-  CHECK(comp.Store(string_3));
-  // string_2 compressed using string_1.
-  CheckCompression(&comp, "#1:8");
-  const Vector<const char> string_4 = CStrVector("xxx");
-  CHECK(comp.Store(string_4));
-  // string_3 can't be compressed using string_2 --- too short.
-  CheckCompression(&comp, string_3);
-}
-
 #endif  // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 710c10e..503e0cf 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -16,6 +16,7 @@
 #include "cpu-profiler.h"
 #include "v8threads.h"
 #include "cctest.h"
+#include "vm-state-inl.h"
 
 using v8::internal::Address;
 using v8::internal::EmbeddedVector;
@@ -246,7 +247,8 @@
 
 
 static void CheckThatProfilerWorks(LogBufferMatcher* matcher) {
-  CHECK(!LoggerTestHelper::IsSamplerActive());
+  CHECK(i::RuntimeProfiler::IsEnabled() ||
+        !LoggerTestHelper::IsSamplerActive());
   LoggerTestHelper::ResetSamplesTaken();
 
   Logger::ResumeProfiler(v8::PROFILER_MODULE_CPU, 0);
@@ -272,7 +274,8 @@
   }
 
   Logger::PauseProfiler(v8::PROFILER_MODULE_CPU, 0);
-  CHECK(!LoggerTestHelper::IsSamplerActive());
+  CHECK(i::RuntimeProfiler::IsEnabled() ||
+        !LoggerTestHelper::IsSamplerActive());
 
   // Wait 50 msecs to allow Profiler thread to process the last
   // tick sample it has got.
@@ -291,8 +294,12 @@
 TEST(ProfLazyMode) {
   ScopedLoggerInitializer initialize_logger(true);
 
-  // No sampling should happen prior to resuming profiler.
-  CHECK(!LoggerTestHelper::IsSamplerActive());
+  if (!i::V8::UseCrankshaft()) return;
+
+  // No sampling should happen prior to resuming the profiler unless we
+  // are runtime profiling.
+  CHECK(i::RuntimeProfiler::IsEnabled() ||
+        !LoggerTestHelper::IsSamplerActive());
 
   LogBufferMatcher matcher;
   // Nothing must be logged until profiling is resumed.
@@ -403,7 +410,7 @@
 class TestSampler : public v8::internal::Sampler {
  public:
   TestSampler()
-      : Sampler(0, true),
+      : Sampler(0, true, true),
         semaphore_(v8::internal::OS::CreateSemaphore(0)),
         was_sample_stack_called_(false) {
   }
@@ -431,30 +438,38 @@
 }  // namespace
 
 TEST(ProfMultipleThreads) {
+  TestSampler* sampler = NULL;
+  {
+    v8::Locker locker;
+    sampler = new TestSampler();
+    sampler->Start();
+    CHECK(sampler->IsActive());
+  }
+
   LoopingJsThread jsThread;
   jsThread.Start();
   LoopingNonJsThread nonJsThread;
   nonJsThread.Start();
 
-  TestSampler sampler;
-  sampler.Start();
-  CHECK(!sampler.WasSampleStackCalled());
+  CHECK(!sampler->WasSampleStackCalled());
   jsThread.WaitForRunning();
   jsThread.SendSigProf();
-  CHECK(sampler.WaitForTick());
-  CHECK(sampler.WasSampleStackCalled());
-  sampler.Reset();
-  CHECK(!sampler.WasSampleStackCalled());
+  CHECK(sampler->WaitForTick());
+  CHECK(sampler->WasSampleStackCalled());
+  sampler->Reset();
+  CHECK(!sampler->WasSampleStackCalled());
   nonJsThread.WaitForRunning();
   nonJsThread.SendSigProf();
-  CHECK(!sampler.WaitForTick());
-  CHECK(!sampler.WasSampleStackCalled());
-  sampler.Stop();
+  CHECK(!sampler->WaitForTick());
+  CHECK(!sampler->WasSampleStackCalled());
+  sampler->Stop();
 
   jsThread.Stop();
   nonJsThread.Stop();
   jsThread.Join();
   nonJsThread.Join();
+
+  delete sampler;
 }
 
 #endif  // __linux__
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index 9942567..86f105f 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -71,6 +71,10 @@
 
 
 TEST(Promotion) {
+  // This test requires compaction. If compaction is turned off, we
+  // skip the entire test.
+  if (FLAG_never_compact) return;
+
   // Ensure that we get a compacting collection so that objects are promoted
   // from new space.
   FLAG_gc_global = true;
diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc
index a93fc27..e642d1b 100755
--- a/test/cctest/test-parsing.cc
+++ b/test/cctest/test-parsing.cc
@@ -257,15 +257,22 @@
       NULL
   };
 
+  uintptr_t stack_limit = i::StackGuard::real_climit();
   for (int i = 0; programs[i]; i++) {
     const char* program = programs[i];
-    unibrow::Utf8InputBuffer<256> stream(program, strlen(program));
+    i::Utf8ToUC16CharacterStream stream(
+        reinterpret_cast<const i::byte*>(program),
+        static_cast<unsigned>(strlen(program)));
     i::CompleteParserRecorder log;
     i::V8JavaScriptScanner scanner;
-    scanner.Initialize(i::Handle<i::String>::null(), &stream);
-    v8::preparser::PreParser preparser;
-    bool result = preparser.PreParseProgram(&scanner, &log, true);
-    CHECK(result);
+    scanner.Initialize(&stream);
+
+    v8::preparser::PreParser::PreParseResult result =
+        v8::preparser::PreParser::PreParseProgram(&scanner,
+                                                  &log,
+                                                  true,
+                                                  stack_limit);
+    CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
     i::ScriptDataImpl data(log.ExtractData());
     CHECK(!data.has_error());
   }
@@ -284,9 +291,10 @@
   // and then used the invalid currently scanned literal. This always
   // failed in debug mode, and sometimes crashed in release mode.
 
-  unibrow::Utf8InputBuffer<256> stream(program, strlen(program));
+  i::Utf8ToUC16CharacterStream stream(reinterpret_cast<const i::byte*>(program),
+                                      static_cast<unsigned>(strlen(program)));
   i::ScriptDataImpl* data =
-      i::ParserApi::PreParse(i::Handle<i::String>::null(), &stream, NULL);
+      i::ParserApi::PreParse(&stream, NULL);
   CHECK(data->HasError());
   delete data;
 }
@@ -305,10 +313,10 @@
       "try { } catch (e) { var foo = function () { /* first */ } }"
       "var bar = function () { /* second */ }";
 
-  unibrow::Utf8InputBuffer<256> stream(program, strlen(program));
+  i::Utf8ToUC16CharacterStream stream(reinterpret_cast<const i::byte*>(program),
+                                      static_cast<unsigned>(strlen(program)));
   i::ScriptDataImpl* data =
-      i::ParserApi::PartialPreParse(i::Handle<i::String>::null(),
-                                    &stream, NULL);
+      i::ParserApi::PartialPreParse(&stream, NULL);
   CHECK(!data->HasError());
 
   data->Initialize();
@@ -327,3 +335,313 @@
   CHECK_EQ('}', program[entry2.end_pos() - 1]);
   delete data;
 }
+
+
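+// Test that preparsing a pathological program (a megabyte of opening
+// parentheses) reports a stack overflow instead of crashing.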
+TEST(PreParseOverflow) {
+  int marker;
+  i::StackGuard::SetStackLimit(
+      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+  const size_t kProgramSize = 1024 * 1024;
+  i::SmartPointer<char> program(
+      reinterpret_cast<char*>(malloc(kProgramSize + 1)));
+  memset(*program, '(', kProgramSize);
+  program[kProgramSize] = '\0';
+
+  uintptr_t stack_limit = i::StackGuard::real_climit();
+
+  i::Utf8ToUC16CharacterStream stream(
+      reinterpret_cast<const i::byte*>(*program),
+      static_cast<unsigned>(kProgramSize));
+  i::CompleteParserRecorder log;
+  i::V8JavaScriptScanner scanner;
+  scanner.Initialize(&stream);
+
+  v8::preparser::PreParser::PreParseResult result =
+      v8::preparser::PreParser::PreParseProgram(&scanner,
+                                                &log,
+                                                true,
+                                                stack_limit);
+  CHECK_EQ(v8::preparser::PreParser::kPreParseStackOverflow, result);
+}
+
+
+class TestExternalResource: public v8::String::ExternalStringResource {
+ public:
+  explicit TestExternalResource(uint16_t* data, int length)
+      : data_(data), length_(static_cast<size_t>(length)) { }
+
+  ~TestExternalResource() { }
+
+  const uint16_t* data() const {
+    return data_;
+  }
+
+  size_t length() const {
+    return length_;
+  }
+ private:
+  uint16_t* data_;
+  size_t length_;
+};
+
+
+#define CHECK_EQU(v1, v2) CHECK_EQ(static_cast<int>(v1), static_cast<int>(v2))
+
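+// Reads an ASCII source through the external two-byte, generic string and
+// UTF-8 character streams in lockstep, checking that Advance, PushBack and
+// SeekForward agree on positions and characters.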
+void TestCharacterStream(const char* ascii_source,
+                         unsigned length,
+                         unsigned start = 0,
+                         unsigned end = 0) {
+  if (end == 0) end = length;
+  unsigned sub_length = end - start;
+  i::HandleScope test_scope;
+  i::SmartPointer<i::uc16> uc16_buffer(new i::uc16[length]);
+  for (unsigned i = 0; i < length; i++) {
+    uc16_buffer[i] = static_cast<i::uc16>(ascii_source[i]);
+  }
+  i::Vector<const char> ascii_vector(ascii_source, static_cast<int>(length));
+  i::Handle<i::String> ascii_string(
+      i::Factory::NewStringFromAscii(ascii_vector));
+  TestExternalResource resource(*uc16_buffer, length);
+  i::Handle<i::String> uc16_string(
+      i::Factory::NewExternalStringFromTwoByte(&resource));
+
+  i::ExternalTwoByteStringUC16CharacterStream uc16_stream(
+      i::Handle<i::ExternalTwoByteString>::cast(uc16_string), start, end);
+  i::GenericStringUC16CharacterStream string_stream(ascii_string, start, end);
+  i::Utf8ToUC16CharacterStream utf8_stream(
+      reinterpret_cast<const i::byte*>(ascii_source), end);
+  utf8_stream.SeekForward(start);
+
+  unsigned i = start;
+  while (i < end) {
+    // Read the streams one char at a time.
+    CHECK_EQU(i, uc16_stream.pos());
+    CHECK_EQU(i, string_stream.pos());
+    CHECK_EQU(i, utf8_stream.pos());
+    int32_t c0 = ascii_source[i];
+    int32_t c1 = uc16_stream.Advance();
+    int32_t c2 = string_stream.Advance();
+    int32_t c3 = utf8_stream.Advance();
+    i++;
+    CHECK_EQ(c0, c1);
+    CHECK_EQ(c0, c2);
+    CHECK_EQ(c0, c3);
+    CHECK_EQU(i, uc16_stream.pos());
+    CHECK_EQU(i, string_stream.pos());
+    CHECK_EQU(i, utf8_stream.pos());
+  }
+  while (i > start + sub_length / 4) {
+    // Pushback, re-read, pushback again.
+    int32_t c0 = ascii_source[i - 1];
+    CHECK_EQU(i, uc16_stream.pos());
+    CHECK_EQU(i, string_stream.pos());
+    CHECK_EQU(i, utf8_stream.pos());
+    uc16_stream.PushBack(c0);
+    string_stream.PushBack(c0);
+    utf8_stream.PushBack(c0);
+    i--;
+    CHECK_EQU(i, uc16_stream.pos());
+    CHECK_EQU(i, string_stream.pos());
+    CHECK_EQU(i, utf8_stream.pos());
+    int32_t c1 = uc16_stream.Advance();
+    int32_t c2 = string_stream.Advance();
+    int32_t c3 = utf8_stream.Advance();
+    i++;
+    CHECK_EQU(i, uc16_stream.pos());
+    CHECK_EQU(i, string_stream.pos());
+    CHECK_EQU(i, utf8_stream.pos());
+    CHECK_EQ(c0, c1);
+    CHECK_EQ(c0, c2);
+    CHECK_EQ(c0, c3);
+    uc16_stream.PushBack(c0);
+    string_stream.PushBack(c0);
+    utf8_stream.PushBack(c0);
+    i--;
+    CHECK_EQU(i, uc16_stream.pos());
+    CHECK_EQU(i, string_stream.pos());
+    CHECK_EQU(i, utf8_stream.pos());
+  }
+  unsigned halfway = start + sub_length / 2;
+  uc16_stream.SeekForward(halfway - i);
+  string_stream.SeekForward(halfway - i);
+  utf8_stream.SeekForward(halfway - i);
+  i = halfway;
+  CHECK_EQU(i, uc16_stream.pos());
+  CHECK_EQU(i, string_stream.pos());
+  CHECK_EQU(i, utf8_stream.pos());
+
+  while (i < end) {
+    // Read the streams one char at a time.
+    CHECK_EQU(i, uc16_stream.pos());
+    CHECK_EQU(i, string_stream.pos());
+    CHECK_EQU(i, utf8_stream.pos());
+    int32_t c0 = ascii_source[i];
+    int32_t c1 = uc16_stream.Advance();
+    int32_t c2 = string_stream.Advance();
+    int32_t c3 = utf8_stream.Advance();
+    i++;
+    CHECK_EQ(c0, c1);
+    CHECK_EQ(c0, c2);
+    CHECK_EQ(c0, c3);
+    CHECK_EQU(i, uc16_stream.pos());
+    CHECK_EQU(i, string_stream.pos());
+    CHECK_EQU(i, utf8_stream.pos());
+  }
+
+  int32_t c1 = uc16_stream.Advance();
+  int32_t c2 = string_stream.Advance();
+  int32_t c3 = utf8_stream.Advance();
+  CHECK_LT(c1, 0);
+  CHECK_LT(c2, 0);
+  CHECK_LT(c3, 0);
+}
+
+
+TEST(CharacterStreams) {
+  v8::HandleScope handles;
+  v8::Persistent<v8::Context> context = v8::Context::New();
+  v8::Context::Scope context_scope(context);
+
+  TestCharacterStream("abc\0\n\r\x7f", 7);
+  static const unsigned kBigStringSize = 4096;
+  char buffer[kBigStringSize + 1];
+  for (unsigned i = 0; i < kBigStringSize; i++) {
+    buffer[i] = static_cast<char>(i & 0x7f);
+  }
+  TestCharacterStream(buffer, kBigStringSize);
+
+  TestCharacterStream(buffer, kBigStringSize, 576, 3298);
+
+  TestCharacterStream("\0", 1);
+  TestCharacterStream("", 0);
+}
+
+
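+// Encode every code point up to the maximal three-byte UTF-8 character and
+// check that the UTF-8 stream advances, pushes back and seeks correctly.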
+TEST(Utf8CharacterStream) {
+  static const unsigned kMaxUC16CharU = unibrow::Utf8::kMaxThreeByteChar;
+  static const int kMaxUC16Char = static_cast<int>(kMaxUC16CharU);
+
+  static const int kAllUtf8CharsSize =
+      (unibrow::Utf8::kMaxOneByteChar + 1) +
+      (unibrow::Utf8::kMaxTwoByteChar - unibrow::Utf8::kMaxOneByteChar) * 2 +
+      (unibrow::Utf8::kMaxThreeByteChar - unibrow::Utf8::kMaxTwoByteChar) * 3;
+  static const unsigned kAllUtf8CharsSizeU =
+      static_cast<unsigned>(kAllUtf8CharsSize);
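+  // This size follows from the UTF-8 encoding: code points up to
+  // kMaxOneByteChar (0x7f) take one byte, up to kMaxTwoByteChar (0x7ff)
+  // two bytes, and up to kMaxThreeByteChar (0xffff) three bytes.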
+
+  char buffer[kAllUtf8CharsSizeU];
+  unsigned cursor = 0;
+  for (int i = 0; i <= kMaxUC16Char; i++) {
+    cursor += unibrow::Utf8::Encode(buffer + cursor, i);
+  }
+  ASSERT(cursor == kAllUtf8CharsSizeU);
+
+  i::Utf8ToUC16CharacterStream stream(reinterpret_cast<const i::byte*>(buffer),
+                                      kAllUtf8CharsSizeU);
+  for (int i = 0; i <= kMaxUC16Char; i++) {
+    CHECK_EQU(i, stream.pos());
+    int32_t c = stream.Advance();
+    CHECK_EQ(i, c);
+    CHECK_EQU(i + 1, stream.pos());
+  }
+  for (int i = kMaxUC16Char; i >= 0; i--) {
+    CHECK_EQU(i + 1, stream.pos());
+    stream.PushBack(i);
+    CHECK_EQU(i, stream.pos());
+  }
+  int i = 0;
+  while (stream.pos() < kMaxUC16CharU) {
+    CHECK_EQU(i, stream.pos());
+    unsigned progress = stream.SeekForward(12);
+    i += progress;
+    int32_t c = stream.Advance();
+    if (i <= kMaxUC16Char) {
+      CHECK_EQ(i, c);
+    } else {
+      CHECK_EQ(-1, c);
+    }
+    i += 1;
+    CHECK_EQU(i, stream.pos());
+  }
+}
+
+#undef CHECK_EQU
+
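+// Scans tokens from |stream| and compares them against |expected_tokens|,
+// which must be terminated by i::Token::ILLEGAL.  When a token ends exactly
+// at |skip_pos|, the scanner seeks forward to |skip_to|.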
+void TestStreamScanner(i::UC16CharacterStream* stream,
+                       i::Token::Value* expected_tokens,
+                       int skip_pos = 0,  // Zero means not skipping.
+                       int skip_to = 0) {
+  i::V8JavaScriptScanner scanner;
+  scanner.Initialize(stream, i::JavaScriptScanner::kAllLiterals);
+
+  int i = 0;
+  do {
+    i::Token::Value expected = expected_tokens[i];
+    i::Token::Value actual = scanner.Next();
+    CHECK_EQ(i::Token::String(expected), i::Token::String(actual));
+    if (scanner.location().end_pos == skip_pos) {
+      scanner.SeekForward(skip_to);
+    }
+    i++;
+  } while (expected_tokens[i] != i::Token::ILLEGAL);
+}
+
+TEST(StreamScanner) {
+  const char* str1 = "{ foo get for : */ <- \n\n /*foo*/ bib";
+  i::Utf8ToUC16CharacterStream stream1(reinterpret_cast<const i::byte*>(str1),
+                                       static_cast<unsigned>(strlen(str1)));
+  i::Token::Value expectations1[] = {
+      i::Token::LBRACE,
+      i::Token::IDENTIFIER,
+      i::Token::IDENTIFIER,
+      i::Token::FOR,
+      i::Token::COLON,
+      i::Token::MUL,
+      i::Token::DIV,
+      i::Token::LT,
+      i::Token::SUB,
+      i::Token::IDENTIFIER,
+      i::Token::EOS,
+      i::Token::ILLEGAL
+  };
+  TestStreamScanner(&stream1, expectations1, 0, 0);
+
+  const char* str2 = "case default const {THIS\nPART\nSKIPPED} do";
+  i::Utf8ToUC16CharacterStream stream2(reinterpret_cast<const i::byte*>(str2),
+                                       static_cast<unsigned>(strlen(str2)));
+  i::Token::Value expectations2[] = {
+      i::Token::CASE,
+      i::Token::DEFAULT,
+      i::Token::CONST,
+      i::Token::LBRACE,
+      // Skipped part here
+      i::Token::RBRACE,
+      i::Token::DO,
+      i::Token::EOS,
+      i::Token::ILLEGAL
+  };
+  ASSERT_EQ('{', str2[19]);
+  ASSERT_EQ('}', str2[37]);
+  TestStreamScanner(&stream2, expectations2, 20, 37);
+
+  const char* str3 = "{}}}}";
+  i::Token::Value expectations3[] = {
+      i::Token::LBRACE,
+      i::Token::RBRACE,
+      i::Token::RBRACE,
+      i::Token::RBRACE,
+      i::Token::RBRACE,
+      i::Token::EOS,
+      i::Token::ILLEGAL
+  };
+  // Skip zero to four RBRACEs.
+  for (int i = 0; i <= 4; i++) {
+    expectations3[6 - i] = i::Token::ILLEGAL;
+    expectations3[5 - i] = i::Token::EOS;
+    i::Utf8ToUC16CharacterStream stream3(
+        reinterpret_cast<const i::byte*>(str3),
+        static_cast<unsigned>(strlen(str3)));
+    TestStreamScanner(&stream3, expectations3, 1, 1 + i);
+  }
+}
diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc
index f46191a..f849d40 100644
--- a/test/cctest/test-profile-generator.cc
+++ b/test/cctest/test-profile-generator.cc
@@ -757,6 +757,10 @@
 
 
 TEST(RecordStackTraceAtStartProfiling) {
+  // This test does not pass with inlining enabled since inlined functions
+  // don't appear in the stack trace.
+  i::FLAG_use_inlining = false;
+
   if (env.IsEmpty()) {
     v8::HandleScope scope;
     const char* extensions[] = { "v8/profiler" };
@@ -778,12 +782,16 @@
       CpuProfiler::GetProfile(NULL, 0);
   const ProfileTree* topDown = profile->top_down();
   const ProfileNode* current = topDown->root();
+  const_cast<ProfileNode*>(current)->Print(0);
   // The tree should look like this:
   //  (root)
   //   (anonymous function)
   //     a
   //       b
   //         c
+  // There can also be:
+  //           startProfiling
+  // if the sampler managed to get a tick.
   current = PickChild(current, "(anonymous function)");
   CHECK_NE(NULL, const_cast<ProfileNode*>(current));
   current = PickChild(current, "a");
@@ -792,7 +800,12 @@
   CHECK_NE(NULL, const_cast<ProfileNode*>(current));
   current = PickChild(current, "c");
   CHECK_NE(NULL, const_cast<ProfileNode*>(current));
-  CHECK_EQ(0, current->children()->length());
+  CHECK(current->children()->length() == 0 ||
+        current->children()->length() == 1);
+  if (current->children()->length() == 1) {
+    current = PickChild(current, "startProfiling");
+    CHECK_EQ(0, current->children()->length());
+  }
 }
 
 
diff --git a/test/cctest/test-reloc-info.cc b/test/cctest/test-reloc-info.cc
new file mode 100644
index 0000000..2b9beac
--- /dev/null
+++ b/test/cctest/test-reloc-info.cc
@@ -0,0 +1,109 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "cctest.h"
+#include "assembler.h"
+
+namespace v8 {
+namespace internal {
+
+static void WriteRinfo(RelocInfoWriter* writer,
+                       byte* pc, RelocInfo::Mode mode, intptr_t data) {
+  RelocInfo rinfo(pc, mode, data);
+  writer->Write(&rinfo);
+}
+
+
+// Tests that writing both types of positions and then reading either
+// or both works as expected.
+TEST(Positions) {
+  const int instr_size = 10 << 10;
+  const int reloc_size = 10 << 10;
+  const int buf_size = instr_size + reloc_size;
+  SmartPointer<byte> buf(new byte[buf_size]);
+  byte* pc = *buf;
+  CodeDesc desc = { *buf, buf_size, instr_size, reloc_size, NULL };
+
+  RelocInfoWriter writer(*buf + buf_size, pc);
+  for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
+    RelocInfo::Mode mode = (i % 2 == 0) ?
+        RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
+    WriteRinfo(&writer, pc, mode, pos);
+  }
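+  // Both pc and pos advance by i on every iteration, so the gaps between
+  // consecutive entries keep growing; this should exercise both small and
+  // large pc deltas in the writer's encoding.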
+
+  // Read only (non-statement) positions.
+  {
+    RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::POSITION));
+    pc = *buf;
+    for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
+      RelocInfo::Mode mode = (i % 2 == 0) ?
+          RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
+      if (mode == RelocInfo::POSITION) {
+        CHECK_EQ(pc, it.rinfo()->pc());
+        CHECK_EQ(mode, it.rinfo()->rmode());
+        CHECK_EQ(pos, static_cast<int>(it.rinfo()->data()));
+        it.next();
+      }
+    }
+    CHECK(it.done());
+  }
+
+  // Read only statement positions.
+  {
+    RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION));
+    pc = *buf;
+    for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
+      RelocInfo::Mode mode = (i % 2 == 0) ?
+          RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
+      if (mode == RelocInfo::STATEMENT_POSITION) {
+        CHECK_EQ(pc, it.rinfo()->pc());
+        CHECK_EQ(mode, it.rinfo()->rmode());
+        CHECK_EQ(pos, static_cast<int>(it.rinfo()->data()));
+        it.next();
+      }
+    }
+    CHECK(it.done());
+  }
+
+  // Read both types of positions.
+  {
+    RelocIterator it(desc, RelocInfo::kPositionMask);
+    pc = *buf;
+    for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
+      RelocInfo::Mode mode = (i % 2 == 0) ?
+          RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
+      CHECK_EQ(pc, it.rinfo()->pc());
+      CHECK_EQ(mode, it.rinfo()->rmode());
+      CHECK_EQ(pos, static_cast<int>(it.rinfo()->data()));
+      it.next();
+    }
+    CHECK(it.done());
+  }
+}
+
+} }  // namespace v8::internal
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc
index b399a4e..706c6bf 100644
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -95,13 +95,13 @@
 
   OldSpace faked_space(Heap::MaxReserved(), OLD_POINTER_SPACE, NOT_EXECUTABLE);
   int total_pages = 0;
-  int requested = 2;
+  int requested = MemoryAllocator::kPagesPerChunk;
   int allocated;
-  // If we request two pages, we should get one or two.
+  // If we request n pages, we should get n or n - 1.
   Page* first_page =
       MemoryAllocator::AllocatePages(requested, &allocated, &faked_space);
   CHECK(first_page->is_valid());
-  CHECK(allocated > 0 && allocated <= 2);
+  CHECK(allocated == requested || allocated == requested - 1);
   total_pages += allocated;
 
   Page* last_page = first_page;
@@ -110,11 +110,11 @@
     last_page = p;
   }
 
-  // Again, we should get one or two pages.
+  // Again, we should get n or n - 1 pages.
   Page* others =
       MemoryAllocator::AllocatePages(requested, &allocated, &faked_space);
   CHECK(others->is_valid());
-  CHECK(allocated > 0 && allocated <= 2);
+  CHECK(allocated == requested || allocated == requested - 1);
   total_pages += allocated;
 
   MemoryAllocator::SetNextPage(last_page, others);
@@ -129,11 +129,10 @@
   CHECK(second_page->is_valid());
 
   // Freeing pages at the first chunk starting at or after the second page
-  // should free the entire second chunk.  It will return the last page in the
-  // first chunk (if the second page was in the first chunk) or else an
-  // invalid page (if the second page was the start of the second chunk).
+  // should free the entire second chunk.  It will return the page it was passed
+  // (since the second page was in the first chunk).
   Page* free_return = MemoryAllocator::FreePages(second_page);
-  CHECK(free_return == last_page || !free_return->is_valid());
+  CHECK(free_return == second_page);
   MemoryAllocator::SetNextPage(first_page, free_return);
 
   // Freeing pages in the first chunk starting at the first page should free
diff --git a/test/cctest/test-utils.cc b/test/cctest/test-utils.cc
index 88ef0a2..b48dcb8 100644
--- a/test/cctest/test-utils.cc
+++ b/test/cctest/test-utils.cc
@@ -103,6 +103,7 @@
 
 
 TEST(MemCopy) {
+  V8::Initialize(NULL);
   const int N = kMinComplexMemCopy + 128;
   Vector<byte> buffer1 = Vector<byte>::New(N);
   Vector<byte> buffer2 = Vector<byte>::New(N);
diff --git a/test/cctest/test-version.cc b/test/cctest/test-version.cc
index 6d26855..6bec4b7 100644
--- a/test/cctest/test-version.cc
+++ b/test/cctest/test-version.cc
@@ -74,6 +74,20 @@
 
 
 TEST(VersionString) {
+#ifdef USE_SIMULATOR
+  CheckVersion(0, 0, 0, 0, false, "0.0.0 SIMULATOR", "libv8-0.0.0.so");
+  CheckVersion(0, 0, 0, 0, true,
+               "0.0.0 (candidate) SIMULATOR", "libv8-0.0.0-candidate.so");
+  CheckVersion(1, 0, 0, 0, false, "1.0.0 SIMULATOR", "libv8-1.0.0.so");
+  CheckVersion(1, 0, 0, 0, true,
+               "1.0.0 (candidate) SIMULATOR", "libv8-1.0.0-candidate.so");
+  CheckVersion(1, 0, 0, 1, false, "1.0.0.1 SIMULATOR", "libv8-1.0.0.1.so");
+  CheckVersion(1, 0, 0, 1, true,
+               "1.0.0.1 (candidate) SIMULATOR", "libv8-1.0.0.1-candidate.so");
+  CheckVersion(2, 5, 10, 7, false, "2.5.10.7 SIMULATOR", "libv8-2.5.10.7.so");
+  CheckVersion(2, 5, 10, 7, true,
+               "2.5.10.7 (candidate) SIMULATOR", "libv8-2.5.10.7-candidate.so");
+#else
   CheckVersion(0, 0, 0, 0, false, "0.0.0", "libv8-0.0.0.so");
   CheckVersion(0, 0, 0, 0, true,
                "0.0.0 (candidate)", "libv8-0.0.0-candidate.so");
@@ -86,4 +100,5 @@
   CheckVersion(2, 5, 10, 7, false, "2.5.10.7", "libv8-2.5.10.7.so");
   CheckVersion(2, 5, 10, 7, true,
                "2.5.10.7 (candidate)", "libv8-2.5.10.7-candidate.so");
+#endif
 }
diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status
index 5add082..a51bd30 100644
--- a/test/es5conform/es5conform.status
+++ b/test/es5conform/es5conform.status
@@ -29,6 +29,8 @@
 def UNIMPLEMENTED = PASS || FAIL
 def FAIL_OK = FAIL, OKAY
 
+
+##############################################################################
 # Non UTF8 characters in test files.
 chapter10/10.4/10.4.2/10.4.2-3-c-2-s: FAIL_OK
 chapter10/10.4/10.4.2/10.4.2-3-c-1-s: FAIL_OK
@@ -86,7 +88,7 @@
 # than those described in the spec - but according to spec they can 
 # have additional properties.
 # All compareArray calls in these tests could be exchanged with a 
-# isSubsetOfArray call (I will upload a path to the es5conform site).
+# isSubsetOfArray call (I will upload a patch to the es5conform site).
 
 # SUBSETFAIL
 chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-1: FAIL_OK
@@ -110,12 +112,6 @@
 chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-7: FAIL_OK
 
 # SUBSETFAIL
-chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-8: FAIL_OK
-
-# SUBSETFAIL
-chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-9: FAIL_OK
-
-# SUBSETFAIL
 chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-11: FAIL_OK
 
 # We do not implement all methods on RegExp.
diff --git a/test/message/message.status b/test/message/message.status
index c4a3842..70354ce 100644
--- a/test/message/message.status
+++ b/test/message/message.status
@@ -30,6 +30,8 @@
 # All tests in the bug directory are expected to fail.
 bugs: FAIL
 
+
+##############################################################################
 [ $arch == mips ]
 
 # Skip all tests on MIPS.
diff --git a/test/message/try-catch-finally-return-in-finally.js b/test/message/try-catch-finally-return-in-finally.js
index d23fe35..58a62a8 100644
--- a/test/message/try-catch-finally-return-in-finally.js
+++ b/test/message/try-catch-finally-return-in-finally.js
@@ -36,4 +36,5 @@
   }
 }
 
-print(f());
+var result = f();
+if (result != 42) print("Wrong result: " + result);
diff --git a/test/message/try-catch-finally-return-in-finally.out b/test/message/try-catch-finally-return-in-finally.out
index 1c42ee0..f59f5c6 100644
--- a/test/message/try-catch-finally-return-in-finally.out
+++ b/test/message/try-catch-finally-return-in-finally.out
@@ -24,5 +24,3 @@
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-42
diff --git a/test/message/try-finally-return-in-finally.js b/test/message/try-finally-return-in-finally.js
index 6ec8970..0e46193 100644
--- a/test/message/try-finally-return-in-finally.js
+++ b/test/message/try-finally-return-in-finally.js
@@ -34,4 +34,5 @@
   }
 }
 
-print(f());
+var result = f();
+if (result != 42) print("Wrong result: " + result);
diff --git a/test/message/try-finally-return-in-finally.out b/test/message/try-finally-return-in-finally.out
index 1c42ee0..f59f5c6 100644
--- a/test/message/try-finally-return-in-finally.out
+++ b/test/message/try-finally-return-in-finally.out
@@ -24,5 +24,3 @@
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-42
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/accessors-on-global-object.js
similarity index 60%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/accessors-on-global-object.js
index 6e292d6..8d95692 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/accessors-on-global-object.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,48 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Test that installing a getter on the global object instead of a
+// normal property works.
 
+var x = 0;
 
-// Flags: --nofull-compiler
+function getX() { return x; }
 
-function foo() {
-  return (0 > ("10"||10) - 1);
+for (var i = 0; i < 10; i++) {
+  assertEquals(i < 5 ? 0 : 42, getX());
+  if (i == 4) __defineGetter__("x", function() { return 42; });
 }
 
-assertFalse(foo());
+
+// Test that installing a setter on the global object instead of a
+// normal property works.
+
+var y = 0;
+var setter_y;
+
+function setY(value) { y = value; }
+
+for (var i = 0; i < 10; i++) {
+  setY(i);
+  assertEquals(i < 5 ? i : 2 * i, y);
+  if (i == 4) {
+    __defineSetter__("y", function(value) { setter_y = 2 * value; });
+    __defineGetter__("y", function() { return setter_y; });
+  }
+}
+
+
+// Test that replacing a getter with a normal property works as
+// expected.
+
+__defineGetter__("z", function() { return 42; });
+
+function getZ() { return z; }
+
+for (var i = 0; i < 10; i++) {
+  assertEquals(i < 5 ? 42 : 0, getZ());
+  if (i == 4) {
+    delete z;
+    var z = 0;
+  }
+}
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/apply-arguments-gc-safepoint.js
similarity index 80%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/apply-arguments-gc-safepoint.js
index 6e292d6..57ed8cc 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/apply-arguments-gc-safepoint.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Flags: --expose-gc
 
+// Test that safepoint tables are correctly generated for apply with
+// arguments in the case where arguments adaptation is needed.
 
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function f(x, y) {
+  if (x == 149999) gc();
+  return x + y;
 }
 
-assertFalse(foo());
+function g() {
+  f.apply(this, arguments);
+}
+
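+// Iterate enough times for f to be optimized; the gc() on the final
+// iteration then has to walk the apply frame using its safepoint table.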
+for (var i = 0; i < 150000; i++) {
+  g(i);
+}
diff --git a/test/mjsunit/array-functions-prototype.js b/test/mjsunit/array-functions-prototype.js
index ea0dc61..b68ee73 100644
--- a/test/mjsunit/array-functions-prototype.js
+++ b/test/mjsunit/array-functions-prototype.js
@@ -55,7 +55,7 @@
 // shift.
 // ----------------------------------------------------------------------
 
-function runTest() {
+function runTest1() {
   var nonArray = new constructor();
   var array = ['zero', , 'two'];
   // Shift away the zero.
@@ -80,13 +80,13 @@
   assertEquals('two', nonArray[2]);
 }
 
-runTest();
+runTest1();
 
 // ----------------------------------------------------------------------
 // unshift.
 // ----------------------------------------------------------------------
 
-runTest = function() {
+runTest2 = function() {
   var nonArray = new constructor();
   var array = ['zero', , 'two'];
   // Unshift a new 'zero'.
@@ -110,14 +110,14 @@
   assertEquals('two', nonArray[3]);
 }
 
-runTest();
+runTest2();
 
 
 // ----------------------------------------------------------------------
 // splice
 // ----------------------------------------------------------------------
 
-runTest = function() {
+runTest3 = function() {
   var nonArray = new constructor();
   var array = ['zero', , 'two'];
   // Delete the first element by splicing in nothing.
@@ -140,14 +140,14 @@
   assertEquals('two', nonArray[2]);
 };
 
-runTest();
+runTest3();
 
 
 // ----------------------------------------------------------------------
 // slice
 // ----------------------------------------------------------------------
 
-runTest = function() {
+runTest4 = function() {
   var nonArray = new constructor();
   var array = ['zero', , 'two'];
   // Again Spidermonkey is inconsistent.  (array.slice(0, 3))[1] is
@@ -156,4 +156,4 @@
   assertArrayEquals(['zero', 'one', 'two'], Array.prototype.slice.call(nonArray, 0, 3));
 };
 
-runTest();
+runTest4();
diff --git a/test/mjsunit/array-slice.js b/test/mjsunit/array-slice.js
index 8f9ce53..50b5b27 100644
--- a/test/mjsunit/array-slice.js
+++ b/test/mjsunit/array-slice.js
@@ -218,3 +218,16 @@
     assertTrue(delete Array.prototype[5]);
   }
 })();
+
+// Check slicing on arguments object.
+(function() {
+  function func(expected, a0, a1, a2) {
+    assertEquals(expected, Array.prototype.slice.call(arguments, 1));
+  }
+
+  func([]);
+  func(['a'], 'a');
+  func(['a', 1], 'a', 1);
+  func(['a', 1, undefined], 'a', 1, undefined);
+  func(['a', 1, undefined, void(0)], 'a', 1, undefined, void(0));
+})();
diff --git a/test/mjsunit/array-sort.js b/test/mjsunit/array-sort.js
index a082abc..7060c5f 100644
--- a/test/mjsunit/array-sort.js
+++ b/test/mjsunit/array-sort.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -360,3 +360,18 @@
 }
 
 TestSpecialCasesInheritedElementSort();
+
+// Test that sort calls the compare function with the global object as
+// receiver, and with only the elements of the array as arguments.
+function o(v) { 
+  return {__proto__: o.prototype, val: v};
+}
+var arr = [o(1), o(2), o(4), o(8), o(16), o(32), o(64), o(128), o(256), o(-0)];
+var global = this;
+function cmpTest(a, b) {
+  assertEquals(global, this);
+  assertTrue(a instanceof o);
+  assertTrue(b instanceof o);
+  return a.val - b.val;
+}
+arr.sort(cmpTest);
\ No newline at end of file
diff --git a/test/mjsunit/bitops-info.js b/test/mjsunit/bitops-info.js
index 4b114c5..4660fdf 100644
--- a/test/mjsunit/bitops-info.js
+++ b/test/mjsunit/bitops-info.js
@@ -37,6 +37,7 @@
   return 1600822924;  // It's a signed Int32.
 }
 
+
 function f() {
   var x = non_int32();  // Not a constant.
   var y = hidden_smi();  // Not a constant.
@@ -65,13 +66,6 @@
   assertEquals(46512102 & 2600822924, x & y, "10rev");
   assertEquals(1600822924 & 2600822924, x & z, "11rev");
 
-  assertEquals((46512102 & -0x20123456) | 1, (y & -0x20123456) | 1, "12");
-  assertEquals((1600822924 & -0x20123456) | 1, (z & -0x20123456) | 1, "13");
-  assertEquals((2600822924 & -0x20123456) | 1, (x & -0x20123456) | 1, "14");
-  assertEquals((46512102 & -0x20123456) | 1, (-0x20123456 & y) | 1, "12rev");
-  assertEquals((1600822924 & -0x20123456) | 1, (-0x20123456 & z) | 1, "13rev");
-  assertEquals((2600822924 & -0x20123456) | 1, (-0x20123456 & x) | 1, "14rev");
-
   assertEquals(2600822924 & 2600822924, x & x, "xx");
   assertEquals(y, y & y, "yy");
   assertEquals(z, z & z, "zz");
diff --git a/test/mjsunit/codegen-coverage.js b/test/mjsunit/codegen-coverage.js
index 8e7f189..cd53863 100644
--- a/test/mjsunit/codegen-coverage.js
+++ b/test/mjsunit/codegen-coverage.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --nofull-compiler --nofast-compiler
-
 // Test paths in the code generator where values in specific registers
 // get moved around.
 function identity(x) {
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/alloc-number.js
similarity index 81%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/alloc-number.js
index 6e292d6..85c39de 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/alloc-number.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+// Try to get a GC because of a heap number allocation while we
+// have live values (o) in a register.
+function f(o) {
+  var x = 1.5;
+  var y = 2.5;
+  for (var i = 1; i < 100000; i+=2) o.val = x + y + i;
+  return o;
 }
 
-assertFalse(foo());
+var o = { val: 0 };
+for (var i = 0; i < 100; i++) f(o);
diff --git a/test/mjsunit/compiler/array-access.js b/test/mjsunit/compiler/array-access.js
new file mode 100644
index 0000000..65b3c99
--- /dev/null
+++ b/test/mjsunit/compiler/array-access.js
@@ -0,0 +1,132 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function Get0(a) {
+  return a[0];
+}
+
+function GetN(a,n) {
+  return a[n];
+}
+
+function GetA0(a) {
+  return a[a[0]];
+}
+
+function GetAN(a,n) {
+  return a[a[n]];
+}
+
+function GetAAN(a,n) {
+  return a[a[a[n]]];
+}
+
+function RunGetTests() {
+  var a = [2,0,1];
+  assertEquals(2, Get0(a));
+
+  assertEquals(2, GetN(a, 0));
+  assertEquals(0, GetN(a, 1));
+  assertEquals(1, GetN(a, 2));
+
+  assertEquals(1, GetA0(a));
+
+  assertEquals(1, GetAN(a,0));
+  assertEquals(2, GetAN(a,1));
+  assertEquals(0, GetAN(a,2));
+
+  assertEquals(0, GetAAN(a,0));
+  assertEquals(1, GetAAN(a,1));
+  assertEquals(2, GetAAN(a,2));
+}
+
+
+function Set07(a) {
+  a[0] = 7;
+}
+
+function Set0V(a, v) {
+  a[0] = v;
+}
+
+function SetN7(a, n) {
+  a[n] = 7;
+}
+
+function SetNX(a, n, x) {
+  a[n] = x;
+}
+
+function RunSetTests(a) {
+  Set07(a);
+  assertEquals(7, a[0]);
+  assertEquals(0, a[1]);
+  assertEquals(0, a[2]);
+
+  Set0V(a, 1);
+  assertEquals(1, a[0]);
+  assertEquals(0, a[1]);
+  assertEquals(0, a[2]);
+
+  SetN7(a, 2);
+  assertEquals(1, a[0]);
+  assertEquals(0, a[1]);
+  assertEquals(7, a[2]);
+
+  SetNX(a, 1, 5);
+  assertEquals(1, a[0]);
+  assertEquals(5, a[1]);
+  assertEquals(7, a[2]);
+
+  for (var i = 0; i < 3; i++) SetNX(a, i, 0);
+  assertEquals(0, a[0]);
+  assertEquals(0, a[1]);
+  assertEquals(0, a[2]);
+}
+
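+// After many in-bounds stores the optimized code for f may assume the
+// index is within bounds; the final out-of-bounds store (index 4 in a
+// three-element array) must still deopt and grow the array correctly.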
+function RunArrayBoundsCheckTest() {
+  var g = [1,2,3];
+
+  function f(a, i) { a[i] = 42; }
+
+  for (var i = 0; i < 100000; i++) { f(g, 0); }
+
+  f(g, 4);
+
+  assertEquals(42, g[0]);
+  assertEquals(42, g[4]);
+}
+
+var a = [0,0,0];
+var o = {0: 0, 1: 0, 2: 0};
+for (var i = 0; i < 1000; i++) {
+  RunGetTests();
+  RunSetTests(a);
+  RunSetTests(o);
+}
+
+RunArrayBoundsCheckTest();
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/array-length.js
similarity index 75%
rename from test/mjsunit/regress/regress-1146.js
rename to test/mjsunit/compiler/array-length.js
index e8028ce..7adb9ab 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/array-length.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+function ArrayLength(a) { return a.length; }
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+function Test(a0, a2, a5) {
+  assertEquals(0, ArrayLength(a0));
+  assertEquals(2, ArrayLength(a2));
+  assertEquals(5, ArrayLength(a5));
+}
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
-
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+var a0 = [];
+var a2 = [1,2];
+var a5 = [1,2,3,4,5];
+for (var i = 0; i < 10000000; i++) Test(a0, a2, a5);
+assertEquals("undefined", typeof(ArrayLength(0)));
+for (var i = 0; i < 10000000; i++) Test(a0, a2, a5);
+assertEquals(4, ArrayLength("hest"));
diff --git a/test/mjsunit/compiler/assignment-deopt.js b/test/mjsunit/compiler/assignment-deopt.js
new file mode 100644
index 0000000..74f185b
--- /dev/null
+++ b/test/mjsunit/compiler/assignment-deopt.js
@@ -0,0 +1,146 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test deopt with count operation on parameter.
+var max_smi = 1073741823;
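+// 1073741823 is 2^30 - 1, the largest smi on 32-bit platforms; counting
+// past it leaves the smi range and forces a deoptimization.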
+var o = {x:0};
+
+function assign1(x) { x += 1; o.x = x; }
+assign1(max_smi);
+assertEquals(max_smi + 1, o.x);
+
+assign1(1.1);
+assertEquals(2.1, o.x);
+
+
+// Test deopt with count operation on named property.
+function assign2(p) { p.x += 1 }
+
+o.x = "42";
+assign2(o);
+assertEquals("421", o.x);
+
+var s = max_smi - 10000;
+o.x = s;
+for (var i = 0; i < 20000; i++) {
+  assign2(o);
+}
+assertEquals(max_smi + 10000, o.x);
+
+
+// Test deopt with count operation on keyed property.
+function assign3(a, b) { a[b] += 1; }
+
+o = ["42"];
+assign3(o, 0);
+assertEquals("421", o[0]);
+
+var s = max_smi - 10000;
+o[0] = s;
+for (var i = 0; i < 20000; i++) {
+  assign3(o, 0);
+}
+assertEquals(max_smi + 10000, o[0]);
+
+assign3(o,"0");
+
+assertEquals(max_smi + 10001, o[0]);
+
+// Test bailout when accessing a non-existing array element.
+o[0] = 0;
+for (var i = 0; i < 10000; i++) {
+  assign3(o, 0);
+}
+assign3(o,1);
+
+// Test bailout with count operation in a value context.
+function assign5(x,y) { return (x += 1) + y; }
+for (var i = 0; i < 10000; ++i) assertEquals(4, assign5(2, 1));
+assertEquals(4.1, assign5(2, 1.1));
+assertEquals(4.1, assign5(2.1, 1));
+
+function assign7(o,y) { return (o.x += 1) + y; }
+o = {x:0};
+for (var i = 0; i < 10000; ++i) {
+  o.x = 42;
+  assertEquals(44, assign7(o, 1));
+}
+o.x = 42;
+assertEquals(44.1, assign7(o, 1.1));
+o.x = 42.1;
+assertEquals(44.1, assign7(o, 1));
+
+function assign9(o,y) { return (o[0] += 1) + y; }
+q = [0];
+for (var i = 0; i < 10000; ++i) {
+  q[0] = 42;
+  assertEquals(44, assign9(q, 1));
+}
+q[0] = 42;
+assertEquals(44.1, assign9(q, 1.1));
+q[0] = 42.1;
+assertEquals(44.1, assign9(q, 1));
+
+// Test deopt because of a failed map check on the load.
+function assign10(p) { return p.x += 1 }
+var g1 = {x:0};
+var g2 = {y:0, x:42};
+for (var i = 0; i < 10000; ++i) {
+  g1.x = 42;
+  assertEquals(43, assign10(g1));
+  assertEquals(43, g1.x);
+}
+assertEquals(43, assign10(g2));
+assertEquals(43, g2.x);
+
+// Test deopt because of a failed map check on the store.
+// The binary operation changes the map as a side effect.
+o = {x:0};
+var g3 = { valueOf: function() { o.y = "bar"; return 42; }};
+function assign11(p) { return p.x += 1; }
+
+for (var i = 0; i < 10000; i++) {
+  o.x = "a";
+  assign11(o);
+}
+assertEquals("a11", assign11(o));
+o.x = g3;
+assertEquals(43, assign11(o));
+assertEquals("bar", o.y);
+
+o = [0];
+var g4 = { valueOf: function() { o.y = "bar"; return 42; }};
+function assign12(p) { return p[0] += 1; }
+
+for (var i = 0; i < 1000000; i++) {
+  o[0] = "a";
+  assign12(o);
+}
+assertEquals("a11", assign12(o));
+o[0] = g4;
+assertEquals(43, assign12(o));
+assertEquals("bar", o.y);
diff --git a/test/mjsunit/compiler/assignment.js b/test/mjsunit/compiler/assignment.js
index 6aded4e..1f3f282 100644
--- a/test/mjsunit/compiler/assignment.js
+++ b/test/mjsunit/compiler/assignment.js
@@ -264,6 +264,13 @@
 bar_loop();
 
 
+// Test assignment in test context.
+function test_assign(x, y) { if (x = y) return x; }
+
+assertEquals(42, test_assign(0, 42));
+
+assertEquals("undefined", typeof test_assign(42, 0));
+
 // Test for assignment using a keyed store ic:
 function store_i_in_element_i_of_object_i() {
   var i = new Object();
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/binary-ops.js
similarity index 61%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/binary-ops.js
index e8028ce..27745c1 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/binary-ops.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,31 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+// Values in distinct spans.
+function or_test0(x, y) { return x | y; }
+function and_test0(x, y) { return x & y; }
+function add_test0(x, y) { return x + y; }
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+assertEquals(3, or_test0(1, 2));   // 1 | 2
+assertEquals(2, and_test0(3, 6));  // 3 & 6
+assertEquals(5, add_test0(2, 3));  // 2 + 3
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+// Values in the same span.
+function or_test1(x, y) { return x | x; }
+function and_test1(x, y) { return x & x; }
+function add_test1(x, y) { return x + x; }
+
+assertEquals(1, or_test1(1, 2));   // 1 | 1
+assertEquals(3, and_test1(3, 6));  // 3 & 3
+assertEquals(4, add_test1(2, 3));  // 2 + 2
+
+
+// Values in distinct spans that alias.
+function or_test2(x, y) { x = y; return x | y; }
+function and_test2(x, y) { x = y; return x & y; }
+function add_test2(x, y) { x = y; return x + y; }
+
+assertEquals(2, or_test2(1, 2));   // 2 | 2
+assertEquals(6, and_test2(3, 6));  // 6 & 6
+assertEquals(6, add_test2(2, 3));  // 3 + 3
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/call-keyed.js
similarity index 86%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/call-keyed.js
index 6e292d6..d442212 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/call-keyed.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,14 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+A = {}
+A.i = [];
+A.i.push(function () { });
+A.i.push(function () { });
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function f(event) {
+  for (var i = 0, j = A.i.length; i < j; ++i)
+    A.i[i]();
 }
 
-assertFalse(foo());
+f(null);
diff --git a/test/mjsunit/compiler/compare.js b/test/mjsunit/compiler/compare.js
new file mode 100644
index 0000000..3f96087
--- /dev/null
+++ b/test/mjsunit/compiler/compare.js
@@ -0,0 +1,108 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function MaxLT(x, y) {
+  if (x < y) return y;
+  return x;
+}
+
+function MaxLE(x, y) {
+  if (x <= y) return y;
+  return x;
+}
+
+function MaxGE(x, y) {
+  if (x >= y) return x;
+  return y;
+}
+
+function MaxGT(x, y) {
+  if (x > y) return x;
+  return y;
+}
+
+
+// First test primitive values.
+function TestPrimitive(max, x, y) {
+  assertEquals(max, MaxLT(x, y), "MaxLT - primitive");
+  assertEquals(max, MaxLE(x, y), "MaxLE - primitive");
+  assertEquals(max, MaxGE(x, y), "MaxGE - primitive");
+  assertEquals(max, MaxGT(x, y), "MaxGT - primitive");
+}
+
+TestPrimitive(1, 0, 1);
+TestPrimitive(1, 1, 0);
+TestPrimitive(4, 3, 4);
+TestPrimitive(4, 4, 3);
+TestPrimitive(0, -1, 0);
+TestPrimitive(0, 0, -1);
+TestPrimitive(-2, -2, -3);
+TestPrimitive(-2, -3, -2);
+
+TestPrimitive(1, 0.1, 1);
+TestPrimitive(1, 1, 0.1);
+TestPrimitive(4, 3.1, 4);
+TestPrimitive(4, 4, 3.1);
+TestPrimitive(0, -1.1, 0);
+TestPrimitive(0, 0, -1.1);
+TestPrimitive(-2, -2, -3.1);
+TestPrimitive(-2, -3.1, -2);
+
+
+// Test non-primitive values and watch for valueOf call order.
+function TestNonPrimitive(order, f) {
+  var result = "";
+  var x = { valueOf: function() { result += "x"; } };
+  var y = { valueOf: function() { result += "y"; } };
+  f(x, y);
+  assertEquals(order, result);
+}
+
+TestNonPrimitive("xy", MaxLT);
+TestNonPrimitive("yx", MaxLE);
+TestNonPrimitive("xy", MaxGE);
+TestNonPrimitive("yx", MaxGT);
+
+// Test compare in case of aliased registers.
+function CmpX(x) { if (x == x) return 42; }
+assertEquals(42, CmpX(0));
+
+function CmpXY(x) { var y = x; if (x == y) return 42; }
+assertEquals(42, CmpXY(0));
+
+
+// Test compare against null.
+function CmpNullValue(x) { return x == null; }
+assertEquals(false, CmpNullValue(42));
+
+function CmpNullTest(x) { if (x == null) return 42; return 0; }
+assertEquals(42, CmpNullTest(null));
+
+var g1 = 0;
+function CmpNullEffect() { (g1 = 42) == null; }
+CmpNullEffect();
+assertEquals(42, g1);
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/complex-for-in.js
similarity index 73%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/complex-for-in.js
index e8028ce..883f20a 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/complex-for-in.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,26 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+function TestNamed(m) {
+  var o = {};
+  var result = [];
+  for (o.p in m) result.push(o.p);
+  return result;
+}
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+assertArrayEquals(['x','y'], TestNamed({x:0, y:1}));
+assertArrayEquals(['0','1'], TestNamed([1,2]));
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
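+// The for-in target is the keyed element a[i++], so every iteration
+// stores the current key at a fresh index of a.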
+function TestKeyed(m) {
+  var a = [];
+  var result = [];
+  var i = 0;
+  for (a[i++] in m) result.push(a[i - 1]);
+  assertEquals(i, a.length);
+  return result;
+}
+
+
+assertArrayEquals(['x','y'], TestKeyed({x:0, y:1}));
+assertArrayEquals(['0','1'], TestKeyed([1,2]));
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/control-flow-0.js
similarity index 83%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/control-flow-0.js
index 6e292d6..bcf4f2d 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/control-flow-0.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
-
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function f() {
+  return (42 + (0 == 1 ? 1 : 2));
 }
 
-assertFalse(foo());
+
+function g(x) {
+  return (x + (0 == 1 ? 1 : 2));
+}
+
+
+function h(x) {
+  return ((x + 1) + (0 == 1 ? 1 : 2));
+}
+
+assertEquals(44, f());
+assertEquals(45, g(43));
+assertEquals(47, h(44));
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/control-flow-1.js
similarity index 78%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/control-flow-1.js
index 6e292d6..973d9b6 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/control-flow-1.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,31 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+var global = this;
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function f0(x) {
+  assertTrue(this === global);
+  return x;
 }
 
-assertFalse(foo());
+function g0(x, y) {
+  return f0(x == y);
+}
+
+assertTrue(g0(0, 0));
+assertFalse(g0(0, 1));
+
+
+var o = {};
+o.f1 = f1;
+function f1(x) {
+  assertTrue(this === o);
+  return x;
+}
+
+function g1(x, y) {
+  return o.f1(x == y);
+}
+
+assertTrue(g1(0, 0));
+assertFalse(g1(0, 1));
\ No newline at end of file
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/control-flow-2.js
similarity index 86%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/control-flow-2.js
index 6e292d6..26ed564 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/control-flow-2.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
-
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function f(a,b) {
+  return (b < a) - (a < b);
 }
 
-assertFalse(foo());
+assertEquals(0, f(0,0));
+assertEquals(1, f(1,0));
+assertEquals(-1, f(0,1));
diff --git a/test/mjsunit/compiler/count-deopt.js b/test/mjsunit/compiler/count-deopt.js
new file mode 100644
index 0000000..dcd82f8
--- /dev/null
+++ b/test/mjsunit/compiler/count-deopt.js
@@ -0,0 +1,150 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test deopt with count operation on parameter.
+var max_smi = 1073741823;
+var o = {x:0};
+
+function inc1(x) { x++; o.x = x; }
+inc1(max_smi);
+assertEquals(max_smi + 1, o.x);
+
+inc1(1.1);
+assertEquals(2.1, o.x);
+
+
+// Test deopt with count operation on named property.
+function inc2(p) { p.x++ }
+
+o.x = "42";
+inc2(o);
+assertEquals(43, o.x);
+
+var s = max_smi - 10000;
+o.x = s;
+for (var i = 0; i < 20000; i++) {
+  inc2(o);
+}
+assertEquals(max_smi + 10000, o.x);
+
+
+// Test deopt with count operation on keyed property.
+function inc3(a, b) { a[b]++; }
+
+o = ["42"];
+inc3(o, 0);
+assertEquals(43, o[0]);
+
+var s = max_smi - 10000;
+o[0] = s;
+for (var i = 0; i < 20000; i++) {
+  inc3(o, 0);
+}
+assertEquals(max_smi + 10000, o[0]);
+
+inc3(o,"0");
+
+assertEquals(max_smi + 10001, o[0]);
+
+// Test bailout when accessing a non-existing array element.
+o[0] = 0;
+for (var i = 0; i < 10000; i++) {
+  inc3(o, 0);
+}
+inc3(o,1);
+
+// Test bailout with count operation in a value context.
+function inc4(x,y) { return (x++) + y; }
+for (var i = 0; i < 100000; ++i) assertEquals(3, inc4(2, 1));
+assertEquals(3.1, inc4(2, 1.1));
+
+function inc5(x,y) { return (++x) + y; }
+for (var i = 0; i < 100000; ++i) assertEquals(4, inc5(2, 1));
+assertEquals(4.1, inc5(2, 1.1));
+assertEquals(4.1, inc5(2.1, 1));
+
+function inc6(o,y) { return (o.x++) + y; }
+o = {x:0};
+for (var i = 0; i < 10000; ++i) {
+  o.x = 42;
+  assertEquals(43, inc6(o, 1));
+}
+o.x = 42;
+assertEquals(43.1, inc6(o, 1.1));
+o.x = 42.1;
+assertEquals(43.1, inc6(o, 1));
+
+function inc7(o,y) { return (++o.x) + y; }
+o = {x:0};
+for (var i = 0; i < 10000; ++i) {
+  o.x = 42;
+  assertEquals(44, inc7(o, 1));
+}
+o.x = 42;
+assertEquals(44.1, inc7(o, 1.1));
+o.x = 42.1;
+assertEquals(44.1, inc7(o, 1));
+
+function inc8(o,y) { return (o[0]++) + y; }
+var q = [0];
+for (var i = 0; i < 100000; ++i) {
+  q[0] = 42;
+  assertEquals(43, inc8(q, 1));
+}
+q[0] = 42;
+assertEquals(43.1, inc8(q, 1.1));
+q[0] = 42.1;
+assertEquals(43.1, inc8(q, 1));
+
+function inc9(o,y) { return (++o[0]) + y; }
+q = [0];
+for (var i = 0; i < 100000; ++i) {
+  q[0] = 42;
+  assertEquals(44, inc9(q, 1));
+}
+q[0] = 42;
+assertEquals(44.1, inc9(q, 1.1));
+q[0] = 42.1;
+assertEquals(44.1, inc9(q, 1));
+
+// Test deopt because of a failed map check.
+function inc10(p) { return p.x++ }
+var g1 = {x:0};
+var g2 = {y:0, x:42};
+for (var i = 0; i < 10000; ++i) {
+  g1.x = 42;
+  assertEquals(42, inc10(g1));
+  assertEquals(43, g1.x);
+}
+assertEquals(42, inc10(g2));
+assertEquals(43, g2.x);
+
+// Test deoptimization with postfix operation in a value context.
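+// this.x starts out null; ToNumber(null) is 0, so a[0] is read while
+// this.x is left at 1.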
+function inc11(a) { return a[this.x++]; }
+var g3 = {x:null, f:inc11};
+var g4 = [42];
+assertEquals(42, g3.f(g4));
diff --git a/test/mjsunit/compiler/countoperation.js b/test/mjsunit/compiler/countoperation.js
index 5660cee..dca4c11 100644
--- a/test/mjsunit/compiler/countoperation.js
+++ b/test/mjsunit/compiler/countoperation.js
@@ -109,3 +109,23 @@
 assertEquals(45, b[c]);
 assertEquals(1, b[c]++ && 1);
 assertEquals(46, b[c]);
+
+// Test count operations with parameters.
+function f(x) { x++; return x; }
+assertEquals(43, f(42));
+
+function g(x) { ++x; return x; }
+assertEquals(43, g(42));
+
+function h(x) { var y = x++; return y; }
+assertEquals(42, h(42));
+
+function k(x) { var y = ++x; return y; }
+assertEquals(43, k(42));
+
+// Test count operation in a test context.
+function countTestPost(i) { var k = 0; while (i--) { k++; } return k; }
+assertEquals(10, countTestPost(10));
+
+function countTestPre(i) { var k = 0; while (--i) { k++; } return k; }
+assertEquals(9, countTestPre(10));
diff --git a/test/mjsunit/compiler/delete.js b/test/mjsunit/compiler/delete.js
new file mode 100644
index 0000000..373a1cb
--- /dev/null
+++ b/test/mjsunit/compiler/delete.js
@@ -0,0 +1,71 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests of unary delete in cases where it is always true or always false.
+
+// In an effect context, expression is always true.
+assertEquals(undefined, void (delete 0));
+// In an effect context, expression is always false.
+assertEquals(undefined, (function (x) { delete x; })(0));
+
+// In a pure test context, expression is always true.
+assertEquals(1, (delete 0) ? 1 : 2);
+// In a pure test context, expression is always false.
+assertEquals(2, (function (x) { return (delete x) ? 1 : 2; })(0));
+// In a negated test context, expression is always false.
+assertEquals(1, (function (x) { return !(delete x) ? 1 : 2; })(0));
+
+// In a hybrid test/value context, expression is always true, value
+// expected in accumulator.
+assertEquals(3, 1 + ((delete 0) && 2));
+// In a hybrid test/value context, expression is always false, value
+// expected in accumulator.
+assertEquals(false, (function (x) { return (delete x) && 2; })(0));
+// In a hybrid test/value context, expression is always true, value
+// expected on stack.
+assertEquals(3, ((delete 0) && 2) + 1);
+// In a hybrid test/value context, expression is always false, value
+// expected on stack.
+assertEquals(1, (function (x) { return ((delete x) && 2) + 1; })(0));
+
+// In a hybrid value/test context, expression is always true, value
+// expected in accumulator.
+assertEquals(2, 1 + ((delete 0) || 2));
+// In a hybrid value/test context, expression is always false, value
+// expected in accumulator.
+assertEquals(2, (function (x) { return (delete x) || 2; })(0));
+// In a hybrid value/test context, expression is always true, value
+// expected on stack.
+assertEquals(2, ((delete 0) || 2) + 1);
+// In a hybrid value/test context, expression is always false, value
+// expected on stack.
+assertEquals(3, (function (x) { return ((delete x) || 2) + 1; })(0));
+
+
+// 'this' at toplevel is different from all other global variables: it is
+// not a variable reference at all, so delete returns true without
+// deleting anything.
+assertEquals(true, delete this);
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/deopt-args.js
similarity index 82%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/deopt-args.js
index 6e292d6..780e2a2 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/deopt-args.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,19 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
-
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function g(x) {
+  return x.f(0,1,2);
 }
 
-assertFalse(foo());
+function f(a,b,c) {
+  return 42;
+}
+
+var object = { };
+object.f = f;
+for (var i = 0; i < 10000000; i++) {
+  assertEquals(42, g(object));
+}
+
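+// Replacing f should invalidate the optimized (possibly inlined) call
+// target in g, forcing a deopt before the new function is called.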
+object.f = function(a,b,c) { return 87; };
+assertEquals(87, g(object));
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/deopt-inlined-smi.js
similarity index 74%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/deopt-inlined-smi.js
index e8028ce..dda083e 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/deopt-inlined-smi.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,40 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+// Flags: --always-opt --always-inline-smi-code
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+// Test deoptimization into inlined smi code.
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+function f(x) {
+  return ~x;
+}
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+f(42);
+assertEquals(~12, f(12.45));
+assertEquals(~42, f(42.87));
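+// ~ truncates its operand with ToInt32, so the doubles collapse to the
+// smis the code was specialized for: ~12.45 === ~12 and ~42.87 === ~42.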
+
+
+var a = 1, b = 2, c = 4, d = 8;
+function g() {
+  return a | (b | (c | d));
+}
+
+g();
+c = "16";
+assertEquals(1 | 2 | 16 | 8, g());
+
+
+function h() {
+  return 1 | a;
+}
+a = "2";
+h();
+assertEquals(3, h());
+
+
+function k() {
+  return a | 1;
+}
+a = "4";
+k();
+assertEquals(5, k());
diff --git a/test/mjsunit/compiler/expression-trees.js b/test/mjsunit/compiler/expression-trees.js
new file mode 100644
index 0000000..fac6b4c
--- /dev/null
+++ b/test/mjsunit/compiler/expression-trees.js
@@ -0,0 +1,107 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --always-opt --nocompilation-cache
+
+// Given a binary operation string and an ordered array of leaf
+// strings, return an array of all binary tree strings with the leaves
+// (in order) as the fringe.
+function makeTrees(op, leaves) {
+  var len = leaves.length;
+  if (len == 1) {
+    // One leaf is a leaf.
+    return leaves;
+  } else {
+    // More than one leaf requires an interior node.
+    var result = [];
+    // Split the leaves into left and right subtrees in all possible
+    // ways.  For each split recursively compute all possible subtrees.
+    for (var i = 1; i < len; ++i) {
+      var leftTrees = makeTrees(op, leaves.slice(0, i));
+      var rightTrees = makeTrees(op, leaves.slice(i, len));
+      // Adjoin every possible left and right subtree.
+      for (var j = 0; j < leftTrees.length; ++j) {
+        for (var k = 0; k < rightTrees.length; ++k) {
+          var string = "(" + leftTrees[j] + op + rightTrees[k] + ")";
+          result.push(string);
+        }
+      }
+    }
+    return result;
+  }
+}
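+
+// For example, makeTrees("|", ["a","b","c"]) returns the two trees
+// "(a|(b|c))" and "((a|b)|c)".  In general n ordered leaves produce
+// Catalan(n-1) trees, hence the 429 = Catalan(7) trees below.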
+
+// All 429 possible bitwise OR trees with eight leaves.
+var identifiers = ['a','b','c','d','e','f','g','h'];
+var or_trees = makeTrees("|", identifiers);
+var and_trees = makeTrees("&", identifiers);
+
+// Set up leaf masks to set 8 least-significant bits.
+var a = 1 << 0;
+var b = 1 << 1;
+var c = 1 << 2;
+var d = 1 << 3;
+var e = 1 << 4;
+var f = 1 << 5;
+var g = 1 << 6;
+var h = 1 << 7;
+
+for (var i = 0; i < or_trees.length; ++i) {
+  for (var j = 0; j < 8; ++j) {
+    var or_fun = new Function("return " + or_trees[i]);
+    if (j == 0) assertEquals(255, or_fun());
+
+    // Set the j'th variable to a string to force a bailout.
+    eval(identifiers[j] + "+= ''");
+    assertEquals(255, or_fun());
+    // Set it back to a number for the next iteration.
+    eval(identifiers[j] + "= +" + identifiers[j]);
+  }
+}
+
+// Set up leaf masks to clear 8 least-significant bits.
+a ^= 255;
+b ^= 255;
+c ^= 255;
+d ^= 255;
+e ^= 255;
+f ^= 255;
+g ^= 255;
+h ^= 255;
+
+for (i = 0; i < and_trees.length; ++i) {
+  for (var j = 0; j < 8; ++j) {
+    var and_fun = new Function("return " + and_trees[i]);
+    if (j == 0) assertEquals(0, and_fun());
+
+    // Set the j'th variable to a string to force a bailout.
+    eval(identifiers[j] + "+= ''");
+    assertEquals(0, and_fun());
+    // Set it back to a number for the next iteration.
+    eval(identifiers[j] + "= +" + identifiers[j]);
+  }
+}
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/for-stmt.js
similarity index 76%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/for-stmt.js
index 6e292d6..c8af01c 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/for-stmt.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,35 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+
+// Test variants of for loops.
+function f(i, p) {
+  for(; i < 10; ) {
+    p.x = p.x + 1;
+    i = i+1;
+  }
+}
+var o = {x:42};
+f(1, o);
+assertEquals(51, o.x);
 
 
-// Flags: --nofull-compiler
+function g(i, p) {
+  for(; ; ) {
+    if (i == 10) return;
+    p.x = p.x + 1;
+    i = i+1;
+  }
+}
+o = {x:42};
+g(1, o);
+assertEquals(51, o.x);
 
-function foo() {
-  return (0 > ("10"||10) - 1);
+
+function h(p) {
+  for(; p.x < 10; p.x++) {}
 }
 
-assertFalse(foo());
+var o = {x:0};
+h(o);
+assertEquals(10, o.x);
diff --git a/test/mjsunit/compiler/globals.js b/test/mjsunit/compiler/globals.js
index 0abd5dd..3b778da 100644
--- a/test/mjsunit/compiler/globals.js
+++ b/test/mjsunit/compiler/globals.js
@@ -63,3 +63,14 @@
 code = "g--; 1";
 assertEquals(1, eval(code));
 assertEquals(3, g);
+
+// Test simple assignment to non-deletable and deletable globals.
+var glo1 = 0;
+function f1(x) { glo1 = x; }
+f1(42);
+assertEquals(42, glo1);
+
+glo2 = 0;
+function f2(x) { glo2 = x; }
+f2(42);
+assertEquals(42, glo2);
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/inline-compare.js
similarity index 74%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/inline-compare.js
index e8028ce..6efe154 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/inline-compare.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,22 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+// Test that we can inline a function that returns the result of
+// a compare operation.
+function TestInlineCompare(o) {
+  // Effect context.
+  o.f();
+  // Value context.
+  var x = o.f();
+  assertFalse(x);
+  assertFalse(o.f());
+  // Test context.
+  if (o.f()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
-
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
-
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+var o = {};
+o.f = function() { return 0 === 1; };
+for (var i = 0; i < 10000000; i++) TestInlineCompare(o);
+TestInlineCompare({f: o.f});
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/inline-conditional.js
similarity index 71%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/inline-conditional.js
index e8028ce..941f74a 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/inline-conditional.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,22 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+// Test that we can inline a function that returns the result of
+// a conditional operation.
+function TestInlineConditional(o) {
+  // Effect context.
+  o.f();
+  // Value context.
+  var x = o.f();
+  assertEquals(87, x);
+  assertEquals(87, o.f());
+  // Test context.
+  if (!o.f()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
-
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
-
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+var o = {x:false,y:42,z:87};
+o.f = function() { return this.x ? this.y : this.z; };
+for (var i = 0; i < 10000; i++) TestInlineConditional(o);
+TestInlineConditional({x:true,y:87,z:42,f: o.f});
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/inline-global-access.js
similarity index 72%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/inline-global-access.js
index e8028ce..3795173 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/inline-global-access.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,25 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+// Test that we can inline a function that returns the result of a
+// global variable load.
+var GLOBAL;
+function TestInlineGlobalLoad(o) {
+  // Effect context.
+  GLOBAL = 42;
+  o.f();
+  // Value context.
+  var x = o.f();
+  assertEquals(42, x);
+  GLOBAL = 87;
+  assertEquals(87, o.f());
+  // Test context.
+  if (!o.f()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
-
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
-
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+var o = {};
+o.f = function() { return GLOBAL; };
+for (var i = 0; i < 10000000; i++) TestInlineGlobalLoad(o);
+TestInlineGlobalLoad({f: o.f});
diff --git a/test/mjsunit/compiler/inline-param.js b/test/mjsunit/compiler/inline-param.js
new file mode 100644
index 0000000..8e0933a
--- /dev/null
+++ b/test/mjsunit/compiler/inline-param.js
@@ -0,0 +1,80 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we can inline a call with a parameter.
+function TestInlineOneParam(o, p) {
+  // Effect context.
+  o.f(p);
+  // Value context.
+  var x = o.f(p);
+  assertEquals(42, x);
+  assertEquals(42, o.f(p));
+  // Test context.
+  if (!o.f(p)) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var obj = {x:42};
+var o1 = {};
+o1.f = function(o) { return o.x; };
+for (var i = 0; i < 10000; i++) TestInlineOneParam(o1, obj);
+TestInlineOneParam({f: o1.f}, {x:42});
+
+
+function TestInlineTwoParams(o, p) {
+  var y = 43;
+  // Effect context.
+  o.h(y, y);
+  // Value context.
+  var x = o.h(p, y);
+  assertEquals(true, x);
+  assertEquals(false, o.h(y, p));
+  // Test context.
+  if (!o.h(p, y)) {
+    assertTrue(false);  // Should not happen.
+  }
+
+  // Perform the same tests again, but this time with non-trivial
+  // expressions as the parameters.
+
+  // Effect context.
+  o.h(y + 1, y + 1);
+  // Value context.
+  var x = o.h(p + 1, y + 1);
+  assertEquals(true, x);
+  assertEquals(false, o.h(y + 1, p + 1));
+  // Test context.
+  if (!o.h(p + 1, y + 1)) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o2 = {};
+o2.h = function(i, j) { return i < j; };
+for (var i = 0; i < 10000; i++) TestInlineTwoParams(o2, 42);
+TestInlineTwoParams({h: o2.h}, 42);
diff --git a/test/mjsunit/compiler/inline-two.js b/test/mjsunit/compiler/inline-two.js
new file mode 100644
index 0000000..30f579d
--- /dev/null
+++ b/test/mjsunit/compiler/inline-two.js
@@ -0,0 +1,93 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we can inline a function that calls another function.
+function TestInlineX(o) {
+  // Effect context.
+  o.g();
+  // Value context.
+  var x = o.g();
+  assertEquals(42, x);
+  assertEquals(42, o.g());
+  // Test context.
+  if (!o.g()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o2 = {};
+o2.size = function() { return 42; }
+o2.g = function() { return this.size(); };
+for (var i = 0; i < 10000; i++) TestInlineX(o2);
+TestInlineX({g: o2.g, size:o2.size});
+
+
+// Test that we can inline a call on a non-variable receiver.
+function TestInlineX2(o) {
+  // Effect context.
+  o.h();
+  // Value context.
+  var x = o.h();
+  assertEquals(42, x);
+  assertEquals(42, o.h());
+  // Test context.
+  if (!o.h()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var obj = {}
+obj.foo = function() { return 42; }
+var o3 = {};
+o3.v = obj;
+o3.h = function() { return this.v.foo(); };
+for (var i = 0; i < 10000; i++) TestInlineX2(o3);
+TestInlineX2({h: o3.h, v:obj});
+
+
+// Test that we can inline a call whose receiver is itself the result of
+// an inlined call.
+function TestInlineFG(o) {
+  // Effect context.
+  o.h();
+  // Value context.
+  var x = o.h();
+  assertEquals(42, x);
+  assertEquals(42, o.h());
+  // Test context.
+  if (!o.h()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var obj = {}
+obj.g = function() { return 42; }
+var o3 = {};
+o3.v = obj;
+o3.f = function() { return this.v; }
+o3.h = function() { return this.f().g(); };
+for (var i = 0; i < 10000; i++) TestInlineFG(o3);
+TestInlineFG({h: o3.h, f: o3.f, v:obj});
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/logical-and.js
similarity index 62%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/logical-and.js
index 6e292d6..1d31a0a 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/logical-and.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,46 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
-
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function AndBB(x,y) {
+  return (x == 0) && (y == 0);
 }
 
-assertFalse(foo());
+function AndBN(x,y) {
+  return (x == 0) && y;
+}
+
+function AndNB(x,y) {
+  return x && (y == 0);
+}
+
+function AndNN(x,y) {
+  return x && y;
+}
+
+assertTrue(AndBB(0, 0));
+assertFalse(AndBB(1, 0));
+assertFalse(AndBB(0, 1));
+assertFalse(AndBB(1, 1));
+
+assertFalse(AndBN(0, 0));
+assertTrue(AndBN(0, 1));
+assertFalse(AndBN(1, 0));
+assertEquals(1, AndBN(0, 1));
+assertEquals(2, AndBN(0, 2));
+assertFalse(AndBN(1, 1));
+assertFalse(AndBN(1, 2));
+
+assertEquals(0, AndNB(0, 0));
+assertTrue(AndNB(1, 0));
+assertEquals(0, AndNB(0, 1));
+assertEquals("", AndNB("", 1));
+assertFalse(AndNB(1, 1));
+assertTrue(AndNB(2, 0));
+
+assertEquals(0, AndNN(0, 0));
+assertEquals(0, AndNN(1, 0));
+assertEquals(0, AndNN(2, 0));
+assertEquals(0, AndNN(0, 1));
+assertEquals(0, AndNN(0, 2));
+assertEquals(1, AndNN(1, 1));
+assertEquals(2, AndNN(3, 2));
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/logical-or.js
similarity index 65%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/logical-or.js
index e8028ce..87c630d 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/logical-or.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,42 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+function OrBB(x,y) {
+  return (x == 0) || (y == 0);
+}
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+function OrBN(x,y) {
+  return (x == 0) || y;
+}
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+function OrNB(x,y) {
+  return x || (y == 0);
+}
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+function OrNN(x,y) {
+  return x || y;
+}
+
+assertTrue(OrBB(0, 0));
+assertTrue(OrBB(1, 0));
+assertTrue(OrBB(0, 1));
+assertFalse(OrBB(1, 1));
+
+assertTrue(OrBN(0, 0));
+assertEquals(0, OrBN(1, 0));
+assertTrue(OrBN(0, 1));
+assertEquals(1, OrBN(1, 1));
+assertEquals(2, OrBN(1, 2));
+
+assertTrue(OrNB(0, 0));
+assertEquals(1, OrNB(1, 0));
+assertFalse(OrNB(0, 1));
+assertEquals(1, OrNB(1, 1));
+assertEquals(2, OrNB(2, 1));
+
+assertEquals(0, OrNN(0, 0));
+assertEquals(1, OrNN(1, 0));
+assertEquals(2, OrNN(2, 0));
+assertEquals(1, OrNN(0, 1));
+assertEquals(2, OrNN(0, 2));
+assertEquals(1, OrNN(1, 2));
diff --git a/test/mjsunit/compiler/loops.js b/test/mjsunit/compiler/loops.js
index 4de45e7..2195c6c 100644
--- a/test/mjsunit/compiler/loops.js
+++ b/test/mjsunit/compiler/loops.js
@@ -33,3 +33,29 @@
   n = n * i;
 }
 assertEquals(120, n);
+
+// Test assignments in the loop condition.
+function f(i, n) {
+  while((n = n - 1) >= 0) {
+    i = n + 1;
+  }
+  return i;
+}
+assertEquals(1, f(0, 42));
+
+
+// Test do-while loop and continue.
+function g(a) {
+  var x = 0, c = 0;
+  do {
+    x = x + 1;
+    if (x < 5) continue;
+    c = c + 1;
+  } while(x < a);
+  return c;
+}
+
+assertEquals(6, g(10));
+
+// Test deoptimization in the loop condition.
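+// The string argument deopts the compare in the do-while condition; the
+// comparison is false (NaN), so the body runs once and c stays 0.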
+assertEquals(0, g("foo"));
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/null-compare.js
similarity index 66%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/null-compare.js
index e8028ce..e01b555 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/null-compare.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,30 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+function IsNull(x) {
+  if (x == null) return true; else return false;
+}
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+assertTrue(IsNull(null), "null == null");
+assertTrue(IsNull(void 0), "void 0 == null");
+assertFalse(IsNull(42), "42 != null");
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+function IsNullStrict(x) {
+  if (x === null) return true; else return false;
+}
+
+assertTrue(IsNullStrict(null), "null === null");
+assertFalse(IsNullStrict(void 0), "void 0 != null");
+assertFalse(IsNullStrict(87), "87 !== null");
+
+
+function GimmeFalse(x) {
+  if ((x & 1) == null) return true;
+  if ((x | 3) === null) return true;
+  return false;
+}
+
+assertFalse(GimmeFalse(1), "GimmeFalse(1)");
+assertFalse(GimmeFalse(null), "GimmeFalse(null)");
+assertFalse(GimmeFalse({}), "GimmeFalse({})");
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/optimized-function-calls.js
similarity index 60%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/optimized-function-calls.js
index 6e292d6..1b5f3b0 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/optimized-function-calls.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,55 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Flags: --expose-gc
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function f() {
+  gc();
+  return 87;
 }
 
-assertFalse(foo());
+
+var x = 42, y = 99;
+function g() {
+  return x | f() | (y | (x | (f() | x)));
+}
+f();  // Give us a chance to optimize f.
+assertEquals(42 | 87 | 99, g());
+
+
+// Regression test for an issue where we would try to do an illegal
+// compile-time lookup on a null prototype.
+var object = { f: function() { return 42; }, x: 42 };
+delete object.x;
+function call_f(o) {
+  return o.f();
+}
+for (var i = 0; i < 10000000; i++) call_f(object);
+
+
+// Check that nested global function calls work.
+function f0() {
+  return 42;
+}
+
+function f1(a) {
+  return a;
+}
+
+function f2(a, b) {
+  return a * b;
+}
+
+function f3(a, b, c) {
+  return a + b - c;
+}
+
+function f4(a, b, c, d) {
+  return a * b + c - d;
+}
+
+function nested() {
+  return f4(f3(f2(f1(f0()),f0()),f1(f0()),f0()),f2(f1(f0()),f0()),f1(f0()),f0())
+    + f4(f0(),f1(f0()),f2(f1(f0()),f0()),f3(f2(f1(f0()),f0()),f1(f0()),f0()));
+}
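+// Every f0() is 42, so the first f4 call computes 1764 * 1764 + 42 - 42
+// = 3111696 and the second 42 * 42 + 1764 - 1764 = 1764, summing to
+// 3113460.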
+assertEquals(3113460, nested());
diff --git a/test/mjsunit/compiler/pic.js b/test/mjsunit/compiler/pic.js
new file mode 100644
index 0000000..a0b5d8f
--- /dev/null
+++ b/test/mjsunit/compiler/pic.js
@@ -0,0 +1,66 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function GetX(o) { return o.x; }
+function CallF(o) { return o.f(); }
+function SetX(o) { o.x = 42; }
+function SetXY(o,y) { return o.x = y; }
+
+
+function Test(o) {
+  SetX(o);
+  assertEquals(42, GetX(o));
+  assertEquals(87, SetXY(o, 87));
+  assertEquals(87, GetX(o));
+  assertTrue(SetXY(o, o) === o);
+  assertTrue(o === GetX(o), "o === GetX(o)");
+  assertEquals("hest", SetXY(o, "hest"));
+  assertEquals("hest", GetX(o));
+  assertTrue(SetXY(o, Test) === Test);
+  assertTrue(Test === GetX(o), "Test === GetX(o)");
+  assertEquals(99, CallF(o));
+}
+
+// Create a bunch of objects with different layouts.
+var o1 = { x: 0, y: 1 };
+var o2 = { y: 1, x: 0 };
+var o3 = { y: 1, z: 2, x: 0 };
+o1.f = o2.f = o3.f = function() { return 99; }
+
+// Run the test until we're fairly sure we've optimized the
+// polymorphic property access.
+for (var i = 0; i < 1000000; i++) {
+  Test(o1);
+  Test(o2);
+  Test(o3);
+}
+
+// Make sure that the following doesn't crash.
+GetX(0);
+SetX(0);
+SetXY(0, 0);
+assertThrows("CallF(0)", TypeError);
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/property-calls.js
similarity index 82%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/property-calls.js
index 6e292d6..3366971 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/property-calls.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+function f(o) { return o.g(); }
+function g() { return 42; }
 
+var object = { };
+object.g = g;
+for (var i = 0; i < 10000000; i++) f(object);
+assertEquals(42, f(object));
 
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
-}
-
-assertFalse(foo());
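+// A fresh object whose g is a different function must fail the checks
+// guarding the optimized call in f and produce 87 instead of 42.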
+object = { g: function() { return 87; } };
+assertEquals(87, f(object));
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/property-refs.js
similarity index 74%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/property-refs.js
index e8028ce..3f6f793 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/property-refs.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,27 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+function Load(o) {
+  return o.outer.x | o.outer.inner.y;
+}
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+function StoreXY(o, x, y) {
+  o.outer.x = x;
+  o.outer.inner.y = y;
+}
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+function LoadXY(x, y) {
+  var object = {
+    outer: {
+      x: 0,
+      inner: { y: 0 }
+    }
+  };
+  StoreXY(object, x, y);
+  return Load(object);
+}
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+for (var i = 0; i < 10000; i++) LoadXY(i, i);
+assertEquals(42 | 87, LoadXY(42, 87));
+assertEquals(42 | 87, LoadXY(42, 87));
+assertEquals(42 | 99, LoadXY(42, "99"));
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/property-stores.js
similarity index 77%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/property-stores.js
index 6e292d6..0dec82a 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/property-stores.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,19 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+var a = 42;
 
+var obj = {x: 0,
+           f: function() { this.x = 7; },
+           g: function() { this.x = a | 1; },
+           h: function() { this.x = a; }};
 
-// Flags: --nofull-compiler
+var i;
+for (i = 0; i < 10000; i++) { obj.f(); }
+assertEquals(7, obj.x);
 
-function foo() {
-  return (0 > ("10"||10) - 1);
-}
+for (i = 0; i < 10000; i++) { obj.g(); }
+assertEquals(43, obj.x);
 
-assertFalse(foo());
+for (i = 0; i < 10000; i++) { obj.h(); }
+assertEquals(42, obj.x);
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/recursive-deopt.js
similarity index 72%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/recursive-deopt.js
index e8028ce..366f59a 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/recursive-deopt.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,24 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+function f(n) {
+  // Force deopt in both leaf case and when returning. To make
+  // debugging easier, the operation that bails out (<<) is so simple
+  // that it doesn't cause GCs.
+  if (n == 0) return 1 << one;
+  return f(n - 1) << one;
+}
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+function RunTests() {
+  assertEquals(1 << 1, f(0));
+  assertEquals(1 << 2, f(1));
+  assertEquals(1 << 5, f(4));
+}
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+
+var one = 1;
+for (var i = 0; i < 1000000; i++) RunTests();
+
+var one = { valueOf: function() { return 1; } };
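+// Making one an object forces the shift to call valueOf, so the
+// optimized smi code should bail out in every active frame of the
+// recursion, both in the leaf case and when returning.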
+for (var j = 0; j < 100000; j++) RunTests();
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-0.js
similarity index 84%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-0.js
index 6e292d6..df6dfee 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-0.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
-
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function TestNestedLoops() {
+  var sum = 0;
+  for (var i = 0; i < 200; i = i + 1) {
+    for (var j = 0; j < 200; j = j + 1) {
+      sum = sum + 1;
+    }
+  }
+  return sum;
 }
-
-assertFalse(foo());
+assertEquals(200 * 200, TestNestedLoops());
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-1.js
similarity index 82%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-1.js
index 6e292d6..cbae1a8 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-1.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
-
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function DaysInYear(y) {
+  if (y % 4 != 0) return 365;
+  if (y % 4 == 0 && y % 100 != 0) return 366;
+  if (y % 100 == 0 && y % 400 != 0) return 365;
+  if (y % 400 == 0) return 366;
 }
-
-assertFalse(foo());
+assertEquals(365, DaysInYear(1999));
+assertEquals(366, DaysInYear(2000));
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-2.js
similarity index 76%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-2.js
index 6e292d6..a26ef32 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-2.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
-
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+// The compilation of this function currently fails when resolving
+// control flow in the register allocator.
+function TestCreateString(n)
+{
+  var l = n * 1;
+  var r = 'r';
+  while (r.length < n)
+  {
+    r = r + r;
+  }
+  return r;
 }
 
-assertFalse(foo());
+assertEquals("r", TestCreateString(1));
+assertEquals("rr", TestCreateString(2));
+assertEquals("rrrr", TestCreateString(3));
+assertEquals("rrrrrrrr", TestCreateString(6));
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-3.js
similarity index 86%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-3.js
index 6e292d6..6aa7078 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-3.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
-
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function fib(n) {
+  var f0 = 0, f1 = 1;
+  for (; n > 0; n = n -1) {
+    var f2 = f0 + f1;
+    f0 = f1; f1 = f2;
+  }
+  return f0;
 }
 
-assertFalse(foo());
+assertEquals(2111485077978050, fib(75));
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/regress-3136962.js
similarity index 72%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/regress-3136962.js
index e8028ce..147d833 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/regress-3136962.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,27 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+// Reduced regression test for a global value numbering bug.  Original
+// value of global variable height was reused even after reassignment.
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+var height = 267;
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+var count = 0;
+function inner() { height = 0; ++count; }
+function outer() {}
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+function test() {
+  for (var i = 0; i < height; ++i) {
+    for (var j = -6; j < 7; ++j) {
+      if (i + j < 0 || i + j >= height) continue;
+      for (var k = -6; k < 7; ++k) {
+        inner();
+      }
+    }
+    outer();
+  }
+}
+
+test();
+
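+// Only the first pass reaches the innermost loop: its 13 iterations
+// (k = -6..6) each call inner(), and the very first call sets height to
+// 0, so all later loop conditions fail.  Reusing the original height
+// after the reassignment would change this count.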
+assertEquals(13, count);
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-3185901.js
similarity index 86%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-3185901.js
index 6e292d6..1e1bbe7 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-3185901.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Inlined function call in a test context.  Should never crash even
+// with --always-opt.
+var x;
 
+function f() { if (g()) { } }
+function g() { if (x) { return true; } }
 
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
-}
-
-assertFalse(foo());
+f();
diff --git a/test/mjsunit/compiler/regress-3218915.js b/test/mjsunit/compiler/regress-3218915.js
new file mode 100644
index 0000000..d27c319
--- /dev/null
+++ b/test/mjsunit/compiler/regress-3218915.js
@@ -0,0 +1,48 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for failure to deoptimize properly when the most recent
+// side effect occurred in a comma expression in an effect context.
+
+// An unoptimizable function, calling it is a side effect.
+function side_effect() { try {} finally {} return "wrong"; }
+
+// A function to observe the value of its first argument.
+function observe(x, y) { try {} finally {} return x; }
+
+// If we optimize for x being a smi, then a string x will deopt.  The side
+// effect immediately before the deopt is in a comma expression in an
+// effect context (i.e., itself the left subexpression of a comma
+// expression).
+function test(x) { return observe(this, ((0, side_effect()), x + 1)); }
+
+// Run test enough times to get it optimized.
+for (var i = 0; i < 1000000; ++i) test(0);
+
+// Force test to deopt.  If it behaves normally, it should return the global
+// object.  If the value of the call to side_effect() is lingering after the
+// deopt, it will return the string "wrong".
+assertFalse(test("a") === "wrong");
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/regress-3249650.js
similarity index 69%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/regress-3249650.js
index e8028ce..1f06090 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/regress-3249650.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,29 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+// Among other things, this code covers the case of deoptimization
+// after a compare expression in an effect context.
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+function f0(x) { try { } catch (e) {}}
+function f1(x) { try { } catch (e) {}}
+function f2(x) { try { } catch (e) {}}
+function f3(x) { try { } catch (e) {}}
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+var object = { a: "", b: false, c: {}};
+object.f = function(x) { return this; }
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+
+function test(x) {
+  f0(x);
+  f1(x);
+  f2(x);
+  f3(x);
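+  // The compare below is in an effect context: its value is discarded,
+  // but a deopt right after it must still resume correctly.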
+  x.a.b == "";
+  object.f("A").b = true;
+  object.f("B").a = "";
+  object.f("C").c.display = "A";
+  object.f("D").c.display = "A";
+}
+
+var x = {a: {b: "" }};
+for (var i = 0; i < 1000000; i++) test(x);
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-3260426.js
similarity index 79%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-3260426.js
index 6e292d6..dfef424 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-3260426.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,12 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Falling off the end of a function returns the undefined value
+// (false in a test context).  This should happen even when inlined
+// (e.g., if --always-opt) and when it is the only exit from the
+// function.
+function always_false() {}
+function test() { return always_false() ? 0 : 1; }
 
+assertEquals(1, test());
 
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
-}
-
-assertFalse(foo());
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-4.js
similarity index 83%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-4.js
index 6e292d6..0ec9a12 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-4.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
-
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+// Test deoptimization after a loop.
+function f(p) {
+  var y=0;
+  for (var x=0; x<10; x++) {
+    if (x > 5) { y=y+p; break;}
+  }
+  return y+x;
 }
 
-assertFalse(foo());
+for (var i=0; i<10000000; i++) f(42);
+
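+// f("foo") deopts at y + p: y becomes the string "0foo", the loop breaks
+// with x == 6, and y + x concatenates to "0foo6".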
+var result = f("foo");
+assertEquals("0foo6", result);
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-5.js
similarity index 83%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-5.js
index 6e292d6..5488d0e 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-5.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,19 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Test breaking out of labelled blocks.
+function f(y) {
+  var x = 0;
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+  foo: {
+    x++;
+    bar: {
+       if (y == 0) break bar; else break foo;
+    }
+    x++;
+  }
+  return x;
 }
 
-assertFalse(foo());
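+// f(0) breaks only out of 'bar', so both increments run; f(1) breaks out
+// of 'foo' and skips the second increment.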
+assertEquals(2, f(0));
+assertEquals(1, f(1));
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-6.js
similarity index 76%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-6.js
index 6e292d6..e92b0e5 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-6.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
-
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function f(a, b, c) {
+  if (a == 0 || b == 0) return a;
+  return a + c;
 }
 
-assertFalse(foo());
+assertEquals(0, f(0, 0, 0));
+assertEquals(0, f(0, 1, 0));
+assertEquals(1, f(1, 0, 0));
+assertEquals(2, f(2, 1, 0));
+
+// Force deoptimization in --always-opt mode when evaluating
+// the 'a + c' expression. Make sure this doesn't end up
+// returning 'a'.
+assertEquals(1.5, f(1, 1, 0.5));
+assertEquals(2.5, f(2, 1, 0.5));
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-7.js
similarity index 84%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-7.js
index 6e292d6..d6034f9 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-7.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Test correct truncation of tagged values.
+var G = 42;
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function f() {
+  var v = G;
+  var w = v >> 0;
+  return w;
 }
 
-assertFalse(foo());
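+// Optimize f while G is a small integer, then check that a value outside
+// the small-integer range is still truncated correctly by the shift.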
+for(var i=0; i<10000; i++) f();
+
+assertEquals(G, f());
+G = 2000000000;
+assertEquals(G, f());
diff --git a/test/mjsunit/compiler/regress-8.js b/test/mjsunit/compiler/regress-8.js
new file mode 100644
index 0000000..3a23885
--- /dev/null
+++ b/test/mjsunit/compiler/regress-8.js
@@ -0,0 +1,109 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for the register allocator.
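+// The deeply nested concatenations below keep many values live at once,
+// which creates high register pressure.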
+var gp = "";
+var yE = "";
+var W = "";
+var LA = "";
+var zE = "";
+var Fp = "";
+var AE = "";
+var Gob = "";
+var Hob = "";
+var Iob = "";
+var Job = "";
+var Kob = "";
+var Lob = "";
+var Mob = "";
+var p = "";
+function O() { this.append = function(a,b,c,d,e) { return a + b + c + d + e; } }
+
+function Nob(b,a) {
+ var c;
+ if (b==2) {
+   c=new O;
+   c.append(gp,
+            yE,
+            W,
+            LA+(a.Un+(zE+(Fp+(LA+(a.Im+(zE+(AE+(LA+(a.total+Gob))))))))),
+            p);
+   c=c.toString();
+ } else {
+   if (b==1) {
+     if(a.total>=2E6) {
+       c=new O;
+       c.append(gp,yE,W,LA+(a.Un+(zE+(Fp+(LA+(a.Im+Hob))))),p);
+       c=c.toString();
+     } else {
+       if(a.total>=2E5) {
+         c=new O;
+         c.append(gp,yE,W,LA+(a.Un+(zE+(Fp+(LA+(a.Im+Iob))))),p);
+         c=c.toString();
+       } else {
+         if(a.total>=2E4) {
+           c=new O;
+           c.append(gp,yE,W,LA+(a.Un+(zE+(Fp+(LA+(a.Im+Job))))),p);
+           c=c.toString();
+         } else {
+           if(a.total>=2E3) {
+             c=new O;
+             c.append(gp,yE,W,LA+(a.Un+(zE+(Fp+(LA+(a.Im+Kob))))),p);
+             c=c.toString();
+           } else {
+             if(a.total>=200) {
+               c=new O;
+               c.append(gp,yE,W,LA+(a.Un+(zE+(Fp+(LA+(a.Im+Lob))))),p);
+               c=c.toString();
+             } else {
+               c=new O;
+               c.append(gp,yE,W,
+                        LA+(a.Un+(zE+(Fp+(LA+(a.Im+(zE+(Mob+(LA+(a.total+zE))))))))),
+                        p);
+               c=c.toString();
+             }
+             c=c;
+           }
+           c=c;
+         }
+         c=c;
+       }
+       c=c;
+     }
+     c=c;
+   } else {
+     c=new O;
+     c.append(gp,yE,W,
+              LA+(a.Un+(zE+(Fp+(LA+(a.Im+(zE+(AE+(LA+(a.total+zE))))))))),
+              p);
+     c=c.toString();
+   }
+   c=c;
+ }
+ return c;
+}
+Nob(2, { Un: "" , Im: "" , total: 42});
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/regress-arguments.js
similarity index 73%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/regress-arguments.js
index e8028ce..234d3fb 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/regress-arguments.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,25 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+// Test of arguments.
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+// Test passing null or undefined as receiver.
+function f() { return this.foo; }
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+function g() { return f.apply(null, arguments); }
+function h() { return f.apply(void 0, arguments); }
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
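+// In non-strict code a null or undefined receiver is replaced by the
+// global object, so this.foo in f reads the global variable below.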
+var foo = 42;
+
+for (var i=0; i<1000000; i++) assertEquals(42, g());
+for (var i=0; i<1000000; i++) assertEquals(42, h());
+
+var G1 = 21;
+var G2 = 22;
+
+function u() {
+ var v = G1 + G2;
+ return f.apply(v, arguments);
+}
+
+for (var i=0; i<1000000; i++) assertEquals(void 0, u());
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-arrayliteral.js
similarity index 86%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-arrayliteral.js
index 6e292d6..8938785 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-arrayliteral.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Regression test for array literals.
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
-}
-
-assertFalse(foo());
+var G = 41;
+var H = 42;
+function f() { var v = [G,H]; return v[1]; }
+assertEquals(42, f());
diff --git a/test/mjsunit/compiler/regress-funarguments.js b/test/mjsunit/compiler/regress-funarguments.js
new file mode 100644
index 0000000..cea40bc
--- /dev/null
+++ b/test/mjsunit/compiler/regress-funarguments.js
@@ -0,0 +1,82 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test function.arguments.
+
+function A() {}
+function B() {}
+
+function fee(x, y) {
+  if (x == 1) return fee["arg" + "uments"];
+  if (x == 2) return gee["arg" + "uments"];
+  return 42;
+}
+
+function gee(x) { return this.f(2 - x, "f"); }
+
+function foo(x, y) {
+  if (x == 0) return foo["arg" + "uments"];
+  if (x == 1) return goo["arg" + "uments"];
+  return 42;
+}
+
+function goo(x) { return this.f(x, "f"); }
+
+A.prototype.f = fee;
+A.prototype.g = gee;
+
+B.prototype.f = foo;
+B.prototype.g = goo;
+
+var o = new A();
+
+function hej(x) {
+  if (x == 0) return o.g(x, "h");
+  if (x == 1) return o.g(x, "h");
+  return o.g(x, "z");
+}
+
+function stress() {
+  for (var i=0; i<5000000; i++) o.g(i, "g");
+  for (var j=0; j<5000000; j++) hej(j);
+}
+
+stress();
+
+assertArrayEquals([0, "g"], o.g(0, "g"));
+assertArrayEquals([1, "f"], o.g(1, "g"));
+assertArrayEquals([0, "h"], hej(0));
+assertArrayEquals([1, "f"], hej(1));
+
+o = new B();
+
+stress();
+
+assertArrayEquals([0, "f"], o.g(0, "g"));
+assertArrayEquals([1, "g"], o.g(1, "g"));
+assertArrayEquals([0, "f"], hej(0));
+assertArrayEquals([1, "h"], hej(1));
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/regress-funcaller.js
similarity index 62%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/regress-funcaller.js
index e8028ce..88db147 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/regress-funcaller.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,49 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+// Test function.caller.
+function A() {}
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+function fun(x) {
+  if (x == 0) return fun.caller;
+  if (x == 1) return gee.caller;
+  return 42;
+}
+function gee(x) { return this.f(x); }
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+A.prototype.f = fun;
+A.prototype.g = gee;
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+var o = new A();
+
+for (var i=0; i<5000000; i++) {
+  o.g(i);
+}
+assertEquals(gee, o.g(0));
+assertEquals(null, o.g(1));
+
+// Test when called from another function.
+function hej(x) {
+  if (x == 0) return o.g(x);
+  if (x == 1) return o.g(x);
+  return o.g(x);
+}
+
+for (var j=0; j<5000000; j++) {
+  hej(j);
+}
+assertEquals(gee, hej(0));
+assertEquals(hej, hej(1));
+
+// Test when called from eval.
+function from_eval(x) {
+  if (x == 0) return eval("o.g(x);");
+  if (x == 1) return eval("o.g(x);");
+  return o.g(x);
+}
+
+for (var j=0; j<5000000; j++) {
+  from_eval(j);
+}
+assertEquals(gee, from_eval(0));
+assertEquals(from_eval, from_eval(1));
diff --git a/test/mjsunit/compiler/regress-gap.js b/test/mjsunit/compiler/regress-gap.js
new file mode 100644
index 0000000..a812daa
--- /dev/null
+++ b/test/mjsunit/compiler/regress-gap.js
@@ -0,0 +1,130 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test that stresses the register allocator gap instruction.
+
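+// Rotating many live values forces the allocator to emit parallel (gap)
+// moves, including cycles that can only be resolved with a temporary.
+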
+function small_select(n, v1, v2) {
+  for (var i = 0; i < n; ++i) {
+    var tmp = v1;
+    v1 = v2;
+    v2 = tmp;
+  }
+  return v1;
+}
+
+function select(n, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) {
+  for (var i = 0; i < n; ++i) {
+    var tmp = v1;
+    v1 = v2;
+    v2 = v3;
+    v3 = v4;
+    v4 = v5;
+    v5 = v6;
+    v6 = v7;
+    v7 = v8;
+    v8 = v9;
+    v9 = v10;
+    v10 = tmp;
+  }
+  return v1;
+}
+
+function select_while(n, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10) {
+  var i = 0;
+  while (i < n) {
+    var tmp = v1;
+    v1 = v2;
+    v2 = v3;
+    v3 = v4;
+    v4 = v5;
+    v5 = v6;
+    v6 = v7;
+    v7 = v8;
+    v8 = v9;
+    v9 = v10;
+    v10 = tmp;
+    i++;
+  }
+  return v1;
+}
+
+function two_cycles(n, v1, v2, v3, v4, v5, x1, x2, x3, x4, x5) {
+  for (var i = 0; i < n; ++i) {
+    var tmp = v1;
+    v1 = v2;
+    v2 = v3;
+    v3 = v4;
+    v4 = v5;
+    v5 = tmp;
+    tmp = x1;
+    x1 = x2;
+    x2 = x3;
+    x3 = x4;
+    x4 = x5;
+    x5 = tmp;
+  }
+  return v1 + x1;
+}
+
+function two_cycles_while(n, v1, v2, v3, v4, v5, x1, x2, x3, x4, x5) {
+  var i = 0;
+  while (i < n) {
+    var tmp = v1;
+    v1 = v2;
+    v2 = v3;
+    v3 = v4;
+    v4 = v5;
+    v5 = tmp;
+    tmp = x1;
+    x1 = x2;
+    x2 = x3;
+    x3 = x4;
+    x4 = x5;
+    x5 = tmp;
+    i++;
+  }
+  return v1 + x1;
+}
+assertEquals(1, small_select(0, 1, 2));
+assertEquals(2, small_select(1, 1, 2));
+assertEquals(1, small_select(10, 1, 2));
+
+assertEquals(1, select(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(4, select(3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(10, select(9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+
+assertEquals(1 + 6, two_cycles(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(4 + 9, two_cycles(3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(5 + 10, two_cycles(9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+
+assertEquals(1, select_while(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(4, select_while(3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(10, select_while(9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+
+assertEquals(1 + 6, two_cycles_while(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(4 + 9, two_cycles_while(3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+assertEquals(5 + 10, two_cycles_while(9, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-gvn.js
similarity index 79%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-gvn.js
index 6e292d6..358daf7 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-gvn.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,27 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Flags: --noalways-opt
+//
+// Regression test for global value numbering.
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
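+// The second load of a[0] must not be value-numbered with the first one,
+// since the store in the if-block may have changed the element.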
+function test(a) {
+  var res = a[0] + a[0];
+  if (res == 0) {
+    a[0] = 1;
+  }
+  return a[0];
 }
 
-assertFalse(foo());
+var a = new Array();
+
+var n = 100000000;
+
+var result = 0;
+for (var i = 0; i < n; ++i) {
+  a[0] = 0;
+  result += test(a);
+}
+
+
+assertEquals(n, result);
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/regress-intoverflow.js
similarity index 67%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/regress-intoverflow.js
index e8028ce..d3842f1 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/regress-intoverflow.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,38 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+// Test overflow checks in optimized code.
+function testMul(a, b) {
+  a *= 2;
+  b *= 2;
+  if (a < 1 && b < 1) {
+    return a * b;
+  }
+}
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
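+// The arithmetic below overflows the int32 range, so the optimized code
+// must deoptimize instead of returning a truncated result.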
+for (var i=0; i<1000000; i++) testMul(0,0);
+assertEquals(4611686018427388000, testMul(-0x40000000, -0x40000000));
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+function testAdd(a, b) {
+  a *= 2;
+  b *= 2;
+  if (a < 1 && b < 1) {
+    return a + b;
+  }
+}
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+for (var i=0; i<1000000; i++) testAdd(0,0);
+assertEquals(-4294967296, testAdd(-0x40000000, -0x40000000));
+
+
+function testSub(a, b) {
+  a *= 2;
+  b *= 2;
+  if (b == 2) {print(a); print(b);}
+  if (a < 1 && b < 3) {
+    return a - b;
+  }
+}
+
+for (var i=0; i<1000000; i++) testSub(0,0);
+assertEquals(-2147483650, testSub(-0x40000000, 1));
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-loop-deopt.js
similarity index 85%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-loop-deopt.js
index 6e292d6..7906761 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-loop-deopt.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
-
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+// Test while loops and continue.
+function h() {
+  var i = 3, j = 0;
+  while(--i >= 0) {
+    var x = i & 1;
+    if(x > 0) {
+      continue;
+    }
+    j++;
+  }
+  return j;
 }
 
-assertFalse(foo());
+assertEquals(2, h());
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-max.js
similarity index 85%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-max.js
index 6e292d6..94c543a 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-max.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Test Math.max with negative zero as input.
+function f(x, y) { return Math.max(x, y) }
 
+for (var i = 0; i < 1000000; i++) f(0, 0);
 
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
-}
-
-assertFalse(foo());
+var r = f(-0, -0);
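+// 1 / -0 is -Infinity, which distinguishes -0 from +0.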
+assertEquals(-Infinity, 1 / r);
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/regress-or.js
similarity index 72%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/regress-or.js
index e8028ce..89f7802 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/regress-or.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,33 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+// Test deoptimization inside short-circuited expressions.
+function f1(x) {
+  var c = "fail";
+  if (!x || g1()) {
+    c = ~x;
+  }
+  return c;
+}
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
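+// The try/finally keeps g1 out of optimized code, so calling it inside
+// the short-circuited condition can trigger a deoptimization of f1.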
+function g1() { try { return 1; } finally {} }
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+for (var i=0; i<10000000; i++) f1(42);
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+assertEquals(-1, f1(0));
+assertEquals(-43, f1(42));
+assertEquals(-1, f1(""));
+
+function f2(x) {
+  var c = "fail";
+  if (!x || !g2()) {
+    c = ~x;
+  }
+  return c;
+}
+
+function g2() { try { return 0; } finally {} }
+
+for (var i=0; i<10000000; i++) f2(42);
+
+assertEquals(-1, f2(""));
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-rep-change.js
similarity index 81%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-rep-change.js
index 6e292d6..9370999 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-rep-change.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Regression test for the case where a phi has two input operands with
+// the same value.
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function test(start) {
+  if (true) {
+    for (var i = start; i < 10; i++) { }
+  }
+  for (var i = start; i < 10; i++) { }
 }
 
-assertFalse(foo());
+var n = 5000000;
+
+for (var i = 0; i < n; ++i) {
+  test(0);
+}
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/compiler/regress-stacktrace-methods.js
similarity index 62%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/compiler/regress-stacktrace-methods.js
index e8028ce..4900ccf 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/compiler/regress-stacktrace-methods.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,40 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+// Test stack traces with method calls.
+function Hest() {}
+function Svin() {}
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+Svin.prototype.two = function() { /* xxxxxxx */ o.three(); }
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+Hest.prototype.one = function(x) { x.two(); }
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+Hest.prototype.three = function() { if (v == 42) throw new Error("urg"); }
+
+var o = new Hest();
+var s = new Svin();
+var v = 0;
+
+for (var i = 0; i < 1000000; i++) {
+  o.one(s);
+}
+
+v = 42;
+
+try {
+  o.one(s);
+} catch (e) {
+  var stack = e.stack.toString();
+  var p3 = stack.indexOf("at Hest.three");
+  var p2 = stack.indexOf("at Svin.two");
+  var p1 = stack.indexOf("at Hest.one");
+  assertTrue(p3 != -1);
+  assertTrue(p2 != -1);
+  assertTrue(p1 != -1);
+  assertTrue(p3 < p2);
+  assertTrue(p2 < p1);
+  assertTrue(stack.indexOf("36:56") != -1);
+  assertTrue(stack.indexOf("32:51") != -1);
+  assertTrue(stack.indexOf("34:38") != -1);
+  assertTrue(stack.indexOf("49:5") != -1);
+}
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/regress-stacktrace.js
similarity index 73%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/regress-stacktrace.js
index 6e292d6..843dd12 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/regress-stacktrace.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,28 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Test correctness of stack traces with global functions.
+eval("function two() { /* xxxxxxx */ three(); }");
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function one() {
+  two();
 }
 
-assertFalse(foo());
+function three() {
+  throw new Error("urg");
+}
+
+try {
+ one();
+} catch (e) {
+  var stack = e.stack.toString();
+  var p3 = stack.indexOf("at three");
+  var p2 = stack.indexOf("at two");
+  var p1 = stack.indexOf("at one");
+  assertTrue(p3 != -1);
+  assertTrue(p2 != -1);
+  assertTrue(p1 != -1);
+  assertTrue(p3 < p2);
+  assertTrue(p2 < p1);
+  print(stack);
+}
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/safepoint.js
similarity index 86%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/safepoint.js
index 6e292d6..ee8fcf0 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/safepoint.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Flags: --expose-gc
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
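+// Calling gc() from optimized code checks that the safepoint keeps the
+// values in x and y alive across the collections.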
+function Test(o) {
+  var x = o;
+  var y = this;
+  x.gc();
+  x.gc();
+  return y;
 }
 
-assertFalse(foo());
+var o = {gc:gc};
+assertTrue(Test(o) === this);
diff --git a/test/mjsunit/compiler/simple-bailouts.js b/test/mjsunit/compiler/simple-bailouts.js
index af80b7f..ef7a0f4 100644
--- a/test/mjsunit/compiler/simple-bailouts.js
+++ b/test/mjsunit/compiler/simple-bailouts.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --fast-compiler
-
 function Test() {
   this.result = 0;
   this.x = 0;
@@ -92,6 +90,14 @@
        | a;             // 1.1
 }
 
+Test.prototype.test10 = function() {
+  this.z = (a >> b) | (c >> c);
+}
+
+Test.prototype.test11 = function(x) {
+  this.z = x >> x;
+}
+
 var t = new Test();
 
 t.test0();
@@ -125,3 +131,13 @@
 assertEquals(14, t.x);
 assertEquals(6, t.y);
 assertEquals(15, t.z);
+
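+// String operands should force the shifts in test10 and test11 to bail
+// out of the optimized code.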
+a = "2";
+t.test11(a);
+assertEquals(0, t.z);
+
+a = 4;
+b = "1";
+c = 2;
+t.test10();
+assertEquals(2, t.z);
diff --git a/test/mjsunit/compiler/simple-binary-op.js b/test/mjsunit/compiler/simple-binary-op.js
index 15e1a55..a4e8ab5 100644
--- a/test/mjsunit/compiler/simple-binary-op.js
+++ b/test/mjsunit/compiler/simple-binary-op.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --fast-compiler
-
 var a = 1;
 var b = 2;
 var c = 4;
diff --git a/test/mjsunit/compiler/simple-deopt.js b/test/mjsunit/compiler/simple-deopt.js
new file mode 100644
index 0000000..8befd9f
--- /dev/null
+++ b/test/mjsunit/compiler/simple-deopt.js
@@ -0,0 +1,101 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
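+// Call f with a small integer first so the optimized code can specialize
+// the bitwise-not, then exercise the deopt path with doubles.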
+function f(x) {
+  return ~x;
+}
+
+f(42);
+assertEquals(~12, f(12.45));
+assertEquals(~42, f(42.87));
+
+
+var a = 1, b = 2, c = 4, d = 8;
+function g() {
+  return a | (b | (c | d));
+}
+
+g();
+c = "16";
+assertEquals(1 | 2 | 16 | 8, g());
+
+
+// Test deopt when global function changes.
+function h() {
+  return g();
+}
+assertEquals(1 | 2 | 16 | 8, h());
+g = function() { return 42; };
+assertEquals(42, h());
+
+
+// Test deopt when map changes.
+var obj = {};
+obj.g = g;
+function k(o) {
+  return o.g();
+}
+for (var i = 0; i < 1000000; i++) k(obj);
+assertEquals(42, k(obj));
+assertEquals(87, k({g: function() { return 87; }}));
+
+
+// Test deopt with assignments to parameters.
+function p(x,y) {
+  x = 42;
+  y = 1;
+  y = y << "0";
+  return x | y;
+}
+assertEquals(43, p(0,0));
+
+
+// Test deopt with literals on the expression stack.
+function LiteralToStack(x) {
+  return 'lit[' + (x + ']');
+}
+
+assertEquals('lit[-87]', LiteralToStack(-87));
+assertEquals('lit[0]', LiteralToStack(0));
+assertEquals('lit[42]', LiteralToStack(42));
+
+
+// Test deopt before call.
+var str = "abc";
+var r;
+function CallCharAt(n) { return str.charAt(n); }
+for (var i = 0; i < 1000000; i++) {
+  r = CallCharAt(0);
+}
+assertEquals("a", r);
+
+
+// Test of deopt in presence of spilling.
+function add4(a,b,c,d) {
+  return a+b+c+d;
+}
+assertEquals(0x40000003, add4(1,1,2,0x3fffffff));
diff --git a/test/mjsunit/compiler/simple-global-access.js b/test/mjsunit/compiler/simple-global-access.js
index 35746ba..87a641c 100644
--- a/test/mjsunit/compiler/simple-global-access.js
+++ b/test/mjsunit/compiler/simple-global-access.js
@@ -25,9 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --fast-compiler
-
-// Test global variable loads with the fast compiler.
+// Test global variable loads.
 var g1 = 42;
 var g2 = 43;
 var g3 = 44;
diff --git a/test/mjsunit/compiler/simple-inlining.js b/test/mjsunit/compiler/simple-inlining.js
new file mode 100644
index 0000000..219580f
--- /dev/null
+++ b/test/mjsunit/compiler/simple-inlining.js
@@ -0,0 +1,146 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we can inline a function that returns a constant.
+function TestInlineConstant(o) {
+  // Effect context.
+  o.f();
+  // Value context.
+  var x = o.f();
+  assertEquals(42, x);
+  assertEquals(42, o.f());
+  // Test context.
+  if (!o.f()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o1 = {};
+o1.f = function() { return 42; };
+for (var i = 0; i < 10000; i++) TestInlineConstant(o1);
+TestInlineConstant({f: o1.f});
+
+
+// Test that we can inline a function that returns 'this'.
+function TestInlineThis(o) {
+  // Effect context.
+  o.g();
+  // Value context.
+  var x = o.g();
+  assertEquals(o, x);
+  assertEquals(o, o.g());
+  // Test context.
+  if (!o.g()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o2 = {};
+o2.g = function() { return this; };
+for (var i = 0; i < 10000; i++) TestInlineThis(o2);
+TestInlineThis({g: o2.g});
+
+
+// Test that we can inline a function that returns 'this.x'.
+function TestInlineThisX(o) {
+  // Effect context.
+  o.h();
+  // Value context.
+  var x = o.h();
+  assertEquals(42, x);
+  assertEquals(42, o.h());
+  // Test context.
+  if (!o.h()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o3 = {y:0,x:42};
+o3.h = function() { return this.x; };
+for (var i = 0; i < 10000; i++) TestInlineThisX(o3);
+TestInlineThisX({h: o3.h, x:42});
+
+
+// Test that we can inline a function that returns 'this.x.length'.
+function TestInlineThisXLength(o) {
+  // Effect context.
+  o.h();
+  // Value context.
+  var x = o.h();
+  assertEquals(3, x);
+  assertEquals(3, o.h());
+  // Test context.
+  if (!o.h()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o4 = {x:[1,2,3]};
+o4.h = function() { return this.x.length; };
+for (var i = 0; i < 10000; i++) TestInlineThisXLength(o4);
+TestInlineThisXLength({h: o4.h, x:[1,2,3]});
+
+
+// Test that we can inline a function that returns 'this.e.y'.
+function TestInlineThisXY(o) {
+  // Effect context.
+  o.h();
+  // Value context.
+  var x = o.h();
+  assertEquals(42, x);
+  assertEquals(42, o.h());
+  // Test context.
+  if (!o.h()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o6 = {y:42};
+var o5 = {e:o6};
+o5.h = function() { return this.e.y; };
+for (var i = 0; i < 10000; i++) TestInlineThisXY(o5);
+TestInlineThisXY({h: o5.h, e:o6});
+
+
+// Test that we can inline a function that returns 'this.x[0]'.
+function TestInlineThisX0(o) {
+  // Effect context.
+  o.foo();
+  // Value context.
+  var x = o.foo();
+  assertEquals(42, x);
+  assertEquals(42, o.foo());
+  // Test context.
+  if (!o.foo()) {
+    assertTrue(false);  // Should not happen.
+  }
+}
+
+var o7 = {x:[42,43,44]};
+o7.foo = function() { return this.x[0]; };
+for (var i = 0; i < 10000; i++) TestInlineThisX0(o7);
+TestInlineThisX0({foo: o7.foo, x:[42,0,0]});
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/simple-osr.js
similarity index 82%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/simple-osr.js
index 6e292d6..8ec1b2b 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/simple-osr.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Flags: --use-osr
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
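+// The hot loop below should trigger on-stack replacement while f runs.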
+function f() {
+  var sum = 0;
+  for (var i = 0; i < 1000000; i++) {
+    var x = i + 2;
+    var y = x + 5;
+    var z = y + 3;
+    sum += z;
+  }
+  return sum;
 }
 
-assertFalse(foo());
+
+for (var i = 0; i < 2; i++) {
+  assertEquals(500009500000, f());
+}
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/compiler/switch-bailout.js
similarity index 81%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/compiler/switch-bailout.js
index 6e292d6..8011d44 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/compiler/switch-bailout.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
-
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+// Test that bailing out of the optimized compilation doesn't mess with
+// the labels in the AST.
+function f(x) {
+  switch (x) {
+    case "foo": return 87;
+    case "bar": return 42;
+  }
+  return 99;
 }
 
-assertFalse(foo());
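+// Repeated calls trigger an optimization attempt that bails out on the
+// switch; f must still compute the untaken case correctly afterwards.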
+for (var i = 0; i < 10000; i++) f("foo");
+assertEquals(42, f("bar"));
diff --git a/test/mjsunit/compiler/this-property-refs.js b/test/mjsunit/compiler/this-property-refs.js
index 5e8ea59..1ee8e50 100644
--- a/test/mjsunit/compiler/this-property-refs.js
+++ b/test/mjsunit/compiler/this-property-refs.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --fast-compiler
-
 // Test references to properties of this.
 function Test() {
   this.a = 0;
diff --git a/test/mjsunit/compiler/thisfunction.js b/test/mjsunit/compiler/thisfunction.js
index 098fc3a..7615561 100644
--- a/test/mjsunit/compiler/thisfunction.js
+++ b/test/mjsunit/compiler/thisfunction.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --always-full-compiler
-
 // Test reference to this-function.
 
 var g = (function f(x) {
diff --git a/test/mjsunit/compiler/variables.js b/test/mjsunit/compiler/variables.js
new file mode 100644
index 0000000..fac4878
--- /dev/null
+++ b/test/mjsunit/compiler/variables.js
@@ -0,0 +1,73 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Simple tests of the various kinds of variable references in the
+// implementation.
+
+// Global variables.
+var x = 0;
+function f0() { return x; }
+assertEquals(0, f0());
+
+
+// Parameters.
+function f1(x) { return x; }
+assertEquals(1, f1(1));
+
+
+// Stack-allocated locals.
+function f2() { var x = 2; return x; }
+assertEquals(2, f2());
+
+
+// Context-allocated locals.  Local function forces x into f3's context.
+function f3(x) {
+  function g() { return x; }
+  return x;
+}
+assertEquals(3, f3(3));
+
+// Local function reads x from an outer context.
+function f4(x) {
+  function g() { return x; }
+  return g();
+}
+assertEquals(4, f4(4));
+
+
+// Lookup slots.  'With' forces x to be looked up at runtime.
+function f5(x) {
+  with ({}) return x;
+}
+assertEquals(5, f5(5));
+
+
+// Parameters rewritten to property accesses.  Using the name 'arguments'
+// (even if it shadows the arguments object) forces all parameters to be
+// rewritten to explicit property accesses.
+function f6(arguments) { return arguments; }
+assertEquals(6, f6(6));
diff --git a/test/mjsunit/date.js b/test/mjsunit/date.js
index 57fc5a0..f13af82 100644
--- a/test/mjsunit/date.js
+++ b/test/mjsunit/date.js
@@ -167,8 +167,8 @@
 // Modified test from WebKit
 // LayoutTests/fast/js/script-tests/date-utc-timeclip.js:
 
-assertEquals(Date.UTC(275760, 8, 12, 23, 59, 59, 999), 8639999999999999);
-assertEquals(Date.UTC(275760, 8, 13), 8640000000000000);
+assertEquals(8639999999999999, Date.UTC(275760, 8, 12, 23, 59, 59, 999));
+assertEquals(8640000000000000, Date.UTC(275760, 8, 13));
 assertTrue(isNaN(Date.UTC(275760, 8, 13, 0, 0, 0, 1)));
 assertTrue(isNaN(Date.UTC(275760, 8, 14)));
 
@@ -176,3 +176,14 @@
 assertEquals(Date.UTC(-271821, 3, 20), -8640000000000000);
 assertTrue(isNaN(Date.UTC(-271821, 3, 19, 23, 59, 59, 999)));
 assertTrue(isNaN(Date.UTC(-271821, 3, 19)));
+
+
+// Test creation of large date values.
+d = new Date(1969, 12, 1, 99999999999);
+assertTrue(isNaN(d.getTime()));
+d = new Date(1969, 12, 1, -99999999999);
+assertTrue(isNaN(d.getTime()));
+d = new Date(1969, 12, 1, Infinity);
+assertTrue(isNaN(d.getTime()));
+d = new Date(1969, 12, 1, -Infinity);
+assertTrue(isNaN(d.getTime()));
diff --git a/test/mjsunit/debug-changebreakpoint.js b/test/mjsunit/debug-changebreakpoint.js
index 936523a..897c3e3 100644
--- a/test/mjsunit/debug-changebreakpoint.js
+++ b/test/mjsunit/debug-changebreakpoint.js
@@ -33,6 +33,7 @@
 listenerComplete = false;
 exception = false;
 
+var breakpoint = -1;
 var base_request = '"seq":0,"type":"request","command":"changebreakpoint"'
 
 function safeEval(code) {
@@ -68,21 +69,21 @@
 
     testArguments(dcp, '{}', false);
     testArguments(dcp, '{"breakpoint":0,"condition":"false"}', false);
-    // TODO(1241036) change this to 2 when break points have been restructured.
-    testArguments(dcp, '{"breakpoint":3,"condition":"false"}', false);
+    testArguments(dcp, '{"breakpoint":' + (breakpoint + 1) + ',"condition":"false"}', false);
     testArguments(dcp, '{"breakpoint":"xx","condition":"false"}', false);
 
     // Test some legal clearbreakpoint requests.
-    testArguments(dcp, '{"breakpoint":1}', true);
-    testArguments(dcp, '{"breakpoint":1,"enabled":"true"}', true);
-    testArguments(dcp, '{"breakpoint":1,"enabled":"false"}', true);
-    testArguments(dcp, '{"breakpoint":1,"condition":"1==2"}', true);
-    testArguments(dcp, '{"breakpoint":1,"condition":"false"}', true);
-    testArguments(dcp, '{"breakpoint":1,"ignoreCount":7}', true);
-    testArguments(dcp, '{"breakpoint":1,"ignoreCount":0}', true);
+    var bp_str = '"breakpoint":' + breakpoint;;
+    testArguments(dcp, '{' + bp_str + '}', true);
+    testArguments(dcp, '{' + bp_str + ',"enabled":"true"}', true);
+    testArguments(dcp, '{' + bp_str + ',"enabled":"false"}', true);
+    testArguments(dcp, '{' + bp_str + ',"condition":"1==2"}', true);
+    testArguments(dcp, '{' + bp_str + ',"condition":"false"}', true);
+    testArguments(dcp, '{' + bp_str + ',"ignoreCount":7}', true);
+    testArguments(dcp, '{' + bp_str + ',"ignoreCount":0}', true);
     testArguments(
         dcp,
-        '{"breakpoint":1,"enabled":"true","condition":"false","ignoreCount":0}',
+        '{' + bp_str + ',"enabled":"true","condition":"false","ignoreCount":0}',
         true);
 
     // Indicate that all was processed.
@@ -99,8 +100,7 @@
 function g() {};
 
 // Set a break point and call to invoke the debug event listener.
-bp = Debug.setBreakPoint(g, 0, 0);
-assertEquals(1, bp);
+breakpoint = Debug.setBreakPoint(g, 0, 0);
 g();
 
 // Make sure that the debug event listener was invoked.
diff --git a/test/mjsunit/debug-clearbreakpoint.js b/test/mjsunit/debug-clearbreakpoint.js
index 59479f2..58e1531 100644
--- a/test/mjsunit/debug-clearbreakpoint.js
+++ b/test/mjsunit/debug-clearbreakpoint.js
@@ -33,6 +33,7 @@
 listenerComplete = false;
 exception = false;
 
+var breakpoint = -1;
 var base_request = '"seq":0,"type":"request","command":"clearbreakpoint"'
 
 function safeEval(code) {
@@ -68,15 +69,14 @@
 
     testArguments(dcp, '{}', false);
     testArguments(dcp, '{"breakpoint":0}', false);
-    // TODO(1241036) change this to 2 when break points have been restructured.
-    testArguments(dcp, '{"breakpoint":3}', false);
+    testArguments(dcp, '{"breakpoint":' + (breakpoint + 1)+ '}', false);
     testArguments(dcp, '{"breakpoint":"xx"}', false);
 
     // Test some legal clearbreakpoint requests.
-    testArguments(dcp, '{"breakpoint":1}', true);
+    testArguments(dcp, '{"breakpoint":' + breakpoint + '}', true);
 
     // Cannot clear the same break point twice.
-    testArguments(dcp, '{"breakpoint":1}', false);
+    testArguments(dcp, '{"breakpoint":' + breakpoint + '}', false);
 
     // Indicate that all was processed.
     listenerComplete = true;
@@ -92,8 +92,7 @@
 function g() {};
 
 // Set a break point and call to invoke the debug event listener.
-bp = Debug.setBreakPoint(g, 0, 0);
-assertEquals(1, bp);
+breakpoint = Debug.setBreakPoint(g, 0, 0);
 g();
 
 // Make sure that the debug event listener was invoked.
diff --git a/test/mjsunit/debug-clearbreakpointgroup.js b/test/mjsunit/debug-clearbreakpointgroup.js
index e6677f9..0cfc5c9 100644
--- a/test/mjsunit/debug-clearbreakpointgroup.js
+++ b/test/mjsunit/debug-clearbreakpointgroup.js
@@ -115,3 +115,8 @@
 assertEquals([bp2, bp3, bp5].sort(), breakpointNumbers.sort());
 
 assertFalse(exception, "exception in listener");
+
+// Clear all breakpoints to allow the test to run again (--stress-opt).
+Debug.clearBreakPoint(bp2);
+Debug.clearBreakPoint(bp3);
+Debug.clearBreakPoint(bp5);
diff --git a/test/mjsunit/debug-evaluate-with-context.js b/test/mjsunit/debug-evaluate-with-context.js
new file mode 100644
index 0000000..5e1c83c
--- /dev/null
+++ b/test/mjsunit/debug-evaluate-with-context.js
@@ -0,0 +1,144 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var evaluate_callback;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    var context = { what_is_capybara: "a fish" };
+    var context2 = { what_is_capybara: "a fish", what_is_parrot: "a beard" };
+
+    // Try in frame's scope.
+    var local_expression =
+        "(what_is_capybara ? what_is_capybara : 'a beast') + '/' + what_is_parrot";
+    var result = evaluate_callback.in_top_frame(exec_state, local_expression, context);
+    assertEquals('a fish/a bird', result);
+
+    // Try in frame's scope with overridden local variables.
+    var result = evaluate_callback.in_top_frame(exec_state, local_expression, context2);
+    assertEquals('a fish/a beard', result);
+
+    // Try in frame's scope, without context.
+    var local_expression2 = "what_is_parrot";
+    var result = evaluate_callback.in_top_frame(exec_state, local_expression2, void 0);
+    assertEquals('a bird', result);
+
+    // Try in global additional scope.
+    var global_expression = "what_is_capybara ? what_is_capybara : 'a beast'";
+    var result = evaluate_callback.globally(exec_state, global_expression, context);
+    assertEquals('a fish', result);
+
+    // Try in global scope with overridden global variables.
+    var context_with_undefined = { undefined: 'kitten' };
+    var global_expression2 = "'cat' + '/' + undefined";
+    var result = evaluate_callback.globally(exec_state, global_expression2, context_with_undefined);
+    assertEquals('cat/kitten', result);
+
+    // Try in global scope with no overridden global variables.
+    var result = evaluate_callback.globally(exec_state, global_expression2, void 0);
+    assertEquals('cat/undefined', result);
+
+    // Try in global scope without additional context.
+    var global_expression3 = "'cat' + '/' + 'dog'";
+    var result = evaluate_callback.globally(exec_state, global_expression3, void 0);
+    assertEquals('cat/dog', result);
+
+    listenerComplete = true;
+  } catch (e) {
+    exception = e
+  };
+};
+
+
+function f() {
+  var what_is_parrot = "a bird";
+  debugger;
+};
+
+function runF() {
+  exception = false;
+  listenerComplete = false;
+
+  // Add the debug event listener.
+  Debug.setListener(listener);
+
+  f();
+
+  assertFalse(exception, "exception in listener");
+  assertTrue(listenerComplete);
+}
+
+evaluate_callback = {
+  in_top_frame: function(exec_state, expression, additional_context) {
+    return exec_state.frame(0).evaluate(expression, void 0, additional_context).value();
+  },
+  globally: function(exec_state, expression, additional_context) {
+    return exec_state.evaluateGlobal(expression, void 0, additional_context).value();
+  },
+};
+
+
+runF();
+
+// Now try all the same, but via debug protocol.
+
+function evaluateViaProtocol(exec_state, expression, additional_context, frame_argument_adder) {
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+  request_json = {"seq":17,"type":"request","command":"evaluate", arguments: { "expression": expression } };
+  frame_argument_adder(request_json.arguments);
+  if (additional_context) {
+    var context_json = [];
+    for (var key in additional_context) {
+      context_json.push({ name: key, handle: Debug.MakeMirror(additional_context[key]).handle() });
+    }
+    request_json.arguments.additional_context = context_json;
+  }
+  var request = JSON.stringify(request_json);
+  var response_json = dcp.processDebugJSONRequest(request);
+  var response = JSON.parse(response_json);
+
+  assertTrue(response.success);
+  var str_result = response.body.value;
+  return str_result;
+}
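+
+// Note: the additional context crosses the protocol as a list of
+// {name, handle} pairs (handles come from Debug.MakeMirror), rather than as
+// the plain name-to-value object that the mirror API above accepts directly.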
+
+evaluate_callback = {
+  in_top_frame: function(exec_state, expression, additional_context) {
+    return evaluateViaProtocol(exec_state, expression, additional_context, function(args) { args.frame = 0; });
+  },
+  globally: function(exec_state, expression, additional_context) {
+    return evaluateViaProtocol(exec_state, expression, additional_context, function(args) { args.global = true; });
+  },
+};
+
+runF();
diff --git a/test/mjsunit/debug-liveedit-2.js b/test/mjsunit/debug-liveedit-2.js
index 94e2780..39ebf3a 100644
--- a/test/mjsunit/debug-liveedit-2.js
+++ b/test/mjsunit/debug-liveedit-2.js
@@ -31,17 +31,14 @@
 
 Debug = debug.Debug
 
-
-eval(
-    "function ChooseAnimal(p) {\n " +
-    "  if (p == 7) {\n" + // Use p
-    "    return;\n" +
-    "  }\n" +
-    "  return function Chooser() {\n" +
-    "    return 'Cat';\n" +
-    "  };\n" +
-    "}\n"
-);
+eval("function ChooseAnimal(p) {\n " +
+     "  if (p == 7) {\n" + // Use p
+     "    return;\n" +
+     "  }\n" +
+     "  return function Chooser() {\n" +
+     "    return 'Cat';\n" +
+     "  };\n" +
+     "}\n");
 
 var old_closure = ChooseAnimal(19);
 
@@ -67,4 +64,3 @@
 
 // Old instance of closure is not patched.
 assertEquals("Cat", old_closure());
-
diff --git a/test/mjsunit/debug-liveedit-breakpoints.js b/test/mjsunit/debug-liveedit-breakpoints.js
index f01a8c4..1d28ab9 100644
--- a/test/mjsunit/debug-liveedit-breakpoints.js
+++ b/test/mjsunit/debug-liveedit-breakpoints.js
@@ -62,6 +62,8 @@
 
 var script = Debug.findScript(F25);
 
+assertEquals(0, Debug.scriptBreakPoints().length);
+
 Debug.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId, script.id, 1, 1, "true || false || false");
 Debug.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId, script.id, 6, 1, "true || false || false");
 Debug.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId, script.id, 14, 1, "true || false || false");
@@ -96,3 +98,16 @@
 assertTrue(break_position_map[1]);
 assertTrue(break_position_map[11]);
 
+// Delete all breakpoints to make this test reentrant.
+var breaks = Debug.scriptBreakPoints();
+var breaks_ids = [];
+
+for (var i = 0; i < breaks.length; i++) {
+  breaks_ids.push(breaks[i].number());
+}
+
+for (var i = 0; i < breaks_ids.length; i++) {
+  Debug.clearBreakPoint(breaks_ids[i]);
+}
+
+assertEquals(0, Debug.scriptBreakPoints().length);
diff --git a/test/mjsunit/debug-liveedit-patch-positions.js b/test/mjsunit/debug-liveedit-patch-positions.js
index 027987f..b0d3c20 100644
--- a/test/mjsunit/debug-liveedit-patch-positions.js
+++ b/test/mjsunit/debug-liveedit-patch-positions.js
@@ -30,7 +30,7 @@
 
 // Scenario: some function is being edited; the outer function has to have its
 // positions patched. Accoring to a special markup of function text
-// corresponding byte-code PCs should conicide before change and after it.
+// the corresponding byte-code PCs should coincide before and after the change.
 
 Debug = debug.Debug
 
@@ -62,32 +62,65 @@
 function ReadPCMap(func, positions) {
   var res = new Array();
   for (var i = 0; i < positions.length; i++) {
-    res.push(Debug.LiveEdit.GetPcFromSourcePos(func, positions[i]));
+    var pc = Debug.LiveEdit.GetPcFromSourcePos(func, positions[i]);
+
+    if (typeof pc === 'undefined') {
+      // The function was marked for recompilation and its code was replaced
+      // with a stub. This can happen at any time, especially when running
+      // with --stress-opt. There is no way to get PCs now.
+      return;
+    }
+
+    res.push(pc);
   }
+
   return res;
 }
 
-var res = ChooseAnimal();
-assertEquals("Cat15", res);
+function ApplyPatch(orig_animal, new_animal) {
+  var res = ChooseAnimal();
+  assertEquals(orig_animal + "15", res);
 
-var markerPositionsBefore = ReadMarkerPositions(ChooseAnimal);
-var pcArrayBefore = ReadPCMap(ChooseAnimal, markerPositionsBefore);
+  var script = Debug.findScript(ChooseAnimal);
 
-var script = Debug.findScript(ChooseAnimal);
+  var orig_string = "'" + orig_animal + "'";
+  var patch_string = "'" + new_animal + "'";
+  var patch_pos = script.source.indexOf(orig_string);
 
-var orig_animal = "'Cat'";
-var patch_pos = script.source.indexOf(orig_animal);
-var new_animal_patch = "'Capybara'";
+  var change_log = new Array();
 
-var change_log = new Array();
-Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script, patch_pos, orig_animal.length, new_animal_patch, change_log);
-print("Change log: " + JSON.stringify(change_log) + "\n");
+  Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script,
+                                               patch_pos,
+                                               orig_string.length,
+                                               patch_string,
+                                               change_log);
 
-var res = ChooseAnimal();
-assertEquals("Capybara15", res);
+  print("Change log: " + JSON.stringify(change_log) + "\n");
 
-var markerPositionsAfter = ReadMarkerPositions(ChooseAnimal);
-var pcArrayAfter = ReadPCMap(ChooseAnimal, markerPositionsAfter);
+  var markerPositions = ReadMarkerPositions(ChooseAnimal);
+  var pcArray = ReadPCMap(ChooseAnimal, markerPositions);
 
-assertArrayEquals(pcArrayBefore, pcArrayAfter);
+  var res = ChooseAnimal();
+  assertEquals(new_animal + "15", res);
 
+  return pcArray;
+}
+
+var pcArray1 = ApplyPatch('Cat', 'Dog');
+
+// When we patched the function for the first time it was deoptimized.
+// Check that after the second patch the mapping between source positions
+// and PCs does not change.
+
+var pcArray2 = ApplyPatch('Dog', 'Capybara');
+
+print(pcArray1);
+print(pcArray2);
+
+// A function can be marked for recompilation at any point (especially when
+// running with --stress-opt). When we mark a function for recompilation we
+// replace its code with a stub, so there is no reliable way to get PCs for
+// the function.
+if (pcArray1 && pcArray2) {
+  assertArrayEquals(pcArray1, pcArray2);
+}
diff --git a/test/mjsunit/debug-stepout-recursive-function.js b/test/mjsunit/debug-stepout-recursive-function.js
index 475fe26..3741f26 100644
--- a/test/mjsunit/debug-stepout-recursive-function.js
+++ b/test/mjsunit/debug-stepout-recursive-function.js
@@ -97,7 +97,7 @@
 EndTest(2);
 
 BeginTest('Test 4');
-shouldBreak = function(x) { print(x); return x == 1 || x == 3; };
+shouldBreak = function(x) { return x == 1 || x == 3; };
 step_out_count = 2;
 fact(3);
 EndTest(3);
diff --git a/test/mjsunit/fuzz-natives.js b/test/mjsunit/fuzz-natives.js
index cf08d7a..020e3c0 100644
--- a/test/mjsunit/fuzz-natives.js
+++ b/test/mjsunit/fuzz-natives.js
@@ -144,6 +144,9 @@
   "NewArgumentsFast": true,
   "PushContext": true,
   "LazyCompile": true,
+  "LazyRecompile": true,
+  "NotifyDeoptimized": true,
+  "NotifyOSR": true,
   "CreateObjectLiteralBoilerplate": true,
   "CloneLiteralBoilerplate": true,
   "CloneShallowLiteralBoilerplate": true,
diff --git a/test/mjsunit/get-own-property-descriptor.js b/test/mjsunit/get-own-property-descriptor.js
index 79c1fac..ceb7715 100644
--- a/test/mjsunit/get-own-property-descriptor.js
+++ b/test/mjsunit/get-own-property-descriptor.js
@@ -103,19 +103,3 @@
 objWithProto[0] = 'bar';
 var descWithProto = Object.getOwnPropertyDescriptor(objWithProto, '10');
 assertEquals(undefined, descWithProto);
-
-// Test elements on global proxy object.
-var global = (function() { return this; })();
-
-global[42] = 42;
-
-function el_getter() { return 239; };
-function el_setter() {};
-Object.defineProperty(global, '239', {get: el_getter, set: el_setter});
-
-var descRegularElement = Object.getOwnPropertyDescriptor(global, '42');
-assertEquals(42, descRegularElement.value);
-
-var descAccessorElement = Object.getOwnPropertyDescriptor(global, '239');
-assertEquals(el_getter, descAccessorElement.get);
-assertEquals(el_setter, descAccessorElement.set);
diff --git a/test/mjsunit/json.js b/test/mjsunit/json.js
index 5353d6c..a0be8dd 100644
--- a/test/mjsunit/json.js
+++ b/test/mjsunit/json.js
@@ -25,45 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-function GenericToJSONChecks(Constructor, value, alternative) {
-  var n1 = new Constructor(value);
-  n1.valueOf = function () { return alternative; };
-  assertEquals(alternative, n1.toJSON());
-  var n2 = new Constructor(value);
-  n2.valueOf = null;
-  assertThrows(function () { n2.toJSON(); }, TypeError);
-  var n3 = new Constructor(value);
-  n3.valueOf = function () { return {}; };
-  assertThrows(function () { n3.toJSON(); }, TypeError, 'result_not_primitive');
-  var n4 = new Constructor(value);
-  n4.valueOf = function () {
-    assertEquals(0, arguments.length);
-    assertEquals(this, n4);
-    return null;
-  };
-  assertEquals(null, n4.toJSON());
-}
-
-// Number toJSON
-assertEquals(3, (3).toJSON());
-assertEquals(3, (3).toJSON(true));
-assertEquals(4, (new Number(4)).toJSON());
-GenericToJSONChecks(Number, 5, 6);
-
-// Boolean toJSON
-assertEquals(true, (true).toJSON());
-assertEquals(true, (true).toJSON(false));
-assertEquals(false, (false).toJSON());
-assertEquals(true, (new Boolean(true)).toJSON());
-GenericToJSONChecks(Boolean, true, false);
-GenericToJSONChecks(Boolean, false, true);
-
-// String toJSON
-assertEquals("flot", "flot".toJSON());
-assertEquals("flot", "flot".toJSON(3));
-assertEquals("tolf", (new String("tolf")).toJSON());
-GenericToJSONChecks(String, "x", "y");
-
 // Date toJSON
 assertEquals("1970-01-01T00:00:00.000Z", new Date(0).toJSON());
 assertEquals("1979-01-11T08:00:00.000Z", new Date("1979-01-11 08:00 GMT").toJSON());
@@ -74,9 +35,6 @@
 var n2 = new Date(10001);
 n2.toISOString = null;
 assertThrows(function () { n2.toJSON(); }, TypeError);
-var n3 = new Date(10002);
-n3.toISOString = function () { return {}; };
-assertThrows(function () { n3.toJSON(); }, TypeError, "result_not_primitive");
 var n4 = new Date(10003);
 n4.toISOString = function () {
   assertEquals(0, arguments.length);
@@ -88,9 +46,47 @@
 assertTrue(Object.prototype === JSON.__proto__);
 assertEquals("[object JSON]", Object.prototype.toString.call(JSON));
 
+// Test Date.prototype.toJSON as a generic function.
+var d1 = {toJSON: Date.prototype.toJSON,
+          toISOString: function() { return 42; }};
+assertEquals(42, d1.toJSON());
+
+var d2 = {toJSON: Date.prototype.toJSON,
+          valueOf: function() { return Infinity; },
+          toISOString: function() { return 42; }};
+assertEquals(null, d2.toJSON());
+
+var d3 = {toJSON: Date.prototype.toJSON,
+          valueOf: "not callable",
+          toString: function() { return Infinity; },
+          toISOString: function() { return 42; }};
+
+assertEquals(null, d3.toJSON());
+
+var d4 = {toJSON: Date.prototype.toJSON,
+          valueOf: "not callable",
+          toString: "not callable either",
+          toISOString: function() { return 42; }};
+assertThrows("d4.toJSON()", TypeError);  // ToPrimitive throws. 
+
+var d5 = {toJSON: Date.prototype.toJSON,
+          valueOf: "not callable",
+          toString: function() { return "Infinity"; },
+          toISOString: function() { return 42; }};
+assertEquals(42, d5.toJSON());
+
+var d6 = {toJSON: Date.prototype.toJSON,
+          toISOString: function() { return ["not primitive"]; }};
+assertEquals(["not primitive"], d6.toJSON());
+
+var d7 = {toJSON: Date.prototype.toJSON,
+          ISOString: "not callable"};
+assertThrows("d7.toJSON()", TypeError);
+
 // DontEnum
-for (var p in this)
+for (var p in this) {
   assertFalse(p == "JSON");
+}
 
 // Parse
 assertEquals({}, JSON.parse("{}"));
@@ -278,9 +274,15 @@
              JSON.stringify({a:"b",c:"d"}, null, 1));
 assertEquals('{"y":6,"x":5}', JSON.stringify({x:5,y:6}, ['y', 'x']));
 
+// toJSON gets string keys: the array index arrives as the string "0", so
+// 1 + key yields "10".
+var checker = {};
+var array = [checker];
+checker.toJSON = function(key) { return 1 + key; };
+assertEquals('["10"]', JSON.stringify(array));
+
 // The gap is capped at ten characters if specified as string.
 assertEquals('{\n          "a": "b",\n          "c": "d"\n}',
-              JSON.stringify({a:"b",c:"d"}, null, 
+              JSON.stringify({a:"b",c:"d"}, null,
                              "          /*characters after 10th*/"));
 
 //The gap is capped at ten characters if specified as number.
@@ -295,16 +297,16 @@
 
 assertEquals(undefined, JSON.stringify(undefined));
 assertEquals(undefined, JSON.stringify(function () { }));
-// Arrays with missing, undefined or function elements have those elements 
+// Arrays with missing, undefined or function elements have those elements
 // replaced by null.
-assertEquals("[null,null,null]", 
+assertEquals("[null,null,null]",
              JSON.stringify([undefined,,function(){}]));
 
 // Objects with undefined or function properties (including replaced properties)
 // have those properties ignored.
-assertEquals('{}', 
+assertEquals('{}',
              JSON.stringify({a: undefined, b: function(){}, c: 42, d: 42},
-                            function(k, v) { if (k == "c") return undefined; 
+                            function(k, v) { if (k == "c") return undefined;
                                              if (k == "d") return function(){};
                                              return v; }));
 
@@ -328,7 +330,7 @@
     // Step 2.a
     expected = '\\' + string;
   } else if ("\b\t\n\r\f".indexOf(string) >= 0) {
-    // Step 2.b 
+    // Step 2.b
     if (string == '\b') expected = '\\b';
     else if (string == '\t') expected = '\\t';
     else if (string == '\n') expected = '\\n';
@@ -343,6 +345,73 @@
     }
   } else {
     expected = string;
-  }  
+  }
   assertEquals('"' + expected + '"', encoded, "Codepoint " + i);
-} 
+}
+
+
+// Ensure that wrappers and callables are handled correctly.
+var num37 = new Number(42);
+num37.valueOf = function() { return 37; };
+
+var numFoo = new Number(42);
+numFoo.valueOf = "not callable";
+numFoo.toString = function() { return "foo"; };
+
+var numTrue = new Number(42);
+numTrue.valueOf = function() { return true; }
+
+var strFoo = new String("bar");
+strFoo.toString = function() { return "foo"; };
+
+var str37 = new String("bar");
+str37.toString = "not callable";
+str37.valueOf = function() { return 37; };
+
+var strTrue = new String("bar");
+strTrue.toString = function() { return true; }
+
+var func = function() { /* Is callable */ };
+
+var funcJSON = function() { /* Is callable */ };
+funcJSON.toJSON = function() { return "has toJSON"; };
+
+var re = /Is callable/;
+
+var reJSON = /Is callable/;
+reJSON.toJSON = function() { return "has toJSON"; };
+
+assertEquals(
+    '[37,null,1,"foo","37","true",null,"has toJSON",null,"has toJSON"]',
+    JSON.stringify([num37, numFoo, numTrue,
+                    strFoo, str37, strTrue,
+                    func, funcJSON, re, reJSON]));
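+
+// Number and String wrappers are converted via ToNumber/ToString, honoring
+// overridden valueOf/toString (a NaN result becomes null); a toJSON method
+// always wins; callables without one (regexps are callable in V8) become
+// null inside arrays.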
+
+
+var oddball = Object(42);
+oddball.__proto__ = { __proto__: null, toString: function() { return true; } };
+assertEquals('1', JSON.stringify(oddball));
+
+var getCount = 0;
+var callCount = 0;
+var counter = { get toJSON() { getCount++;
+                               return function() { callCount++;
+                                                   return 42; }; } };
+assertEquals('42', JSON.stringify(counter));
+assertEquals(1, getCount);
+assertEquals(1, callCount);
+
+var oddball2 = Object(42);
+var oddball3 = Object("foo");
+oddball3.__proto__ = { __proto__: null,
+                       toString: "not callable",
+                       valueOf: function() { return true; } };
+oddball2.__proto__ = { __proto__: null,
+                       toJSON: function () { return oddball3; } }
+assertEquals('"true"', JSON.stringify(oddball2));
+
+
+var falseNum = Object("37");
+falseNum.__proto__ = Number.prototype;
+falseNum.toString = function() { return 42; };
+assertEquals('"42"', JSON.stringify(falseNum));
diff --git a/test/mjsunit/mirror-object.js b/test/mjsunit/mirror-object.js
index ad7add8..1888554 100644
--- a/test/mjsunit/mirror-object.js
+++ b/test/mjsunit/mirror-object.js
@@ -74,7 +74,7 @@
     assertEquals('property', properties[i].type(), 'Unexpected mirror type');
     assertEquals(names[i], properties[i].name(), 'Unexpected property name');
   }
-  
+
   for (var p in obj) {
     var property_mirror = mirror.property(p);
     assertTrue(property_mirror instanceof debug.PropertyMirror);
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 820dca7..eeeb3dc 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -30,6 +30,14 @@
 # All tests in the bug directory are expected to fail.
 bugs: FAIL
 
+
+##############################################################################
+# Too slow in debug mode with --stress-opt
+compiler/regress-stacktrace-methods: PASS, SKIP if $mode == debug
+compiler/regress-funcaller: PASS, SKIP if $mode == debug
+regress/regress-create-exception: PASS, SKIP if $mode == debug
+
+##############################################################################
 # This one uses a built-in that's only present in debug mode. It takes
 # too long to run in debug mode on ARM.
 fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm)
@@ -49,6 +57,8 @@
 debug-liveedit-check-stack: SKIP
 debug-liveedit-patch-positions-replace: SKIP
 
+
+##############################################################################
 [ $arch == arm ]
 
 # Slow tests which times out in debug mode.
@@ -60,15 +70,51 @@
 unicode-test: PASS, (PASS || FAIL) if $mode == debug
 
 # Times out often in release mode on ARM.
+compiler/regress-stacktrace-methods: PASS, PASS || TIMEOUT if $mode == release
 array-splice: PASS || TIMEOUT
 
-# Skip long running test in debug mode on ARM.
-string-indexof-2: PASS, SKIP if $mode == debug
+# Long running test.
+mirror-object: PASS || TIMEOUT
+string-indexof-2: PASS || TIMEOUT
+
+# BUG(3251035): Timeouts in long looping crankshaft optimization
+# tests. Skipping because having them timeout takes too long on the
+# buildbot.
+compiler/alloc-number: SKIP
+compiler/array-length: SKIP
+compiler/assignment-deopt: SKIP
+compiler/deopt-args: SKIP
+compiler/inline-compare: SKIP
+compiler/inline-global-access: SKIP
+compiler/optimized-function-calls: SKIP
+compiler/pic: SKIP
+compiler/property-calls: SKIP
+compiler/recursive-deopt: SKIP
+compiler/regress-4: SKIP
+compiler/regress-funcaller: SKIP
+compiler/regress-gvn: SKIP
+compiler/regress-rep-change: SKIP
+compiler/regress-arguments: SKIP
+compiler/regress-funarguments: SKIP
+compiler/regress-or: SKIP
+compiler/regress-3249650: SKIP
+compiler/simple-deopt: SKIP
+regress/regress-490: SKIP
+regress/regress-634: SKIP
+regress/regress-create-exception: SKIP
+regress/regress-3218915: SKIP
+regress/regress-3247124: SKIP
 
 
+##############################################################################
+[ $arch == arm && $crankshaft ]
+
+# Test that currently fail with crankshaft on ARM.
+compiler/simple-osr: FAIL
+
+
+##############################################################################
 [ $arch == mips ]
 
 # Skip all tests on MIPS.
 *: SKIP
-
-
diff --git a/test/mjsunit/object-define-property.js b/test/mjsunit/object-define-property.js
index b258aa7..d24a4e5 100644
--- a/test/mjsunit/object-define-property.js
+++ b/test/mjsunit/object-define-property.js
@@ -74,7 +74,7 @@
 // Descriptors.
 var emptyDesc = {};
 
-var accessorConfigurable = { 
+var accessorConfigurable = {
     set: setter1,
     get: getter1,
     configurable: true
@@ -83,7 +83,7 @@
 var accessorNoConfigurable = {
     set: setter2,
     get: getter2,
-    configurable: false 
+    configurable: false
 };
 
 var accessorOnlySet = {
@@ -234,7 +234,7 @@
 assertEquals(1, obj1.setOnly = 1);
 assertEquals(2, val3);
 
-// The above should also work if redefining just a getter or setter on 
+// The above should also work if redefining just a getter or setter on
 // an existing property with both a getter and a setter.
 Object.defineProperty(obj1, "both", accessorConfigurable);
 
@@ -384,7 +384,7 @@
 assertEquals(desc.set, undefined);
 
 
-// Redefinition of an accessor defined using __defineGetter__ and 
+// Redefinition of an accessor defined using __defineGetter__ and
 // __defineSetter__.
 function get(){return this.x}
 function set(x){this.x=x};
@@ -462,7 +462,7 @@
 
 
 // Test runtime calls to DefineOrRedefineDataProperty and
-// DefineOrRedefineAccessorProperty - make sure we don't 
+// DefineOrRedefineAccessorProperty - make sure we don't
 // crash.
 try {
   %DefineOrRedefineAccessorProperty(0, 0, 0, 0, 0);
@@ -511,7 +511,7 @@
 // Test that all possible differences in step 6 in DefineOwnProperty are
 // exercised, i.e., any difference in the given property descriptor and the
 // existing properties should not return true, but throw an error if the
-// existing configurable property is false. 
+// existing configurable property is false.
 
 var obj5 = {};
 // Enumerable will default to false.
@@ -727,7 +727,7 @@
 var descElementNonConfigurable = { value: 'barfoo', configurable: false };
 var descElementNonWritable = { value: 'foofoo', writable: false };
 var descElementNonEnumerable = { value: 'barbar', enumerable: false };
-var descElementAllFalse = { value: 'foofalse', 
+var descElementAllFalse = { value: 'foofalse',
                             configurable: false,
                             writable: false,
                             enumerable: false };
@@ -790,7 +790,7 @@
 
 // Make sure that we can't redefine using direct access.
 obj6[15] ='overwrite';
-assertEquals(obj6[15],'foobar'); 
+assertEquals(obj6[15],'foobar');
 
 
 // Repeat the above tests on an array.
@@ -805,7 +805,7 @@
 var descElementNonConfigurable = { value: 'barfoo', configurable: false };
 var descElementNonWritable = { value: 'foofoo', writable: false };
 var descElementNonEnumerable = { value: 'barbar', enumerable: false };
-var descElementAllFalse = { value: 'foofalse', 
+var descElementAllFalse = { value: 'foofalse',
                             configurable: false,
                             writable: false,
                             enumerable: false };
@@ -866,4 +866,35 @@
 assertFalse(desc.enumerable);
 assertFalse(desc.configurable);
 
+// See issue 968: http://code.google.com/p/v8/issues/detail?id=968
+var o = { x : 42 };
+Object.defineProperty(o, "x", { writable: false });
+assertEquals(42, o.x);
+o.x = 37;
+assertEquals(42, o.x);
 
+o = { x : 42 };
+Object.defineProperty(o, "x", {});
+assertEquals(42, o.x);
+o.x = 37;
+// Writability is preserved.
+assertEquals(37, o.x);
+
+var o = { };
+Object.defineProperty(o, "x", { writable: false });
+assertEquals(undefined, o.x);
+o.x = 37;
+assertEquals(undefined, o.x);
+
+o = { get x() { return 87; } };
+Object.defineProperty(o, "x", { writable: false });
+assertEquals(undefined, o.x);
+o.x = 37;
+assertEquals(undefined, o.x);
+
+// Ignore inherited properties.
+o = { __proto__ : { x : 87 } };
+Object.defineProperty(o, "x", { writable: false });
+assertEquals(undefined, o.x);
+o.x = 37;
+assertEquals(undefined, o.x);
diff --git a/test/mjsunit/object-toprimitive.js b/test/mjsunit/object-toprimitive.js
new file mode 100644
index 0000000..3a67ced
--- /dev/null
+++ b/test/mjsunit/object-toprimitive.js
@@ -0,0 +1,104 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test the ToPrimitive internal function used by ToNumber/ToString.  It
+// should [[Get]] and [[Call]] the object's toString and valueOf properties
+// correctly; specifically, it should call [[Get]] only once per property.
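+//
+// For ToNumber the lookup order is valueOf then toString; for ToString it is
+// toString then valueOf.  A non-primitive result falls through to the other
+// method, and a non-callable property is skipped (its getter still runs, but
+// there is no [[Call]]).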
+
+var o1 = { toString: function() { return 42; },
+           valueOf: function() { return "37"; } };
+var n1 = Number(o1);
+var s1 = String(o1);
+assertTrue(typeof n1 == "number");
+assertTrue(typeof s1 == "string");
+
+var trace = [];
+var valueOfValue = 42;
+var toStringValue = "foo";
+function traceValueOf () {
+  trace.push("vo");
+  return valueOfValue;
+};
+function traceToString() {
+  trace.push("ts");
+  return toStringValue;
+};
+var valueOfFunc = traceValueOf;
+var toStringFunc = traceToString;
+
+var ot = { get toString() { trace.push("gts");
+                            return toStringFunc; },
+           get valueOf() { trace.push("gvo");
+                           return valueOfFunc; }
+};
+
+var nt = Number(ot);
+assertEquals(42, nt);
+assertEquals(["gvo","vo"], trace);
+
+trace = [];
+var st = String(ot);
+assertEquals("foo", st);
+assertEquals(["gts","ts"], trace);
+
+trace = [];
+valueOfValue = ["not primitive"];
+var nt = Number(ot);
+assertEquals(Number("foo"), nt);
+assertEquals(["gvo", "vo", "gts", "ts"], trace);
+
+trace = [];
+valueOfValue = 42;
+toStringValue = ["not primitive"];
+var st = String(ot);
+assertEquals(String(42), st);
+assertEquals(["gts", "ts", "gvo", "vo"], trace);
+
+trace = [];
+valueOfValue = ["not primitive"];
+assertThrows("Number(ot)", TypeError);
+assertEquals(["gvo", "vo", "gts", "ts"], trace);
+
+
+toStringFunc = "not callable";
+trace = [];
+valueOfValue = 42;
+var st = String(ot);
+assertEquals(String(42), st);
+assertEquals(["gts", "gvo", "vo"], trace);
+
+valueOfFunc = "not callable";
+trace = [];
+assertThrows("String(ot)", TypeError);
+assertEquals(["gts", "gvo"], trace);
+
+toStringFunc = traceToString;
+toStringValue = "87";
+trace = [];
+var nt = Number(ot);
+assertEquals(87, nt);
+assertEquals(["gvo", "gts", "ts"], trace);
diff --git a/test/mjsunit/regexp.js b/test/mjsunit/regexp.js
index b57b86d..4c1d2e3 100644
--- a/test/mjsunit/regexp.js
+++ b/test/mjsunit/regexp.js
@@ -202,6 +202,17 @@
 assertFalse(re.test('a'));
 assertFalse(re.test('Z'));
 
+// The first - is treated as a range operator, the second as a literal minus.
+// This follows the specification in parsing, but doesn't throw on the \s at
+// the beginning of the range.
+re = /[\s-0-9]/;
+assertTrue(re.test(' '));
+assertTrue(re.test('\xA0'));
+assertTrue(re.test('-'));
+assertTrue(re.test('0'));
+assertTrue(re.test('9'));
+assertFalse(re.test('1'));
+
 // Test beginning and end of line assertions with or without the
 // multiline flag.
 re = /^\d+/;
@@ -647,3 +658,4 @@
 assertEquals(["bc"], re.exec("zimzomzumbc"));
 assertFalse(re.test("c"));
 assertFalse(re.test(""));
+
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/regress/regress-3006390.js
similarity index 87%
rename from test/mjsunit/regress/regress-3408144.js
rename to test/mjsunit/regress/regress-3006390.js
index 6e292d6..4f916ef 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/regress/regress-3006390.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
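+// Test the modulus operator with an object operand whose valueOf is used.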
+function X() { }
+X.prototype.valueOf = function () { return 7; }
 
+function f(x, y) { return x % y; }
 
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
-}
-
-assertFalse(foo());
+assertEquals(1, f(8, new X()));
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/regress/regress-3185905.js
similarity index 74%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/regress/regress-3185905.js
index e8028ce..bd611ab 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/regress/regress-3185905.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,36 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
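+// Each test below mentions arguments.callee (or shadows arguments entirely)
+// and then assigns to its parameter, checking that the parameter still sees
+// the updates.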
+function test1(x) {
+  var a = arguments.callee;
+  x = 1;
+  x = 2;
+  assertEquals(2, x);
+}
+test1(0)
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+function test2(x) {
+  var a = arguments.callee;
+  x++;
+  x++;
+  assertEquals(2, x);
+}
+test2(0)
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+function test3(x) {
+  var a = arguments.callee;
+  x += 1;
+  x += 1;
+  assertEquals(2, x);
+}
+test3(0)
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+function test4(x) {
+  var arguments = { 0 : 3, 'x' : 4 };
+  x += 1;
+  x += 1;
+  assertEquals(2, x);
+  assertEquals(3, arguments[0])
+  assertEquals(4, arguments['x'])
+}
+test4(0)
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/regress/regress-3199913.js
similarity index 77%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/regress/regress-3199913.js
index 6e292d6..e202af1 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/regress/regress-3199913.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Test that bailout during evaluation of the key for a keyed call works as
+// intended.
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+var y = {
+  'a' : function (x, y) { return 'called a(' + x + ', ' + y + ')' },
+  'b' : function (x, y) { return 'called b(' + x + ', ' + y + ')' }
 }
 
-assertFalse(foo());
+function C() {
+}
+
+C.prototype.f = function () {
+  return y[(this.a == 1 ? "a" : "b")](0, 1);
+}
+
+obj = new C()
+assertEquals('called b(0, 1)', obj.f())
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/regress/regress-3218530.js
similarity index 81%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/regress/regress-3218530.js
index 6e292d6..247f3df 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/regress/regress-3218530.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// This tests that global key values are preserved when used in
+// an expression which will bail out.
 
+var m = Math;
+var p = "floor";
 
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function test() {
+  var bignumber = 31363200000;
+  assertDoesNotThrow(assertEquals(m[p](Math.round(bignumber/864E5)/7)+1, 52));
 }
 
-assertFalse(foo());
+test();
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/regress/regress-3218915.js
similarity index 76%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/regress/regress-3218915.js
index 6e292d6..5fcbcec 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/regress/regress-3218915.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,18 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Checks that a comma expression in a conditional context is processed
+// correctly.
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function withCommaExpressionInConditional(x) {
+  if (x > 1000) { for (var i = 0; i < 10000; i++) { } }
+  var y;
+  if (y = x, y > 1) {
+    return 'big';
+  }
+  return (y = x + 1, y > 1) ? 'medium' : 'small';
 }
 
-assertFalse(foo());
+for (var i = 0; i < 10000; i++) {
+  withCommaExpressionInConditional(i);
+}
+withCommaExpressionInConditional("1")
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/regress/regress-3230771.js
similarity index 85%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/regress/regress-3230771.js
index 6e292d6..bd00798 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/regress/regress-3230771.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+// Regression test for missing stack-overflow check in
+// VisitForStatement in hydrogen graph building.
 
-
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function f() {
+  for (var h = typeof arguments[0] == "object" ? 0 : arguments; false; ) { }
 }
 
-assertFalse(foo());
+f();
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/regress/regress-3247124.js
similarity index 70%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/regress/regress-3247124.js
index 6e292d6..fe4ec4e 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/regress/regress-3247124.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,29 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+var foo = unescape("%E0%E2%EA%F4%FB%E3%F5%E1%E9%ED%F3%FA%E7%FC%C0%C2%CA%D4%DB%C3%D5%C1%C9%CD%D3%DA%C7%DC");
 
+function bar(x) {
+  var s = new String(x);
+  var a = new String(foo);
+  var b = new String('aaeouaoaeioucuAAEOUAOAEIOUCU');
 
-// Flags: --nofull-compiler
+  var i = new Number();
+  var j = new Number();
+  var c = new String();
+  var r = '';
 
-function foo() {
-  return (0 > ("10"||10) - 1);
+  for (i = 0; i < s.length; i++) {
+    c = s.substring(i, i + 1);
+    for (j = 0; j < a.length; j++) {
+      if (a.substring(j, j + 1) == c) {
+        c = b.substring(j, j + 1);
+      }
+    }
+    r += c;
+  }
+
+  return r.toLowerCase();
 }
 
-assertFalse(foo());
+for (var i = 0; i < 100; i++) bar(foo);
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/regress/regress-3252443.js
similarity index 67%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/regress/regress-3252443.js
index e8028ce..cd7aa40 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/regress/regress-3252443.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,21 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+var document = new Object();
+document.getElementById = function(s) { return { style: {}}};
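+// Stub out just enough DOM so that x() can stress string concatenation of
+// mixed number/string keys and repeated property loads in a long loop.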
+function x(p0, p1, p2, p3) {
+  document.getElementById(p1+p0).style.display='';
+  document.getElementById(p1+''+p0).style.backgroundColor = "";
+  document.getElementById(p1+''+p0).style.color="";
+  document.getElementById(p1+''+p0).style.borderBottomColor = "";
+  for (var i = p3; i <= p2; ++i) {
+    if (i != p0) {
+      document.getElementById(p1+i).style.display='';
+      document.getElementById(p1+''+i).style.backgroundColor = "";
+      document.getElementById(p1+''+i).style.color="";
+      document.getElementById(p1+''+i).style.borderBottomColor = "";
+    }
+  }
+}
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
-
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
-
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+x(1, "xxx", 10000, 1)
diff --git a/test/mjsunit/regress/regress-52801.js b/test/mjsunit/regress/regress-52801.js
index 80cc0c7..9a34b81 100644
--- a/test/mjsunit/regress/regress-52801.js
+++ b/test/mjsunit/regress/regress-52801.js
@@ -67,5 +67,3 @@
 re.lastIndex = 0;
 re.exec(str);
 assertEquals(5, re.lastIndex);  // Fails if caching.
-
-
diff --git a/test/mjsunit/regress/regress-580.js b/test/mjsunit/regress/regress-580.js
index c6b3db7..6b1d098 100644
--- a/test/mjsunit/regress/regress-580.js
+++ b/test/mjsunit/regress/regress-580.js
@@ -32,22 +32,22 @@
   var x;
   var tmp = 0;
   x = (tmp = 1578221999, tmp)+(tmp = 572285336, tmp);
-  assertEquals(2150507335, x);
+  assertEquals(2150507335, x, "++");
   x = 1578221999 + 572285336;
   assertEquals(2150507335, x);
 
   x = (tmp = -1500000000, tmp)+(tmp = -2000000000, tmp);
-  assertEquals(-3500000000, x);
+  assertEquals(-3500000000, x, "+-");
   x = -1500000000 + -2000000000;
   assertEquals(-3500000000, x);
 
   x = (tmp = 1578221999, tmp)-(tmp = -572285336, tmp);
-  assertEquals(2150507335, x);
+  assertEquals(2150507335, x, "--");
   x = 1578221999 - -572285336;
   assertEquals(2150507335, x);
 
   x = (tmp = -1500000000, tmp)-(tmp = 2000000000, tmp);
-  assertEquals(-3500000000, x);
+  assertEquals(-3500000000, x, "-+");
   x = -1500000000 - 2000000000;
   assertEquals(-3500000000, x);
 }
diff --git a/test/mjsunit/regress/regress-687.js b/test/mjsunit/regress/regress-687.js
new file mode 100644
index 0000000..a917a44
--- /dev/null
+++ b/test/mjsunit/regress/regress-687.js
@@ -0,0 +1,75 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This regression includes a number of cases where we did not correctly
+// update an accessor property to a data property using Object.defineProperty.
+
+var obj = { get value() {}, set value (v) { throw "Error";} };
+assertDoesNotThrow(
+    Object.defineProperty(obj, "value",
+                          { value: 5, writable:true, configurable: true }));
+var desc = Object.getOwnPropertyDescriptor(obj, "value");
+assertEquals(obj.value, 5);
+assertTrue(desc.configurable);
+assertTrue(desc.enumerable);
+assertTrue(desc.writable);
+assertEquals(desc.get, undefined);
+assertEquals(desc.set, undefined);
+
+
+var proto = {
+  get value() {},
+  set value(v) { Object.defineProperty(this, "value", {value: v}); }
+};
+
+var create = Object.create(proto);
+
+assertEquals(create.value, undefined);
+assertDoesNotThrow(create.value = 4);
+assertEquals(create.value, 4);
+
+// These tests were provided in bug 959, but are all related to this issue.
+var obj1 = {};
+Object.defineProperty(obj1, 'p', {get: undefined, set: undefined});
+assertTrue("p" in obj1);
+desc = Object.getOwnPropertyDescriptor(obj1, "p");
+assertFalse(desc.configurable);
+assertFalse(desc.enumerable);
+assertEquals(desc.value, undefined);
+assertEquals(desc.get, undefined);
+assertEquals(desc.set, undefined);
+
+
+var obj2 = { get p() {}};
+Object.defineProperty(obj2, 'p', {get: undefined})
+assertTrue("p" in obj2);
+desc = Object.getOwnPropertyDescriptor(obj2, "p");
+assertTrue(desc.configurable);
+assertTrue(desc.enumerable);
+assertEquals(desc.value, undefined);
+assertEquals(desc.get, undefined);
+assertEquals(desc.set, undefined);
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/regress/regress-962.js
similarity index 74%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/regress/regress-962.js
index e8028ce..f9f46e1 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/regress/regress-962.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,29 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+function L(scope) { this.s = new Object(); }
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+L.prototype.c = function() { return true; }
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+function F() {
+  this.l = [new L, new L];
+}
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+F.prototype.foo = function () {
+    var f, d = arguments,
+        e, b = this.l,
+        g;
+    for (e = 0; e < b.length; e++) {
+        g = b[e];
+        f = g.c.apply(g.s, d);
+        if (f === false) {
+            break
+        }
+    }
+    return f
+}
+
+
+var ctx = new F;
+
+for (var i = 0; i < 10000; i++) ctx.foo();
diff --git a/test/mjsunit/regress/regress-969.js b/test/mjsunit/regress/regress-969.js
new file mode 100644
index 0000000..c2ba0ac
--- /dev/null
+++ b/test/mjsunit/regress/regress-969.js
@@ -0,0 +1,127 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for bugs when deoptimizing after assignments in effect
+// contexts.
+
+// Bug 989 is that there was an extra value on the expression stack when
+// deoptimizing after an assignment in effect context (the value of the
+// assignment was lingering).  This is hard to observe in the unoptimized
+// code.
+//
+// This test uses comma expressions to put assignments in effect contexts,
+// references to deleted global variables to force deoptimization, and
+// function calls to observe an extra value.
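+// (With an extra value lingering on the stack, the call to first() would see
+// its arguments shifted and return the wrong one.)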
+
+function first(x, y) { return x; }
+var y = 0;
+var o = {};
+o.x = 0;
+o[0] = 0;
+
+// Assignment to global variable.
+x0 = 0;
+function test0() { return first((y = 1, typeof x0), 2); }
+// Call the function once to compile it.
+assertEquals('number', test0());
+// Delete to force deoptimization on the next call.
+delete x0;
+assertEquals('undefined', test0());
+
+// Compound assignment to global variable.
+x1 = 0;
+function test1() { return first((y += 1, typeof x1), 2); }
+assertEquals('number', test1(), 'test1 before');
+delete x1;
+assertEquals('undefined', test1(), 'test1 after');
+
+// Pre and post-increment of global variable.
+x2 = 0;
+function test2() { return first((++y, typeof x2), 2); }
+assertEquals('number', test2(), 'test2 before');
+delete x2;
+assertEquals('undefined', test2(), 'test2 after');
+
+x3 = 0;
+function test3() { return first((y++, typeof x3), 2); }
+assertEquals('number', test3(), 'test3 before');
+delete x3;
+assertEquals('undefined', test3(), 'test3 after');
+
+
+// Assignment, compound assignment, and pre and post-increment of named
+// properties.
+x4 = 0;
+function test4() { return first((o.x = 1, typeof x4), 2); }
+assertEquals('number', test4());
+delete x4;
+assertEquals('undefined', test4());
+
+x5 = 0;
+function test5() { return first((o.x += 1, typeof x5), 2); }
+assertEquals('number', test5());
+delete x5;
+assertEquals('undefined', test5());
+
+x6 = 0;
+function test6() { return first((++o.x, typeof x6), 2); }
+assertEquals('number', test6());
+delete x6;
+assertEquals('undefined', test6());
+
+x7 = 0;
+function test7() { return first((o.x++, typeof x7), 2); }
+assertEquals('number', test7());
+delete x7;
+assertEquals('undefined', test7());
+
+
+// Assignment, compound assignment, and pre and post-increment of indexed
+// properties.
+x8 = 0;
+function test8(index) { return first((o[index] = 1, typeof x8), 2); }
+assertEquals('number', test8());
+delete x8;
+assertEquals('undefined', test8());
+
+x9 = 0;
+function test9(index) { return first((o[index] += 1, typeof x9), 2); }
+assertEquals('number', test9());
+delete x9;
+assertEquals('undefined', test9());
+
+x10 = 0;
+function test10(index) { return first((++o[index], typeof x10), 2); }
+assertEquals('number', test10());
+delete x10;
+assertEquals('undefined', test10());
+
+x11 = 0;
+function test11(index) { return first((o[index]++, typeof x11), 2); }
+assertEquals('number', test11());
+delete x11;
+assertEquals('undefined', test11());
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/regress/regress-982.js
similarity index 83%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/regress/regress-982.js
index 6e292d6..d88543a 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/regress/regress-982.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,21 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
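+// Run a long loop mixing smi comparisons, global stores and string
+// concatenation (regression test for issue 982).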
+function f(a) {
+ return {className: 'xxx'};
+};
 
+var x = 1;
 
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function g(active) {
+ for (i = 1; i <= 20000; i++) {
+   if (i == active) {
+     x = i;
+     if (f("" + i) != null) { }
+   } else {
+     if (f("" + i) != null) { }
+   }
+ }
 }
 
-assertFalse(foo());
+g(0);
diff --git a/test/mjsunit/regress/regress-1146.js b/test/mjsunit/regress/regress-995.js
similarity index 71%
copy from test/mjsunit/regress/regress-1146.js
copy to test/mjsunit/regress/regress-995.js
index e8028ce..e88121a 100644
--- a/test/mjsunit/regress/regress-1146.js
+++ b/test/mjsunit/regress/regress-995.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,24 +25,33 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test keyed calls with different key types.
-function F() {}
-var a = new F();
-function f(i) { return a[i](); }
+//
+// A number of hydrogen instructions did not correctly compare their
+// data during GVN.
+//
+// Flags: --allow-natives-syntax
 
-a.first = function() { return 11; }
-a[0] = function() { return 22; }
-var obj = {};
-a[obj] = function() { return 33; }
+// HHasInstance.
+function f(value) {
+  if (%_IsSpecObject(value)) {
+    if ((%_IsArray(value))) assertTrue(false);
+  }
+}
+f(new String("bar"));
 
-// Make object slow-case.
-a.foo = 0;
-delete a.foo;
-// Do multiple calls for IC transitions.
-var b = "first";
-f(b);
-f(b);
+// HClassOf.
+function g(value) {
+  if (%_ClassOf(value) === 'Date') {
+    if (%_ClassOf(value) === 'String') assertTrue(false);
+  }
+}
+g(new Date());
 
-assertEquals(11, f(b));
-assertEquals(22, f(0));
-assertEquals(33, f(obj));
+// HIsNull.
+function h(value) {
+  if (value == null) {
+    if (value === null) assertTrue(false);
+  }
+}
+h(undefined);
+
diff --git a/test/mjsunit/smi-ops-inlined.js b/test/mjsunit/smi-ops-inlined.js
new file mode 100644
index 0000000..afc6cc0
--- /dev/null
+++ b/test/mjsunit/smi-ops-inlined.js
@@ -0,0 +1,673 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --always-inline-smi-code
+
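+// With 31-bit smis (the 32-bit V8 configuration), the smi range is
+// [-2^30, 2^30 - 1]; results outside it must become heap numbers.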
+const SMI_MAX = (1 << 30) - 1;
+const SMI_MIN = -(1 << 30);
+const ONE = 1;
+const ONE_HUNDRED = 100;
+
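+// OBJ_42 is not a smi: it is an object whose valueOf() yields 42,
+// forcing the generic (non-inlined) binary-op path.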
+const OBJ_42 = new (function() {
+  this.valueOf = function() { return 42; };
+})();
+
+assertEquals(42, OBJ_42.valueOf());
+
+
+function Add1(x) {
+  return x + 1;
+}
+
+function Add100(x) {
+  return x + 100;
+}
+
+function Add1Reversed(x) {
+  return 1 + x;
+}
+
+function Add100Reversed(x) {
+  return 100 + x;
+}
+
+
+assertEquals(1, Add1(0));  // fast case
+assertEquals(1, Add1Reversed(0));  // fast case
+assertEquals(SMI_MAX + ONE, Add1(SMI_MAX), "smimax + 1");
+assertEquals(SMI_MAX + ONE, Add1Reversed(SMI_MAX), "1 + smimax");
+assertEquals(42 + ONE, Add1(OBJ_42));  // non-smi
+assertEquals(42 + ONE, Add1Reversed(OBJ_42));  // non-smi
+
+assertEquals(100, Add100(0));  // fast case
+assertEquals(100, Add100Reversed(0));  // fast case
+assertEquals(SMI_MAX + ONE_HUNDRED, Add100(SMI_MAX), "smimax + 100");
+assertEquals(SMI_MAX + ONE_HUNDRED, Add100Reversed(SMI_MAX), " 100 + smimax");
+assertEquals(42 + ONE_HUNDRED, Add100(OBJ_42));  // non-smi
+assertEquals(42 + ONE_HUNDRED, Add100Reversed(OBJ_42));  // non-smi
+
+
+
+function Sub1(x) {
+  return x - 1;
+}
+
+function Sub100(x) {
+  return x - 100;
+}
+
+function Sub1Reversed(x) {
+  return 1 - x;
+}
+
+function Sub100Reversed(x) {
+  return 100 - x;
+}
+
+
+assertEquals(0, Sub1(1));  // fast case
+assertEquals(-1, Sub1Reversed(2));  // fast case
+assertEquals(SMI_MIN - ONE, Sub1(SMI_MIN));  // overflow
+assertEquals(ONE - SMI_MIN, Sub1Reversed(SMI_MIN));  // overflow
+assertEquals(42 - ONE, Sub1(OBJ_42));  // non-smi
+assertEquals(ONE - 42, Sub1Reversed(OBJ_42));  // non-smi
+
+assertEquals(0, Sub100(100));  // fast case
+assertEquals(1, Sub100Reversed(99));  // fast case
+assertEquals(SMI_MIN - ONE_HUNDRED, Sub100(SMI_MIN));  // overflow
+assertEquals(ONE_HUNDRED - SMI_MIN, Sub100Reversed(SMI_MIN));  // overflow
+assertEquals(42 - ONE_HUNDRED, Sub100(OBJ_42));  // non-smi
+assertEquals(ONE_HUNDRED - 42, Sub100Reversed(OBJ_42));  // non-smi
+
+
+function Shr1(x) {
+  return x >>> 1;
+}
+
+function Shr100(x) {
+  return x >>> 100;
+}
+
+function Shr1Reversed(x) {
+  return 1 >>> x;
+}
+
+function Shr100Reversed(x) {
+  return 100 >>> x;
+}
+
+function Sar1(x) {
+  return x >> 1;
+}
+
+function Sar100(x) {
+  return x >> 100;
+}
+
+function Sar1Reversed(x) {
+  return 1 >> x;
+}
+
+function Sar100Reversed(x) {
+  return 100 >> x;
+}
+
+
+assertEquals(0, Shr1(1));
+assertEquals(0, Sar1(1));
+assertEquals(0, Shr1Reversed(2));
+assertEquals(0, Sar1Reversed(2));
+assertEquals(1610612736, Shr1(SMI_MIN));
+assertEquals(-536870912, Sar1(SMI_MIN));
+assertEquals(1, Shr1Reversed(SMI_MIN));
+assertEquals(1, Sar1Reversed(SMI_MIN));
+assertEquals(21, Shr1(OBJ_42));
+assertEquals(21, Sar1(OBJ_42));
+assertEquals(0, Shr1Reversed(OBJ_42));
+assertEquals(0, Sar1Reversed(OBJ_42));
+
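+// Shift counts are masked to five bits, so shifting by 100 actually
+// shifts by 100 & 31 == 4.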
+assertEquals(6, Shr100(100), "100 >>> 100");
+assertEquals(6, Sar100(100), "100 >> 100");
+assertEquals(12, Shr100Reversed(99));
+assertEquals(12, Sar100Reversed(99));
+assertEquals(201326592, Shr100(SMI_MIN));
+assertEquals(-67108864, Sar100(SMI_MIN));
+assertEquals(100, Shr100Reversed(SMI_MIN));
+assertEquals(100, Sar100Reversed(SMI_MIN));
+assertEquals(2, Shr100(OBJ_42));
+assertEquals(2, Sar100(OBJ_42));
+assertEquals(0, Shr100Reversed(OBJ_42));
+assertEquals(0, Sar100Reversed(OBJ_42));
+
+
+function Xor1(x) {
+  return x ^ 1;
+}
+
+function Xor100(x) {
+  return x ^ 100;
+}
+
+function Xor1Reversed(x) {
+  return 1 ^ x;
+}
+
+function Xor100Reversed(x) {
+  return 100 ^ x;
+}
+
+
+assertEquals(0, Xor1(1));
+assertEquals(3, Xor1Reversed(2));
+assertEquals(SMI_MIN + 1, Xor1(SMI_MIN));
+assertEquals(SMI_MIN + 1, Xor1Reversed(SMI_MIN));
+assertEquals(43, Xor1(OBJ_42));
+assertEquals(43, Xor1Reversed(OBJ_42));
+
+assertEquals(0, Xor100(100));
+assertEquals(7, Xor100Reversed(99));
+assertEquals(-1073741724, Xor100(SMI_MIN));
+assertEquals(-1073741724, Xor100Reversed(SMI_MIN));
+assertEquals(78, Xor100(OBJ_42));
+assertEquals(78, Xor100Reversed(OBJ_42));
+
+var x = 0x23; var y = 0x35;
+assertEquals(0x16, x ^ y);
+
+
+// Bitwise not.
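+// In two's complement, ~x == -x - 1, so ~SMI_MIN is SMI_MAX and
+// ~SMI_MAX is SMI_MIN, both exercising the smi boundary.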
+var v = 0;
+assertEquals(-1, ~v);
+v = SMI_MIN;
+assertEquals(0x3fffffff, ~v, "~smimin");
+v = SMI_MAX;
+assertEquals(-0x40000000, ~v, "~smimax");
+
+// Overflowing ++ and --.
+v = SMI_MAX;
+v++;
+assertEquals(0x40000000, v, "smimax++");
+v = SMI_MIN;
+v--;
+assertEquals(-0x40000001, v, "smimin--");
+
+// Not actually Smi operations.
+// Check that relations on unary ops work.
+var v = -1.2;
+assertTrue(v == v);
+assertTrue(v === v);
+assertTrue(v <= v);
+assertTrue(v >= v);
+assertFalse(v < v);
+assertFalse(v > v);
+assertFalse(v != v);
+assertFalse(v !== v);
+
+// Right hand side of unary minus is overwritable.
+v = 1.5
+assertEquals(-2.25, -(v * v));
+
+// Smi input to bitop gives non-smi result where the rhs is a float that
+// can be overwritten.
+var x1 = 0x10000000;
+var x2 = 0x40000002;
+var x3 = 0x40000000;
+assertEquals(0x40000000, x1 << (x2 - x3), "0x10000000<<2(1)");
+
+// Smi input to bitop gives non-smi result where the rhs could be overwritten
+// if it were a float, but it isn't.
+x1 = 0x10000000
+x2 = 4
+x3 = 2
+assertEquals(0x40000000, x1 << (x2 - x3), "0x10000000<<2(2)");
+
+
+// Test shift operators on non-smi inputs, giving smi and non-smi results.
+function testShiftNonSmis() {
+  var pos_non_smi = 2000000000;
+  var neg_non_smi = -pos_non_smi;
+  var pos_smi = 1000000000;
+  var neg_smi = -pos_smi;
+
+  // Begin block A
+  assertEquals(pos_non_smi, (pos_non_smi) >> 0);
+  assertEquals(pos_non_smi, (pos_non_smi) >>> 0);
+  assertEquals(pos_non_smi, (pos_non_smi) << 0);
+  assertEquals(neg_non_smi, (neg_non_smi) >> 0);
+  assertEquals(neg_non_smi + 0x100000000, (neg_non_smi) >>> 0);
+  assertEquals(neg_non_smi, (neg_non_smi) << 0);
+  assertEquals(pos_smi, (pos_smi) >> 0, "possmi >> 0");
+  assertEquals(pos_smi, (pos_smi) >>> 0, "possmi >>>0");
+  assertEquals(pos_smi, (pos_smi) << 0, "possmi << 0");
+  assertEquals(neg_smi, (neg_smi) >> 0, "negsmi >> 0");
+  assertEquals(neg_smi + 0x100000000, (neg_smi) >>> 0, "negsmi >>> 0");
+  assertEquals(neg_smi, (neg_smi) << 0, "negsmi << 0");
+
+  assertEquals(pos_non_smi / 2, (pos_non_smi) >> 1);
+  assertEquals(pos_non_smi / 2, (pos_non_smi) >>> 1);
+  assertEquals(-0x1194D800, (pos_non_smi) << 1);
+  assertEquals(pos_non_smi / 8, (pos_non_smi) >> 3);
+  assertEquals(pos_non_smi / 8, (pos_non_smi) >>> 3);
+  assertEquals(-0x46536000, (pos_non_smi) << 3);
+  assertEquals(0x73594000, (pos_non_smi) << 4);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) >> 0);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) >>> 0);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) << 0);
+  assertEquals(pos_non_smi / 2, (pos_non_smi + 0.5) >> 1);
+  assertEquals(pos_non_smi / 2, (pos_non_smi + 0.5) >>> 1);
+  assertEquals(-0x1194D800, (pos_non_smi + 0.5) << 1);
+  assertEquals(pos_non_smi / 8, (pos_non_smi + 0.5) >> 3);
+  assertEquals(pos_non_smi / 8, (pos_non_smi + 0.5) >>> 3);
+  assertEquals(-0x46536000, (pos_non_smi + 0.5) << 3);
+  assertEquals(0x73594000, (pos_non_smi + 0.5) << 4);
+
+  assertEquals(neg_non_smi / 2, (neg_non_smi) >> 1, "negnonsmi >> 1");
+
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi) >>> 1,
+               "negnonsmi >>> 1");
+  assertEquals(0x1194D800, (neg_non_smi) << 1);
+  assertEquals(neg_non_smi / 8, (neg_non_smi) >> 3);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi) >>> 3);
+  assertEquals(0x46536000, (neg_non_smi) << 3);
+  assertEquals(-0x73594000, (neg_non_smi) << 4);
+  assertEquals(neg_non_smi, (neg_non_smi - 0.5) >> 0);
+  assertEquals(neg_non_smi + 0x100000000, (neg_non_smi - 0.5) >>> 0,
+               "negnonsmi.5 >>> 0");
+  assertEquals(neg_non_smi, (neg_non_smi - 0.5) << 0);
+  assertEquals(neg_non_smi / 2, (neg_non_smi - 0.5) >> 1);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi - 0.5) >>> 1,
+               "negnonsmi.5 >>> 1");
+  assertEquals(0x1194D800, (neg_non_smi - 0.5) << 1);
+  assertEquals(neg_non_smi / 8, (neg_non_smi - 0.5) >> 3);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi - 0.5) >>> 3);
+  assertEquals(0x46536000, (neg_non_smi - 0.5) << 3);
+  assertEquals(-0x73594000, (neg_non_smi - 0.5) << 4);
+
+  assertEquals(pos_smi / 2, (pos_smi) >> 1);
+  assertEquals(pos_smi / 2, (pos_smi) >>> 1);
+  assertEquals(pos_non_smi, (pos_smi) << 1);
+  assertEquals(pos_smi / 8, (pos_smi) >> 3);
+  assertEquals(pos_smi / 8, (pos_smi) >>> 3);
+  assertEquals(-0x2329b000, (pos_smi) << 3);
+  assertEquals(0x73594000, (pos_smi) << 5);
+  assertEquals(pos_smi, (pos_smi + 0.5) >> 0, "possmi.5 >> 0");
+  assertEquals(pos_smi, (pos_smi + 0.5) >>> 0, "possmi.5 >>> 0");
+  assertEquals(pos_smi, (pos_smi + 0.5) << 0, "possmi.5 << 0");
+  assertEquals(pos_smi / 2, (pos_smi + 0.5) >> 1);
+  assertEquals(pos_smi / 2, (pos_smi + 0.5) >>> 1);
+  assertEquals(pos_non_smi, (pos_smi + 0.5) << 1);
+  assertEquals(pos_smi / 8, (pos_smi + 0.5) >> 3);
+  assertEquals(pos_smi / 8, (pos_smi + 0.5) >>> 3);
+  assertEquals(-0x2329b000, (pos_smi + 0.5) << 3);
+  assertEquals(0x73594000, (pos_smi + 0.5) << 5);
+
+  assertEquals(neg_smi / 2, (neg_smi) >> 1);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi) >>> 1);
+  assertEquals(neg_non_smi, (neg_smi) << 1);
+  assertEquals(neg_smi / 8, (neg_smi) >> 3);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi) >>> 3);
+  assertEquals(0x46536000, (neg_smi) << 4);
+  assertEquals(-0x73594000, (neg_smi) << 5);
+  assertEquals(neg_smi, (neg_smi - 0.5) >> 0, "negsmi.5 >> 0");
+  assertEquals(neg_smi + 0x100000000, (neg_smi - 0.5) >>> 0, "negsmi.5 >>> 0");
+  assertEquals(neg_smi, (neg_smi - 0.5) << 0, "negsmi.5 << 0");
+  assertEquals(neg_smi / 2, (neg_smi - 0.5) >> 1);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi - 0.5) >>> 1);
+  assertEquals(neg_non_smi, (neg_smi - 0.5) << 1);
+  assertEquals(neg_smi / 8, (neg_smi - 0.5) >> 3);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi - 0.5) >>> 3);
+  assertEquals(0x46536000, (neg_smi - 0.5) << 4);
+  assertEquals(-0x73594000, (neg_smi - 0.5) << 5);
+  // End block A
+
+  // Repeat block A with 2^32 added to positive numbers and
+  // 2^32 subtracted from negative numbers.
+  // Begin block A repeat 1
+  var two_32 = 0x100000000;
+  var neg_32 = -two_32;
+  assertEquals(pos_non_smi, (two_32 + pos_non_smi) >> 0);
+  assertEquals(pos_non_smi, (two_32 + pos_non_smi) >>> 0);
+  assertEquals(pos_non_smi, (two_32 + pos_non_smi) << 0);
+  assertEquals(neg_non_smi, (neg_32 + neg_non_smi) >> 0);
+  assertEquals(neg_non_smi + 0x100000000, (neg_32 + neg_non_smi) >>> 0);
+  assertEquals(neg_non_smi, (neg_32 + neg_non_smi) << 0);
+  assertEquals(pos_smi, (two_32 + pos_smi) >> 0, "2^32+possmi >> 0");
+  assertEquals(pos_smi, (two_32 + pos_smi) >>> 0, "2^32+possmi >>> 0");
+  assertEquals(pos_smi, (two_32 + pos_smi) << 0, "2^32+possmi << 0");
+  assertEquals(neg_smi, (neg_32 + neg_smi) >> 0, "2^32+negsmi >> 0");
+  assertEquals(neg_smi + 0x100000000, (neg_32 + neg_smi) >>> 0);
+  assertEquals(neg_smi, (neg_32 + neg_smi) << 0, "2^32+negsmi << 0");
+
+  assertEquals(pos_non_smi / 2, (two_32 + pos_non_smi) >> 1);
+  assertEquals(pos_non_smi / 2, (two_32 + pos_non_smi) >>> 1);
+  assertEquals(-0x1194D800, (two_32 + pos_non_smi) << 1);
+  assertEquals(pos_non_smi / 8, (two_32 + pos_non_smi) >> 3);
+  assertEquals(pos_non_smi / 8, (two_32 + pos_non_smi) >>> 3);
+  assertEquals(-0x46536000, (two_32 + pos_non_smi) << 3);
+  assertEquals(0x73594000, (two_32 + pos_non_smi) << 4);
+  assertEquals(pos_non_smi, (two_32 + pos_non_smi + 0.5) >> 0);
+  assertEquals(pos_non_smi, (two_32 + pos_non_smi + 0.5) >>> 0);
+  assertEquals(pos_non_smi, (two_32 + pos_non_smi + 0.5) << 0);
+  assertEquals(pos_non_smi / 2, (two_32 + pos_non_smi + 0.5) >> 1);
+  assertEquals(pos_non_smi / 2, (two_32 + pos_non_smi + 0.5) >>> 1);
+  assertEquals(-0x1194D800, (two_32 + pos_non_smi + 0.5) << 1);
+  assertEquals(pos_non_smi / 8, (two_32 + pos_non_smi + 0.5) >> 3);
+  assertEquals(pos_non_smi / 8, (two_32 + pos_non_smi + 0.5) >>> 3);
+  assertEquals(-0x46536000, (two_32 + pos_non_smi + 0.5) << 3);
+  assertEquals(0x73594000, (two_32 + pos_non_smi + 0.5) << 4);
+
+  assertEquals(neg_non_smi / 2, (neg_32 + neg_non_smi) >> 1);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_32 + neg_non_smi) >>> 1);
+  assertEquals(0x1194D800, (neg_32 + neg_non_smi) << 1);
+  assertEquals(neg_non_smi / 8, (neg_32 + neg_non_smi) >> 3);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_32 + neg_non_smi) >>> 3);
+  assertEquals(0x46536000, (neg_32 + neg_non_smi) << 3);
+  assertEquals(-0x73594000, (neg_32 + neg_non_smi) << 4);
+  assertEquals(neg_non_smi, (neg_32 + neg_non_smi - 0.5) >> 0);
+  assertEquals(neg_non_smi + 0x100000000, (neg_32 + neg_non_smi - 0.5) >>> 0);
+  assertEquals(neg_non_smi, (neg_32 + neg_non_smi - 0.5) << 0);
+  assertEquals(neg_non_smi / 2, (neg_32 + neg_non_smi - 0.5) >> 1);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_32 + neg_non_smi - 0.5)
+               >>> 1);
+  assertEquals(0x1194D800, (neg_32 + neg_non_smi - 0.5) << 1);
+  assertEquals(neg_non_smi / 8, (neg_32 + neg_non_smi - 0.5) >> 3);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_32 + neg_non_smi - 0.5)
+               >>> 3);
+  assertEquals(0x46536000, (neg_32 + neg_non_smi - 0.5) << 3);
+  assertEquals(-0x73594000, (neg_32 + neg_non_smi - 0.5) << 4);
+
+  assertEquals(pos_smi / 2, (two_32 + pos_smi) >> 1);
+  assertEquals(pos_smi / 2, (two_32 + pos_smi) >>> 1);
+  assertEquals(pos_non_smi, (two_32 + pos_smi) << 1);
+  assertEquals(pos_smi / 8, (two_32 + pos_smi) >> 3);
+  assertEquals(pos_smi / 8, (two_32 + pos_smi) >>> 3);
+  assertEquals(-0x2329b000, (two_32 + pos_smi) << 3);
+  assertEquals(0x73594000, (two_32 + pos_smi) << 5);
+  assertEquals(pos_smi, (two_32 + pos_smi + 0.5) >> 0);
+  assertEquals(pos_smi, (two_32 + pos_smi + 0.5) >>> 0);
+  assertEquals(pos_smi, (two_32 + pos_smi + 0.5) << 0);
+  assertEquals(pos_smi / 2, (two_32 + pos_smi + 0.5) >> 1);
+  assertEquals(pos_smi / 2, (two_32 + pos_smi + 0.5) >>> 1);
+  assertEquals(pos_non_smi, (two_32 + pos_smi + 0.5) << 1);
+  assertEquals(pos_smi / 8, (two_32 + pos_smi + 0.5) >> 3);
+  assertEquals(pos_smi / 8, (two_32 + pos_smi + 0.5) >>> 3);
+  assertEquals(-0x2329b000, (two_32 + pos_smi + 0.5) << 3);
+  assertEquals(0x73594000, (two_32 + pos_smi + 0.5) << 5);
+
+  assertEquals(neg_smi / 2, (neg_32 + neg_smi) >> 1);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_32 + neg_smi) >>> 1);
+  assertEquals(neg_non_smi, (neg_32 + neg_smi) << 1);
+  assertEquals(neg_smi / 8, (neg_32 + neg_smi) >> 3);
+  assertEquals((neg_smi + 0x100000000) / 8, (neg_32 + neg_smi) >>> 3);
+  assertEquals(0x46536000, (neg_32 + neg_smi) << 4);
+  assertEquals(-0x73594000, (neg_32 + neg_smi) << 5);
+  assertEquals(neg_smi, (neg_32 + neg_smi - 0.5) >> 0, "-2^32+negsmi.5 >> 0");
+  assertEquals(neg_smi + 0x100000000, (neg_32 + neg_smi - 0.5) >>> 0);
+  assertEquals(neg_smi, (neg_32 + neg_smi - 0.5) << 0, "-2^32+negsmi.5 << 0");
+  assertEquals(neg_smi / 2, (neg_32 + neg_smi - 0.5) >> 1);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_32 + neg_smi - 0.5) >>> 1);
+  assertEquals(neg_non_smi, (neg_32 + neg_smi - 0.5) << 1);
+  assertEquals(neg_smi / 8, (neg_32 + neg_smi - 0.5) >> 3);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_32 + neg_smi - 0.5) >>> 3);
+  assertEquals(0x46536000, (neg_32 + neg_smi - 0.5) << 4);
+  assertEquals(-0x73594000, (neg_32 + neg_smi - 0.5) << 5);
+  // End block A repeat 1
+  // Repeat block A with shift amounts in variables initialized with
+  // a constant.
+  var zero = 0;
+  var one = 1;
+  var three = 3;
+  var four = 4;
+  var five = 5;
+  // Begin block A repeat 2
+  assertEquals(pos_non_smi, (pos_non_smi) >> zero);
+  assertEquals(pos_non_smi, (pos_non_smi) >>> zero);
+  assertEquals(pos_non_smi, (pos_non_smi) << zero);
+  assertEquals(neg_non_smi, (neg_non_smi) >> zero);
+  assertEquals(neg_non_smi + 0x100000000, (neg_non_smi) >>> zero);
+  assertEquals(neg_non_smi, (neg_non_smi) << zero);
+  assertEquals(pos_smi, (pos_smi) >> zero);
+  assertEquals(pos_smi, (pos_smi) >>> zero);
+  assertEquals(pos_smi, (pos_smi) << zero);
+  assertEquals(neg_smi, (neg_smi) >> zero, "negsmi >> zero");
+  assertEquals(neg_smi + 0x100000000, (neg_smi) >>> zero);
+  assertEquals(neg_smi, (neg_smi) << zero, "negsmi << zero");
+
+  assertEquals(pos_non_smi / 2, (pos_non_smi) >> one);
+  assertEquals(pos_non_smi / 2, (pos_non_smi) >>> one);
+  assertEquals(-0x1194D800, (pos_non_smi) << one);
+  assertEquals(pos_non_smi / 8, (pos_non_smi) >> three);
+  assertEquals(pos_non_smi / 8, (pos_non_smi) >>> three);
+  assertEquals(-0x46536000, (pos_non_smi) << three);
+  assertEquals(0x73594000, (pos_non_smi) << four);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) >> zero);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) >>> zero);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) << zero);
+  assertEquals(pos_non_smi / 2, (pos_non_smi + 0.5) >> one);
+  assertEquals(pos_non_smi / 2, (pos_non_smi + 0.5) >>> one);
+  assertEquals(-0x1194D800, (pos_non_smi + 0.5) << one);
+  assertEquals(pos_non_smi / 8, (pos_non_smi + 0.5) >> three);
+  assertEquals(pos_non_smi / 8, (pos_non_smi + 0.5) >>> three);
+  assertEquals(-0x46536000, (pos_non_smi + 0.5) << three);
+  assertEquals(0x73594000, (pos_non_smi + 0.5) << four);
+
+  assertEquals(neg_non_smi / 2, (neg_non_smi) >> one);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi) >>> one);
+  assertEquals(0x1194D800, (neg_non_smi) << one);
+  assertEquals(neg_non_smi / 8, (neg_non_smi) >> three);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi) >>> three);
+  assertEquals(0x46536000, (neg_non_smi) << three);
+  assertEquals(-0x73594000, (neg_non_smi) << four);
+  assertEquals(neg_non_smi, (neg_non_smi - 0.5) >> zero);
+  assertEquals(neg_non_smi + 0x100000000, (neg_non_smi - 0.5) >>> zero);
+  assertEquals(neg_non_smi, (neg_non_smi - 0.5) << zero);
+  assertEquals(neg_non_smi / 2, (neg_non_smi - 0.5) >> one);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi - 0.5) >>> one);
+  assertEquals(0x1194D800, (neg_non_smi - 0.5) << one);
+  assertEquals(neg_non_smi / 8, (neg_non_smi - 0.5) >> three);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi - 0.5)
+      >>> three);
+  assertEquals(0x46536000, (neg_non_smi - 0.5) << three);
+  assertEquals(-0x73594000, (neg_non_smi - 0.5) << four);
+
+  assertEquals(pos_smi / 2, (pos_smi) >> one);
+  assertEquals(pos_smi / 2, (pos_smi) >>> one);
+  assertEquals(pos_non_smi, (pos_smi) << one);
+  assertEquals(pos_smi / 8, (pos_smi) >> three);
+  assertEquals(pos_smi / 8, (pos_smi) >>> three);
+  assertEquals(-0x2329b000, (pos_smi) << three);
+  assertEquals(0x73594000, (pos_smi) << five);
+  assertEquals(pos_smi, (pos_smi + 0.5) >> zero);
+  assertEquals(pos_smi, (pos_smi + 0.5) >>> zero);
+  assertEquals(pos_smi, (pos_smi + 0.5) << zero);
+  assertEquals(pos_smi / 2, (pos_smi + 0.5) >> one);
+  assertEquals(pos_smi / 2, (pos_smi + 0.5) >>> one);
+  assertEquals(pos_non_smi, (pos_smi + 0.5) << one);
+  assertEquals(pos_smi / 8, (pos_smi + 0.5) >> three);
+  assertEquals(pos_smi / 8, (pos_smi + 0.5) >>> three);
+  assertEquals(-0x2329b000, (pos_smi + 0.5) << three);
+  assertEquals(0x73594000, (pos_smi + 0.5) << five);
+
+  assertEquals(neg_smi / 2, (neg_smi) >> one);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi) >>> one);
+  assertEquals(neg_non_smi, (neg_smi) << one);
+  assertEquals(neg_smi / 8, (neg_smi) >> three);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi) >>> three);
+  assertEquals(0x46536000, (neg_smi) << four);
+  assertEquals(-0x73594000, (neg_smi) << five);
+  assertEquals(neg_smi, (neg_smi - 0.5) >> zero);
+  assertEquals(neg_smi + 0x100000000, (neg_smi - 0.5) >>> zero);
+  assertEquals(neg_smi, (neg_smi - 0.5) << zero);
+  assertEquals(neg_smi / 2, (neg_smi - 0.5) >> one);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi - 0.5) >>> one);
+  assertEquals(neg_non_smi, (neg_smi - 0.5) << one);
+  assertEquals(neg_smi / 8, (neg_smi - 0.5) >> three);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi - 0.5) >>> three);
+  assertEquals(0x46536000, (neg_smi - 0.5) << four);
+  assertEquals(-0x73594000, (neg_smi - 0.5) << five);
+  // End block A repeat 2
+
+  // Repeat previous block, with computed values in the shift variables.
+  five = 0;
+  while (five < 5) ++five;
+  four = five - one;
+  three = four - one;
+  one = four - three;
+  zero = one - one;
+
+  // Begin block A repeat 3
+  assertEquals(pos_non_smi, (pos_non_smi) >> zero);
+  assertEquals(pos_non_smi, (pos_non_smi) >>> zero);
+  assertEquals(pos_non_smi, (pos_non_smi) << zero);
+  assertEquals(neg_non_smi, (neg_non_smi) >> zero);
+  assertEquals(neg_non_smi + 0x100000000, (neg_non_smi) >>> zero);
+  assertEquals(neg_non_smi, (neg_non_smi) << zero);
+  assertEquals(pos_smi, (pos_smi) >> zero);
+  assertEquals(pos_smi, (pos_smi) >>> zero);
+  assertEquals(pos_smi, (pos_smi) << zero);
+  assertEquals(neg_smi, (neg_smi) >> zero, "negsmi >> zero(2)");
+  assertEquals(neg_smi + 0x100000000, (neg_smi) >>> zero);
+  assertEquals(neg_smi, (neg_smi) << zero, "negsmi << zero(2)");
+
+  assertEquals(pos_non_smi / 2, (pos_non_smi) >> one);
+  assertEquals(pos_non_smi / 2, (pos_non_smi) >>> one);
+  assertEquals(-0x1194D800, (pos_non_smi) << one);
+  assertEquals(pos_non_smi / 8, (pos_non_smi) >> three);
+  assertEquals(pos_non_smi / 8, (pos_non_smi) >>> three);
+  assertEquals(-0x46536000, (pos_non_smi) << three);
+  assertEquals(0x73594000, (pos_non_smi) << four);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) >> zero);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) >>> zero);
+  assertEquals(pos_non_smi, (pos_non_smi + 0.5) << zero);
+  assertEquals(pos_non_smi / 2, (pos_non_smi + 0.5) >> one);
+  assertEquals(pos_non_smi / 2, (pos_non_smi + 0.5) >>> one);
+  assertEquals(-0x1194D800, (pos_non_smi + 0.5) << one);
+  assertEquals(pos_non_smi / 8, (pos_non_smi + 0.5) >> three);
+  assertEquals(pos_non_smi / 8, (pos_non_smi + 0.5) >>> three);
+  assertEquals(-0x46536000, (pos_non_smi + 0.5) << three);
+  assertEquals(0x73594000, (pos_non_smi + 0.5) << four);
+
+  assertEquals(neg_non_smi / 2, (neg_non_smi) >> one);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi) >>> one);
+  assertEquals(0x1194D800, (neg_non_smi) << one);
+  assertEquals(neg_non_smi / 8, (neg_non_smi) >> three);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi) >>> three);
+  assertEquals(0x46536000, (neg_non_smi) << three);
+  assertEquals(-0x73594000, (neg_non_smi) << four);
+  assertEquals(neg_non_smi, (neg_non_smi - 0.5) >> zero);
+  assertEquals(neg_non_smi + 0x100000000, (neg_non_smi - 0.5) >>> zero);
+  assertEquals(neg_non_smi, (neg_non_smi - 0.5) << zero);
+  assertEquals(neg_non_smi / 2, (neg_non_smi - 0.5) >> one);
+  assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi - 0.5) >>> one);
+  assertEquals(0x1194D800, (neg_non_smi - 0.5) << one);
+  assertEquals(neg_non_smi / 8, (neg_non_smi - 0.5) >> three);
+  assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi - 0.5)
+      >>> three);
+  assertEquals(0x46536000, (neg_non_smi - 0.5) << three);
+  assertEquals(-0x73594000, (neg_non_smi - 0.5) << four);
+
+  assertEquals(pos_smi / 2, (pos_smi) >> one);
+  assertEquals(pos_smi / 2, (pos_smi) >>> one);
+  assertEquals(pos_non_smi, (pos_smi) << one);
+  assertEquals(pos_smi / 8, (pos_smi) >> three);
+  assertEquals(pos_smi / 8, (pos_smi) >>> three);
+  assertEquals(-0x2329b000, (pos_smi) << three);
+  assertEquals(0x73594000, (pos_smi) << five);
+  assertEquals(pos_smi, (pos_smi + 0.5) >> zero);
+  assertEquals(pos_smi, (pos_smi + 0.5) >>> zero);
+  assertEquals(pos_smi, (pos_smi + 0.5) << zero);
+  assertEquals(pos_smi / 2, (pos_smi + 0.5) >> one);
+  assertEquals(pos_smi / 2, (pos_smi + 0.5) >>> one);
+  assertEquals(pos_non_smi, (pos_smi + 0.5) << one);
+  assertEquals(pos_smi / 8, (pos_smi + 0.5) >> three);
+  assertEquals(pos_smi / 8, (pos_smi + 0.5) >>> three);
+  assertEquals(-0x2329b000, (pos_smi + 0.5) << three);
+  assertEquals(0x73594000, (pos_smi + 0.5) << five);
+
+  assertEquals(neg_smi / 2, (neg_smi) >> one);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi) >>> one);
+  assertEquals(neg_non_smi, (neg_smi) << one);
+  assertEquals(neg_smi / 8, (neg_smi) >> three);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi) >>> three);
+  assertEquals(0x46536000, (neg_smi) << four);
+  assertEquals(-0x73594000, (neg_smi) << five);
+  assertEquals(neg_smi, (neg_smi - 0.5) >> zero, "negsmi.5 >> zero");
+  assertEquals(neg_smi + 0x100000000, (neg_smi - 0.5) >>> zero);
+  assertEquals(neg_smi, (neg_smi - 0.5) << zero, "negsmi.5 << zero");
+  assertEquals(neg_smi / 2, (neg_smi - 0.5) >> one);
+  assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi - 0.5) >>> one);
+  assertEquals(neg_non_smi, (neg_smi - 0.5) << one);
+  assertEquals(neg_smi / 8, (neg_smi - 0.5) >> three);
+  assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi - 0.5) >>> three);
+  assertEquals(0x46536000, (neg_smi - 0.5) << four);
+  assertEquals(-0x73594000, (neg_smi - 0.5) << five);
+  // End block A repeat 3
+
+  // Test non-integer shift value
+  assertEquals(5, 20.5 >> 2.4);
+  assertEquals(5, 20.5 >> 2.7);
+  var shift = 2.4;
+  assertEquals(5, 20.5 >> shift);
+  assertEquals(5, 20.5 >> shift + 0.3);
+  shift = shift + zero;
+  assertEquals(5, 20.5 >> shift);
+  assertEquals(5, 20.5 >> shift + 0.3);
+}
+
+testShiftNonSmis();
+
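+// `| 0` truncates toward zero, so nudging an integer up by a tiny
+// factor keeps the same value, while nudging it down moves it one
+// step toward zero.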
+function intConversion() {
+  function foo(x) {
+    assertEquals(x, (x * 1.0000000001) | 0, "foo more " + x);
+    assertEquals(x, x | 0, "foo " + x);
+    if (x > 0) {
+      assertEquals(x - 1, (x * 0.9999999999) | 0, "foo less " + x);
+    } else {
+      assertEquals(x + 1, (x * 0.9999999999) | 0, "foo less " + x);
+    }
+  }
+  for (var i = 1; i < 0x80000000; i *= 2) {
+    foo(i);
+    foo(-i);
+  }
+  for (var i = 1; i < 1/0; i *= 2) {
+    assertEquals(i | 0, (i * 1.0000000000000001) | 0, "b" + i);
+    assertEquals(-i | 0, (i * -1.0000000000000001) | 0, "c" + i);
+  }
+  for (var i = 0.5; i > 0; i /= 2) {
+    assertEquals(0, i | 0, "d" + i);
+    assertEquals(0, -i | 0, "e" + i);
+  }
+}
+
+intConversion();
+
+// Verify that we handle the (optimized) corner case of shifting by
+// zero even for non-smis.
+function shiftByZero(n) { return n << 0; }
+
+assertEquals(3, shiftByZero(3.1415));
diff --git a/test/mjsunit/smi-ops.js b/test/mjsunit/smi-ops.js
index 8fa6fec..7945855 100644
--- a/test/mjsunit/smi-ops.js
+++ b/test/mjsunit/smi-ops.js
@@ -699,3 +699,6 @@
 // allocations we got the Smi overflow case wrong.
 function f(x, y) { return y +  ( 1 << (x & 31)); }
 assertEquals(-2147483647, f(31, 1));
+
+// Regression test for correct handling of overflow in smi comparison.
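+// -0x40000000 is the smallest smi; a comparison implemented by
+// subtracting the operands can overflow the smi range here.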
+assertTrue(-0x40000000 < 42);
diff --git a/test/mjsunit/string-replace-gc.js b/test/mjsunit/string-replace-gc.js
index 26fba10..73b310f 100644
--- a/test/mjsunit/string-replace-gc.js
+++ b/test/mjsunit/string-replace-gc.js
@@ -25,7 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --always-compact
 //
 // Regression test for the r1512 fix.
 
diff --git a/test/mjsunit/regress/regress-3408144.js b/test/mjsunit/sum-0-plus-undefined-is-NaN.js
similarity index 82%
copy from test/mjsunit/regress/regress-3408144.js
copy to test/mjsunit/sum-0-plus-undefined-is-NaN.js
index 6e292d6..fb98d0c 100644
--- a/test/mjsunit/regress/regress-3408144.js
+++ b/test/mjsunit/sum-0-plus-undefined-is-NaN.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,13 +25,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Test incorrect code generation for alternations on ARM.
+/**
+ * @fileoverview Test addition of 0 and undefined.
+ */
 
+function sum(a, b) { return a + b; }
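+// undefined converts to NaN under numeric addition, so 0 + undefined
+// is NaN; the loop below repeats the add enough times to also exercise
+// the compiled fast paths.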
 
-// Flags: --nofull-compiler
-
-function foo() {
-  return (0 > ("10"||10) - 1);
+function test(x, y, expectNaN) {
+  for (var i = 0; i < 1000; i++) {
+    assertEquals(expectNaN, isNaN(sum(x, y)));
+  }
 }
 
-assertFalse(foo());
+test(0, 1, false);
+test(0, undefined, true);
diff --git a/test/mjsunit/tools/logreader.js b/test/mjsunit/tools/logreader.js
deleted file mode 100644
index 485990e..0000000
--- a/test/mjsunit/tools/logreader.js
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Load CSV Parser and Log Reader implementations from <project root>/tools.
-// Files: tools/csvparser.js tools/logreader.js
-
-
-(function testAddressParser() {
-  var reader = new devtools.profiler.LogReader({});
-  var parser = reader.createAddressParser('test');
-
-  // Test that 0x values are parsed, and prevAddresses_ are untouched.
-  assertFalse('test' in reader.prevAddresses_);
-  assertEquals(0, parser('0x0'));
-  assertFalse('test' in reader.prevAddresses_);
-  assertEquals(0x100, parser('0x100'));
-  assertFalse('test' in reader.prevAddresses_);
-  assertEquals(0xffffffff, parser('0xffffffff'));
-  assertFalse('test' in reader.prevAddresses_);
-
-  // Test that values that has no '+' or '-' prefix are parsed
-  // and saved to prevAddresses_.
-  assertEquals(0, parser('0'));
-  assertEquals(0, reader.prevAddresses_.test);
-  assertEquals(0x100, parser('100'));
-  assertEquals(0x100, reader.prevAddresses_.test);
-  assertEquals(0xffffffff, parser('ffffffff'));
-  assertEquals(0xffffffff, reader.prevAddresses_.test);
-
-  // Test that values prefixed with '+' or '-' are treated as deltas,
-  // and prevAddresses_ is updated.
-  // Set base value.
-  assertEquals(0x100, parser('100'));
-  assertEquals(0x100, reader.prevAddresses_.test);
-  assertEquals(0x200, parser('+100'));
-  assertEquals(0x200, reader.prevAddresses_.test);
-  assertEquals(0x100, parser('-100'));
-  assertEquals(0x100, reader.prevAddresses_.test);
-})();
-
-
-(function testAddressParser() {
-  var reader = new devtools.profiler.LogReader({});
-
-  assertEquals([0x10000000, 0x10001000, 0xffff000, 0x10000000],
-               reader.processStack(0x10000000, 0, ['overflow',
-                   '+1000', '-2000', '+1000']));
-})();
-
-
-(function testExpandBackRef() {
-  var reader = new devtools.profiler.LogReader({});
-
-  assertEquals('aaaaaaaa', reader.expandBackRef_('aaaaaaaa'));
-  assertEquals('aaaaaaaa', reader.expandBackRef_('#1'));
-  assertEquals('bbbbaaaa', reader.expandBackRef_('bbbb#2:4'));
-  assertEquals('"#1:1"', reader.expandBackRef_('"#1:1"'));
-})();
diff --git a/test/mjsunit/typeof.js b/test/mjsunit/typeof.js
index 15ab7bf..39dec72 100644
--- a/test/mjsunit/typeof.js
+++ b/test/mjsunit/typeof.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --nofull-compiler
-
 // The type of a regular expression should be 'function', including in
 // the context of string equality comparisons.
 
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index 1768c39..1f9e6eb 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -45,6 +45,7 @@
 prefix mozilla
 def FAIL_OK = FAIL, OKAY
 
+
 ##################### SKIPPED TESTS #####################
 
 # This test checks that we behave properly in an out-of-memory
@@ -52,6 +53,9 @@
 # time to do so.
 js1_5/Regress/regress-271716-n: SKIP
 
+# BUG(960): This test has an insane amount of output when it times out,
+# messing up the ability to see other failures on the waterfall.
+js1_5/extensions/regress-342960: SKIP
 
 # This test uses an uninitialized variable. A bug has been filed:
 # https://bugzilla.mozilla.org/show_bug.cgi?id=575575
@@ -809,7 +813,37 @@
 js1_5/Regress/regress-271716-n: PASS || SKIP if $FAST == yes
 
 
-[ $FAST == yes && $ARCH == arm ]
+[ $arch == arm ]
+
+# Times out and prints so much output that we need to skip it to not
+# hang the builder.
+js1_5/extensions/regress-342960: SKIP
+
+# BUG(3251229): Times out when running new crankshaft test script.
+ecma/Date/15.9.5.12-2: SKIP
+ecma/Date/15.9.5.11-2: SKIP
+ecma/Date/15.9.5.10-2: SKIP
+ecma/Date/15.9.5.8: SKIP
+ecma_3/RegExp/regress-311414: SKIP
+js1_5/Array/regress-99120-02: SKIP
+js1_5/Regress/regress-203278-1: SKIP
+js1_5/Regress/regress-404755: SKIP
+js1_5/Regress/regress-451322: SKIP
+js1_5/extensions/regress-371636: SKIP
+
+
+[ $arch == arm && $crankshaft ]
+
+# Tests that only fail with crankshaft.
+js1_5/Regress/regress-416628: CRASH
+js1_5/Regress/regress-96128-n: PASS || CRASH
+
+
+[ $fast == yes && $arch == arm ]
 
 # In fast mode on arm we try to skip all tests that would time out,
 # since running the tests takes so long in the first place.
diff --git a/test/sputnik/README b/test/sputnik/README
index 3d39a67..94c689b 100644
--- a/test/sputnik/README
+++ b/test/sputnik/README
@@ -1,6 +1,6 @@
 To run the sputniktests you must check out the test suite from
 googlecode.com.  The test expectations are currently relative to
 version 28.  To get the tests run the following command within
-v8/tests/sputnik/
+v8/test/sputnik/
 
   svn co http://sputniktests.googlecode.com/svn/trunk/ -r28 sputniktests
diff --git a/test/sputnik/sputnik.status b/test/sputnik/sputnik.status
index bc8c1e3..966500d 100644
--- a/test/sputnik/sputnik.status
+++ b/test/sputnik/sputnik.status
@@ -193,7 +193,6 @@
 S9.9_A2: FAIL_OK
 
 
-
 ##################### SKIPPED TESTS #####################
 
 # These tests take a looong time to run in debug mode.
@@ -254,6 +253,20 @@
 S15.10.7_A3_T2: FAIL_OK
 S15.10.7_A3_T1: FAIL_OK
 
+[ $arch == arm ]
+
+# BUG(3251225): Tests that timeout with --nocrankshaft.
+S15.1.3.1_A2.4_T1: SKIP
+S15.1.3.1_A2.5_T1: SKIP
+S15.1.3.2_A2.4_T1: SKIP
+S15.1.3.2_A2.5_T1: SKIP
+S15.1.3.3_A2.3_T1: SKIP
+S15.1.3.4_A2.3_T1: SKIP
+
 [ $arch == mips ]
 
 # Skip all tests on MIPS.
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index c1a5aab..6af6611 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -70,11 +70,12 @@
           'DEBUG',
           '_DEBUG',
           'ENABLE_DISASSEMBLER',
-          'V8_ENABLE_CHECKS'
+          'V8_ENABLE_CHECKS',
+          'OBJECT_PRINT',
         ],
         'msvs_settings': {
           'VCCLCompilerTool': {
-            'Optimizations': '0',
+            'Optimization': '0',
             
             'conditions': [
               ['OS=="win" and component=="shared_library"', {
@@ -134,7 +135,7 @@
             },
             'msvs_settings': {
               'VCCLCompilerTool': {
-                'Optimizations': '2',
+                'Optimization': '2',
                 'InlineFunctionExpansion': '2',
                 'EnableIntrinsicFunctions': 'true',
                 'FavorSizeOrSpeed': '0',
@@ -191,11 +192,43 @@
       ],
       'direct_dependent_settings': {
         'include_dirs': [
-          '../../include',
+           '../../include',
         ],
       },
     },
     {
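+      # Builds the standalone preparser library (see the preparser API
+      # in ../../src/preparser-api.cc).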
+      'target_name': 'v8_preparser',
+      'include_dirs': [
+        '../../include',
+        '../../src',
+      ],
+      'sources': [
+        '../../src/allocation.cc',
+        '../../src/hashmap.cc',
+        '../../src/preparse-data.cc',
+        '../../src/preparser.cc',
+        '../../src/preparser-api.cc',
+        '../../src/scanner-base.cc',
+        '../../src/token.cc',
+        '../../src/unicode.cc',
+      ],
+      'conditions': [
+        ['OS=="win" and component=="shared_library"', {
+          'sources': [ '../../src/v8preparserdll-main.cc' ],
+          'defines': [ 'BUILDING_V8_SHARED' ],
+          'direct_dependent_settings': {
+            'defines': [ 'USING_V8_SHARED' ]
+          },
+          'type': '<(component)',
+        }, {
+          'type': 'none'
+        }],
+        ['OS!="win"', {
+          'type': '<(library)'
+        }],
+      ]
+    },
+    {
       'target_name': 'v8_snapshot',
       'type': '<(library)',
       'conditions': [
@@ -280,6 +313,7 @@
         '../../src/ast.cc',
         '../../src/ast-inl.h',
         '../../src/ast.h',
+        '../../src/atomicops_internals_x86_gcc.cc',
         '../../src/bignum.cc',
         '../../src/bignum.h',
         '../../src/bignum-dtoa.cc',
@@ -328,6 +362,8 @@
         '../../src/debug.h',
         '../../src/debug-agent.cc',
         '../../src/debug-agent.h',
+        '../../src/deoptimizer.cc',
+        '../../src/deoptimizer.h',
         '../../src/disasm.h',
         '../../src/disassembler.cc',
         '../../src/disassembler.h',
@@ -369,6 +405,10 @@
         '../../src/heap.h',
         '../../src/heap-profiler.cc',
         '../../src/heap-profiler.h',
+        '../../src/hydrogen.cc',
+        '../../src/hydrogen.h',
+        '../../src/hydrogen-instructions.cc',
+        '../../src/hydrogen-instructions.h',
         '../../src/ic-inl.h',
         '../../src/ic.cc',
         '../../src/ic.h',
@@ -381,6 +421,8 @@
         '../../src/jsregexp.h',
         '../../src/list-inl.h',
         '../../src/list.h',
+        '../../src/lithium-allocator.cc',
+        '../../src/lithium-allocator.h',
         '../../src/liveedit.cc',
         '../../src/liveedit.h',
         '../../src/log-inl.h',
@@ -433,6 +475,10 @@
         '../../src/rewriter.h',
         '../../src/runtime.cc',
         '../../src/runtime.h',
+        '../../src/runtime-profiler.cc',
+        '../../src/runtime-profiler.h',
+        '../../src/safepoint-table.cc',
+        '../../src/safepoint-table.h',
         '../../src/scanner-base.cc',
         '../../src/scanner-base.h',
         '../../src/scanner.cc',
@@ -492,10 +538,10 @@
         '../../src/zone-inl.h',
         '../../src/zone.cc',
         '../../src/zone.h',
-	'../../src/extensions/externalize-string-extension.cc',
-	'../../src/extensions/externalize-string-extension.h',
-	'../../src/extensions/gc-extension.cc',
-	'../../src/extensions/gc-extension.h',
+        '../../src/extensions/externalize-string-extension.cc',
+        '../../src/extensions/externalize-string-extension.h',
+        '../../src/extensions/gc-extension.cc',
+        '../../src/extensions/gc-extension.h',
       ],
       'conditions': [
         ['v8_target_arch=="arm"', {
@@ -520,12 +566,17 @@
             '../../src/arm/constants-arm.cc',
             '../../src/arm/cpu-arm.cc',
             '../../src/arm/debug-arm.cc',
+            '../../src/arm/deoptimizer-arm.cc',
             '../../src/arm/disasm-arm.cc',
             '../../src/arm/frames-arm.cc',
             '../../src/arm/frames-arm.h',
             '../../src/arm/full-codegen-arm.cc',
             '../../src/arm/ic-arm.cc',
             '../../src/arm/jump-target-arm.cc',
+            '../../src/arm/lithium-codegen-arm.cc',
+            '../../src/arm/lithium-codegen-arm.h',
+            '../../src/arm/lithium-arm.cc',
+            '../../src/arm/lithium-arm.h',
             '../../src/arm/macro-assembler-arm.cc',
             '../../src/arm/macro-assembler-arm.h',
             '../../src/arm/regexp-macro-assembler-arm.cc',
@@ -566,12 +617,17 @@
             '../../src/ia32/codegen-ia32.h',
             '../../src/ia32/cpu-ia32.cc',
             '../../src/ia32/debug-ia32.cc',
+            '../../src/ia32/deoptimizer-ia32.cc',
             '../../src/ia32/disasm-ia32.cc',
             '../../src/ia32/frames-ia32.cc',
             '../../src/ia32/frames-ia32.h',
             '../../src/ia32/full-codegen-ia32.cc',
             '../../src/ia32/ic-ia32.cc',
             '../../src/ia32/jump-target-ia32.cc',
+            '../../src/ia32/lithium-codegen-ia32.cc',
+            '../../src/ia32/lithium-codegen-ia32.h',
+            '../../src/ia32/lithium-ia32.cc',
+            '../../src/ia32/lithium-ia32.h',
             '../../src/ia32/macro-assembler-ia32.cc',
             '../../src/ia32/macro-assembler-ia32.h',
             '../../src/ia32/regexp-macro-assembler-ia32.cc',
@@ -602,6 +658,7 @@
             '../../src/x64/codegen-x64.h',
             '../../src/x64/cpu-x64.cc',
             '../../src/x64/debug-x64.cc',
+            '../../src/x64/deoptimizer-x64.cc',
             '../../src/x64/disasm-x64.cc',
             '../../src/x64/frames-x64.cc',
             '../../src/x64/frames-x64.h',
diff --git a/tools/logreader.js b/tools/logreader.js
index b2aca73..50e3aa4 100644
--- a/tools/logreader.js
+++ b/tools/logreader.js
@@ -46,36 +46,6 @@
    * @type {Array.<Object>}
    */
   this.dispatchTable_ = dispatchTable;
-  this.dispatchTable_['alias'] =
-      { parsers: [null, null], processor: this.processAlias_ };
-  this.dispatchTable_['repeat'] =
-      { parsers: [parseInt, 'var-args'], processor: this.processRepeat_,
-        backrefs: true };
-
-  /**
-   * A key-value map for aliases. Translates short name -> full name.
-   * @type {Object}
-   */
-  this.aliases_ = {};
-
-  /**
-   * A key-value map for previous address values.
-   * @type {Object}
-   */
-  this.prevAddresses_ = {};
-
-  /**
-   * A key-value map for events than can be backreference-compressed.
-   * @type {Object}
-   */
-  this.backRefsCommands_ = {};
-  this.initBackRefsCommands_();
-
-  /**
-   * Back references for decompression.
-   * @type {Array.<string>}
-   */
-  this.backRefs_ = [];
 
   /**
    * Current line.
@@ -92,42 +62,6 @@
 
 
 /**
- * Creates a parser for an address entry.
- *
- * @param {string} addressTag Address tag to perform offset decoding.
- * @return {function(string):number} Address parser.
- */
-devtools.profiler.LogReader.prototype.createAddressParser = function(
-    addressTag) {
-  var self = this;
-  return (function (str) {
-    var value = parseInt(str, 16);
-    var firstChar = str.charAt(0);
-    if (firstChar == '+' || firstChar == '-') {
-      var addr = self.prevAddresses_[addressTag];
-      addr += value;
-      self.prevAddresses_[addressTag] = addr;
-      return addr;
-    } else if (firstChar != '0' || str.charAt(1) != 'x') {
-      self.prevAddresses_[addressTag] = value;
-    }
-    return value;
-  });
-};
-
-
-/**
- * Expands an alias symbol, if applicable.
- *
- * @param {string} symbol Symbol to expand.
- * @return {string} Expanded symbol, or the input symbol itself.
- */
-devtools.profiler.LogReader.prototype.expandAlias = function(symbol) {
-  return symbol in this.aliases_ ? this.aliases_[symbol] : symbol;
-};
-
-
-/**
  * Used for printing error messages.
  *
  * @param {string} str Error message.
@@ -234,68 +168,6 @@
 
 
 /**
- * Decompresses a line if it was backreference-compressed.
- *
- * @param {string} line Possibly compressed line.
- * @return {string} Decompressed line.
- * @private
- */
-devtools.profiler.LogReader.prototype.expandBackRef_ = function(line) {
-  var backRefPos;
-  // Filter out case when a regexp is created containing '#'.
-  if (line.charAt(line.length - 1) != '"'
-      && (backRefPos = line.lastIndexOf('#')) != -1) {
-    var backRef = line.substr(backRefPos + 1);
-    var backRefIdx = parseInt(backRef, 10) - 1;
-    var colonPos = backRef.indexOf(':');
-    var backRefStart =
-        colonPos != -1 ? parseInt(backRef.substr(colonPos + 1), 10) : 0;
-    line = line.substr(0, backRefPos) +
-        this.backRefs_[backRefIdx].substr(backRefStart);
-  }
-  this.backRefs_.unshift(line);
-  if (this.backRefs_.length > 10) {
-    this.backRefs_.length = 10;
-  }
-  return line;
-};
-
-
-/**
- * Initializes the map of backward reference compressible commands.
- * @private
- */
-devtools.profiler.LogReader.prototype.initBackRefsCommands_ = function() {
-  for (var event in this.dispatchTable_) {
-    var dispatch = this.dispatchTable_[event];
-    if (dispatch && dispatch.backrefs) {
-      this.backRefsCommands_[event] = true;
-    }
-  }
-};
-
-
-/**
- * Processes alias log record. Adds an alias to a corresponding map.
- *
- * @param {string} symbol Short name.
- * @param {string} expansion Long name.
- * @private
- */
-devtools.profiler.LogReader.prototype.processAlias_ = function(
-    symbol, expansion) {
-  if (expansion in this.dispatchTable_) {
-    this.dispatchTable_[symbol] = this.dispatchTable_[expansion];
-    if (expansion in this.backRefsCommands_) {
-      this.backRefsCommands_[symbol] = true;
-    }
-  } else {
-    this.aliases_[symbol] = expansion;
-  }
-};
-
-
-/**
  * Processes log lines.
  *
  * @param {Array.<string>} lines Log lines.
@@ -308,10 +180,6 @@
       continue;
     }
     try {
-      if (line.charAt(0) == '#' ||
-          line.substr(0, line.indexOf(',')) in this.backRefsCommands_) {
-        line = this.expandBackRef_(line);
-      }
       var fields = this.csvParser_.parseLine(line);
       this.dispatchLogRow_(fields);
     } catch (e) {
@@ -319,20 +187,3 @@
     }
   }
 };
-
-
-/**
- * Processes repeat log record. Expands it according to calls count and
- * invokes processing.
- *
- * @param {number} count Count.
- * @param {Array.<string>} cmd Parsed command.
- * @private
- */
-devtools.profiler.LogReader.prototype.processRepeat_ = function(count, cmd) {
-  // Replace the repeat-prefixed command from backrefs list with a non-prefixed.
-  this.backRefs_[0] = cmd.join(',');
-  for (var i = 0; i < count; ++i) {
-    this.dispatchLogRow_(cmd);
-  }
-};
diff --git a/tools/test.py b/tools/test.py
index 4b916f8..7348c62 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -358,7 +358,7 @@
     full_command = self.context.processor(command)
     output = Execute(full_command,
                      self.context,
-                     self.context.GetTimeout(self.mode))
+                     self.context.GetTimeout(self, self.mode))
     self.Cleanup()
     return TestOutput(self,
                       full_command,
@@ -569,7 +569,7 @@
 
 # Use this to run several variants of the tests, e.g.:
 # VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
-VARIANT_FLAGS = [[]]
+VARIANT_FLAGS = [[], ['--stress-opt', '--always-opt'], ['--nocrankshaft']]
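+# Every test now runs three ways: unmodified, with optimization
+# stressed (--stress-opt --always-opt), and with the optimizing
+# compiler disabled (--nocrankshaft).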
 
 
 class TestRepository(TestSuite):
@@ -673,8 +673,12 @@
   def GetVmFlags(self, testcase, mode):
     return testcase.variant_flags + FLAGS[mode]
 
-  def GetTimeout(self, mode):
-    return self.timeout * TIMEOUT_SCALEFACTOR[mode]
+  def GetTimeout(self, testcase, mode):
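+    # The stress-opt variant does considerably more work per test, so
+    # give it twice the usual timeout.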
+    result = self.timeout * TIMEOUT_SCALEFACTOR[mode]
+    if '--stress-opt' in self.GetVmFlags(testcase, mode):
+      return result * 2
+    else:
+      return result
 
 def RunTestCases(cases_to_run, progress, tasks):
   progress = PROGRESS_INDICATORS[progress](cases_to_run)
@@ -725,6 +729,9 @@
     if self.name in env: return ListSet([env[self.name]])
     else: return Nothing()
 
+  def Evaluate(self, env, defs):
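+    # Allows a bare variable such as $crankshaft to act as a boolean
+    # section condition in status files.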
+    return env[self.name]
+
 
 class Outcome(Expression):
 
@@ -1159,12 +1166,23 @@
   result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
         dest="suppress_dialogs", action="store_false")
   result.add_option("--shell", help="Path to V8 shell", default="shell")
-  result.add_option("--store-unexpected-output", 
+  result.add_option("--store-unexpected-output",
       help="Store the temporary JS files from tests that fails",
       dest="store_unexpected_output", default=True, action="store_true")
-  result.add_option("--no-store-unexpected-output", 
+  result.add_option("--no-store-unexpected-output",
       help="Deletes the temporary JS files from tests that fails",
       dest="store_unexpected_output", action="store_false")
+  result.add_option("--stress-only",
+                    help="Only run tests with --always-opt --stress-opt",
+                    default=False, action="store_true")
+  result.add_option("--nostress",
+                    help="Don't run crankshaft --always-opt --stress-op test",
+                    default=False, action="store_true")
+  result.add_option("--crankshaft",
+                    help="Run with the --crankshaft flag",
+                    default=False, action="store_true")
+  result.add_option("--noprof", help="Disable profiling support",
+                    default=False, action="store_true")
   return result
 
 
@@ -1194,6 +1212,19 @@
     options.scons_flags.append("arch=" + options.arch)
   if options.snapshot:
     options.scons_flags.append("snapshot=on")
+  global VARIANT_FLAGS
+  if options.stress_only:
+    VARIANT_FLAGS = [['--stress-opt', '--always-opt']]
+  if options.nostress:
+    VARIANT_FLAGS = [[], ['--nocrankshaft']]
+  if options.crankshaft:
+    if options.special_command:
+      options.special_command += " --crankshaft"
+    else:
+      options.special_command = "@--crankshaft"
+  if options.noprof:
+    options.scons_flags.append("prof=off")
+    options.scons_flags.append("profilingsupport=off")
   return True
 
 
@@ -1344,7 +1375,8 @@
         'mode': mode,
         'system': utils.GuessOS(),
         'arch': options.arch,
-        'simulator': options.simulator
+        'simulator': options.simulator,
+        'crankshaft': options.crankshaft
       }
       test_list = root.ListTests([], path, context, mode)
       unclassified_tests += test_list
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index a3e14c3..87864d1 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -60,18 +60,17 @@
 function SnapshotLogProcessor() {
   devtools.profiler.LogReader.call(this, {
       'code-creation': {
-          parsers: [null, this.createAddressParser('code'), parseInt, null],
-          processor: this.processCodeCreation, backrefs: true },
-      'code-move': { parsers: [this.createAddressParser('code'),
-          this.createAddressParser('code-move-to')],
-          processor: this.processCodeMove, backrefs: true },
-      'code-delete': { parsers: [this.createAddressParser('code')],
-          processor: this.processCodeDelete, backrefs: true },
+          parsers: [null, parseInt, parseInt, null],
+          processor: this.processCodeCreation },
+      'code-move': { parsers: [parseInt, parseInt],
+          processor: this.processCodeMove },
+      'code-delete': { parsers: [parseInt],
+          processor: this.processCodeDelete },
       'function-creation': null,
       'function-move': null,
       'function-delete': null,
-      'snapshot-pos': { parsers: [this.createAddressParser('code'), parseInt],
-          processor: this.processSnapshotPosition, backrefs: true }});
+      'snapshot-pos': { parsers: [parseInt, parseInt],
+          processor: this.processSnapshotPosition }});
 
   Profile.prototype.handleUnknownCode = function(operation, addr) {
     var op = devtools.profiler.Profile.Operation;
@@ -95,8 +94,7 @@
 
 SnapshotLogProcessor.prototype.processCodeCreation = function(
     type, start, size, name) {
-  var entry = this.profile_.addCode(
-      this.expandAlias(type), name, start, size);
+  var entry = this.profile_.addCode(type, name, start, size);
 };
 
 
@@ -133,33 +131,28 @@
       'shared-library': { parsers: [null, parseInt, parseInt],
           processor: this.processSharedLibrary },
       'code-creation': {
-          parsers: [null, this.createAddressParser('code'), parseInt, null],
-          processor: this.processCodeCreation, backrefs: true },
-      'code-move': { parsers: [this.createAddressParser('code'),
-          this.createAddressParser('code-move-to')],
-          processor: this.processCodeMove, backrefs: true },
-      'code-delete': { parsers: [this.createAddressParser('code')],
-          processor: this.processCodeDelete, backrefs: true },
-      'function-creation': { parsers: [this.createAddressParser('code'),
-          this.createAddressParser('function-obj')],
-          processor: this.processFunctionCreation, backrefs: true },
-      'function-move': { parsers: [this.createAddressParser('code'),
-          this.createAddressParser('code-move-to')],
-          processor: this.processFunctionMove, backrefs: true },
-      'function-delete': { parsers: [this.createAddressParser('code')],
-          processor: this.processFunctionDelete, backrefs: true },
-      'snapshot-pos': { parsers: [this.createAddressParser('code'), parseInt],
-          processor: this.processSnapshotPosition, backrefs: true },
-      'tick': { parsers: [this.createAddressParser('code'),
-          this.createAddressParser('stack'),
-          this.createAddressParser('func'), parseInt, 'var-args'],
-          processor: this.processTick, backrefs: true },
+          parsers: [null, parseInt, parseInt, null],
+          processor: this.processCodeCreation },
+      'code-move': { parsers: [parseInt, parseInt],
+          processor: this.processCodeMove },
+      'code-delete': { parsers: [parseInt],
+          processor: this.processCodeDelete },
+      'function-creation': { parsers: [parseInt, parseInt],
+          processor: this.processFunctionCreation },
+      'function-move': { parsers: [parseInt, parseInt],
+          processor: this.processFunctionMove },
+      'function-delete': { parsers: [parseInt],
+          processor: this.processFunctionDelete },
+      'snapshot-pos': { parsers: [parseInt, parseInt],
+          processor: this.processSnapshotPosition },
+      'tick': { parsers: [parseInt, parseInt, parseInt, parseInt, 'var-args'],
+          processor: this.processTick },
       'heap-sample-begin': { parsers: [null, null, parseInt],
           processor: this.processHeapSampleBegin },
       'heap-sample-end': { parsers: [null, null],
           processor: this.processHeapSampleEnd },
       'heap-js-prod-item': { parsers: [null, 'var-args'],
-          processor: this.processJSProducer, backrefs: true },
+          processor: this.processJSProducer },
       // Ignored events.
       'profiler': null,
       'heap-sample-stats': null,
@@ -294,8 +287,7 @@
 TickProcessor.prototype.processCodeCreation = function(
     type, start, size, name) {
   name = this.deserializedEntriesNames_[start] || name;
-  var entry = this.profile_.addCode(
-      this.expandAlias(type), name, start, size);
+  var entry = this.profile_.addCode(type, name, start, size);
 };
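NOTE: Each entry in the dispatch table above pairs a parsers array with a processor method. A minimal sketch of how such a table is consumed (the real dispatch lives in devtools.profiler.LogReader; the function and field names below are illustrative assumptions):

    // Sketch only; 'this' is assumed to be the processor object that owns
    // the dispatch table and the handler methods.
    function dispatchLogRow(row) {
      var dispatch = this.dispatchTable_[row[0]];   // row[0] is the event name
      if (!dispatch) return;                        // null entries = ignored events
      var args = [];
      for (var i = 0; i < dispatch.parsers.length; i++) {
        var parser = dispatch.parsers[i];
        if (parser === null) {
          args.push(row[1 + i]);                    // keep the raw string field
        } else if (parser === 'var-args') {
          args.push(row.slice(1 + i));              // rest of the row, unparsed
          break;
        } else {
          args.push(parser(row[1 + i]));            // e.g. parseInt on an address
        }
      }
      dispatch.processor.apply(this, args);
    }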
 
 
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index 08558cc..5254c6e 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -13,6 +13,7 @@
 			buildPhases = (
 			);
 			dependencies = (
+				89EED40D12B69A0A0075BE1C /* PBXTargetDependency */,
 				7BF891970E73099F000BAF8A /* PBXTargetDependency */,
 				7BF891990E73099F000BAF8A /* PBXTargetDependency */,
 				893988100F2A3647007D5254 /* PBXTargetDependency */,
@@ -50,6 +51,46 @@
 		893A72240F7B101400303DD2 /* platform-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893A72230F7B0FF200303DD2 /* platform-posix.cc */; };
 		893A72250F7B101B00303DD2 /* platform-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893A72230F7B0FF200303DD2 /* platform-posix.cc */; };
 		893CCE640E71D83700357A03 /* code-stubs.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1110E719B8F00D62E90 /* code-stubs.cc */; };
+		893E24A812B14B3D0083370F /* bignum-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248612B14B3D0083370F /* bignum-dtoa.cc */; };
+		893E24A912B14B3D0083370F /* bignum.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248812B14B3D0083370F /* bignum.cc */; };
+		893E24AA12B14B3D0083370F /* cached-powers.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248A12B14B3D0083370F /* cached-powers.cc */; };
+		893E24AB12B14B3D0083370F /* deoptimizer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248B12B14B3D0083370F /* deoptimizer.cc */; };
+		893E24AC12B14B3D0083370F /* hydrogen-instructions.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248D12B14B3D0083370F /* hydrogen-instructions.cc */; };
+		893E24AD12B14B3D0083370F /* hydrogen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248F12B14B3D0083370F /* hydrogen.cc */; };
+		893E24AE12B14B3D0083370F /* lithium-allocator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249312B14B3D0083370F /* lithium-allocator.cc */; };
+		893E24AF12B14B3D0083370F /* preparse-data.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249512B14B3D0083370F /* preparse-data.cc */; };
+		893E24B012B14B3D0083370F /* preparser-api.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249712B14B3D0083370F /* preparser-api.cc */; };
+		893E24B112B14B3D0083370F /* preparser.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249812B14B3D0083370F /* preparser.cc */; };
+		893E24B212B14B3D0083370F /* runtime-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249A12B14B3D0083370F /* runtime-profiler.cc */; };
+		893E24B312B14B3D0083370F /* safepoint-table.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249C12B14B3D0083370F /* safepoint-table.cc */; };
+		893E24B412B14B3D0083370F /* scanner-base.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249E12B14B3D0083370F /* scanner-base.cc */; };
+		893E24B512B14B3D0083370F /* string-search.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24A112B14B3D0083370F /* string-search.cc */; };
+		893E24B612B14B3D0083370F /* strtod.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24A312B14B3D0083370F /* strtod.cc */; };
+		893E24B712B14B3D0083370F /* bignum-dtoa.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248612B14B3D0083370F /* bignum-dtoa.cc */; };
+		893E24B812B14B3D0083370F /* bignum.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248812B14B3D0083370F /* bignum.cc */; };
+		893E24B912B14B3D0083370F /* cached-powers.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248A12B14B3D0083370F /* cached-powers.cc */; };
+		893E24BA12B14B3D0083370F /* deoptimizer.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248B12B14B3D0083370F /* deoptimizer.cc */; };
+		893E24BB12B14B3D0083370F /* hydrogen-instructions.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248D12B14B3D0083370F /* hydrogen-instructions.cc */; };
+		893E24BC12B14B3D0083370F /* hydrogen.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E248F12B14B3D0083370F /* hydrogen.cc */; };
+		893E24BD12B14B3D0083370F /* lithium-allocator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249312B14B3D0083370F /* lithium-allocator.cc */; };
+		893E24BE12B14B3D0083370F /* preparse-data.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249512B14B3D0083370F /* preparse-data.cc */; };
+		893E24BF12B14B3D0083370F /* preparser-api.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249712B14B3D0083370F /* preparser-api.cc */; };
+		893E24C012B14B3D0083370F /* preparser.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249812B14B3D0083370F /* preparser.cc */; };
+		893E24C112B14B3D0083370F /* runtime-profiler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249A12B14B3D0083370F /* runtime-profiler.cc */; };
+		893E24C212B14B3D0083370F /* safepoint-table.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249C12B14B3D0083370F /* safepoint-table.cc */; };
+		893E24C312B14B3D0083370F /* scanner-base.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E249E12B14B3D0083370F /* scanner-base.cc */; };
+		893E24C412B14B3D0083370F /* string-search.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24A112B14B3D0083370F /* string-search.cc */; };
+		893E24C512B14B3D0083370F /* strtod.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24A312B14B3D0083370F /* strtod.cc */; };
+		893E24CC12B14B520083370F /* deoptimizer-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24C612B14B510083370F /* deoptimizer-arm.cc */; };
+		893E24CD12B14B520083370F /* lithium-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24C712B14B510083370F /* lithium-arm.cc */; };
+		893E24CE12B14B520083370F /* lithium-codegen-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24C912B14B520083370F /* lithium-codegen-arm.cc */; };
+		893E24D512B14B8A0083370F /* deoptimizer-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24D012B14B8A0083370F /* deoptimizer-ia32.cc */; };
+		893E24D612B14B8A0083370F /* lithium-codegen-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24D112B14B8A0083370F /* lithium-codegen-ia32.cc */; };
+		893E24D712B14B8A0083370F /* lithium-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24D312B14B8A0083370F /* lithium-ia32.cc */; };
+		893E24DC12B14B9F0083370F /* externalize-string-extension.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24D812B14B9F0083370F /* externalize-string-extension.cc */; };
+		893E24DD12B14B9F0083370F /* gc-extension.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24DA12B14B9F0083370F /* gc-extension.cc */; };
+		893E24DE12B14B9F0083370F /* externalize-string-extension.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24D812B14B9F0083370F /* externalize-string-extension.cc */; };
+		893E24DF12B14B9F0083370F /* gc-extension.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893E24DA12B14B9F0083370F /* gc-extension.cc */; };
 		8944AD100F1D4D500028D560 /* regexp-stack.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */; };
 		8944AD110F1D4D570028D560 /* regexp-stack.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */; };
 		894599A30F5D8729008DA8FB /* debug-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8956B6CD0F5D86570033B5A2 /* debug-agent.cc */; };
@@ -58,7 +99,12 @@
 		8956B6CF0F5D86730033B5A2 /* debug-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8956B6CD0F5D86570033B5A2 /* debug-agent.cc */; };
 		895FA753107FFED3006F39D4 /* constants-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 895FA748107FFE73006F39D4 /* constants-arm.cc */; };
 		896FD03A0E78D717003DFB6A /* libv8-arm.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 89F23C870E78D5B2006B2466 /* libv8-arm.a */; };
-		897F767F0E71B690007ACF34 /* shell.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1B50E719C0900D62E90 /* shell.cc */; };
+		897C77D012B68E3D000767A8 /* d8-debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893988150F2A3686007D5254 /* d8-debug.cc */; };
+		897C77D112B68E3D000767A8 /* d8-js.cc in Sources */ = {isa = PBXBuildFile; fileRef = 893988320F2A3B8B007D5254 /* d8-js.cc */; };
+		897C77D212B68E3D000767A8 /* d8-posix.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89FB0E360F8E531900B04B3C /* d8-posix.cc */; };
+		897C77D312B68E3D000767A8 /* d8.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C920EE46A1700B48DEB /* d8.cc */; };
+		897C77DD12B68E6E000767A8 /* libv8-arm.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 89F23C870E78D5B2006B2466 /* libv8-arm.a */; };
+		897F767F0E71B690007ACF34 /* shell.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1B50E719C0900D62E90 /* shell.cc */; settings = {COMPILER_FLAGS = "-I../include"; }; };
 		897F76850E71B6B1007ACF34 /* libv8.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 8970F2F00E719FB2006AE7B5 /* libv8.a */; };
 		8981F6001010501900D1520E /* frame-element.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8981F5FE1010500F00D1520E /* frame-element.cc */; };
 		8981F6011010502800D1520E /* frame-element.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8981F5FE1010500F00D1520E /* frame-element.cc */; };
@@ -90,7 +136,6 @@
 		89A88DFF0E71A6530043BA31 /* debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1280E719B8F00D62E90 /* debug.cc */; };
 		89A88E000E71A6540043BA31 /* disasm-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF12B0E719B8F00D62E90 /* disasm-ia32.cc */; };
 		89A88E010E71A6550043BA31 /* disassembler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF12D0E719B8F00D62E90 /* disassembler.cc */; };
-		89A88E020E71A65A0043BA31 /* dtoa-config.c in Sources */ = {isa = PBXBuildFile; fileRef = 897FF12F0E719B8F00D62E90 /* dtoa-config.c */; };
 		89A88E030E71A65B0043BA31 /* execution.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1300E719B8F00D62E90 /* execution.cc */; };
 		89A88E040E71A65D0043BA31 /* factory.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1320E719B8F00D62E90 /* factory.cc */; };
 		89A88E050E71A65D0043BA31 /* flags.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1350E719B8F00D62E90 /* flags.cc */; };
@@ -122,7 +167,6 @@
 		89A88E1F0E71A6B40043BA31 /* snapshot-common.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1820E719B8F00D62E90 /* snapshot-common.cc */; };
 		89A88E200E71A6B60043BA31 /* snapshot-empty.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1830E719B8F00D62E90 /* snapshot-empty.cc */; };
 		89A88E210E71A6B70043BA31 /* spaces.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1860E719B8F00D62E90 /* spaces.cc */; };
-		89A88E220E71A6BC0043BA31 /* string-search.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1880E719B8F00D62E90 /* string-search.cc */; };
 		89A88E220E71A6BC0043BA31 /* string-stream.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1880E719B8F00D62E90 /* string-stream.cc */; };
 		89A88E230E71A6BE0043BA31 /* stub-cache-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18B0E719B8F00D62E90 /* stub-cache-ia32.cc */; };
 		89A88E240E71A6BF0043BA31 /* stub-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18C0E719B8F00D62E90 /* stub-cache.cc */; };
@@ -154,7 +198,6 @@
 		89F23C510E78D5B2006B2466 /* dateparser.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1260E719B8F00D62E90 /* dateparser.cc */; };
 		89F23C520E78D5B2006B2466 /* debug.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1280E719B8F00D62E90 /* debug.cc */; };
 		89F23C540E78D5B2006B2466 /* disassembler.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF12D0E719B8F00D62E90 /* disassembler.cc */; };
-		89F23C550E78D5B2006B2466 /* dtoa-config.c in Sources */ = {isa = PBXBuildFile; fileRef = 897FF12F0E719B8F00D62E90 /* dtoa-config.c */; };
 		89F23C560E78D5B2006B2466 /* execution.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1300E719B8F00D62E90 /* execution.cc */; };
 		89F23C570E78D5B2006B2466 /* factory.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1320E719B8F00D62E90 /* factory.cc */; };
 		89F23C580E78D5B2006B2466 /* flags.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1350E719B8F00D62E90 /* flags.cc */; };
@@ -184,7 +227,6 @@
 		89F23C730E78D5B2006B2466 /* snapshot-common.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1820E719B8F00D62E90 /* snapshot-common.cc */; };
 		89F23C740E78D5B2006B2466 /* snapshot-empty.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1830E719B8F00D62E90 /* snapshot-empty.cc */; };
 		89F23C750E78D5B2006B2466 /* spaces.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1860E719B8F00D62E90 /* spaces.cc */; };
-		89F23C760E78D5B2006B2466 /* string-search.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1880E719B8F00D62E90 /* string-search.cc */; };
 		89F23C760E78D5B2006B2466 /* string-stream.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1880E719B8F00D62E90 /* string-stream.cc */; };
 		89F23C780E78D5B2006B2466 /* stub-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18C0E719B8F00D62E90 /* stub-cache.cc */; };
 		89F23C790E78D5B2006B2466 /* token.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF18E0E719B8F00D62E90 /* token.cc */; };
@@ -196,7 +238,7 @@
 		89F23C800E78D5B2006B2466 /* v8threads.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF19D0E719B8F00D62E90 /* v8threads.cc */; };
 		89F23C810E78D5B2006B2466 /* variables.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF19F0E719B8F00D62E90 /* variables.cc */; };
 		89F23C820E78D5B2006B2466 /* zone.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1A20E719B8F00D62E90 /* zone.cc */; };
-		89F23C8E0E78D5B6006B2466 /* shell.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1B50E719C0900D62E90 /* shell.cc */; };
+		89F23C8E0E78D5B6006B2466 /* shell.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1B50E719C0900D62E90 /* shell.cc */; settings = {COMPILER_FLAGS = "-I../include"; }; };
 		89F23C970E78D5E3006B2466 /* assembler-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0FE0E719B8F00D62E90 /* assembler-arm.cc */; };
 		89F23C980E78D5E7006B2466 /* builtins-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1090E719B8F00D62E90 /* builtins-arm.cc */; };
 		89F23C990E78D5E9006B2466 /* codegen-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1140E719B8F00D62E90 /* codegen-arm.cc */; };
@@ -300,6 +342,13 @@
 			remoteGlobalIDString = 89F23C880E78D5B6006B2466;
 			remoteInfo = "v8_shell-arm";
 		};
+		897C77DB12B68E5D000767A8 /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 89F23C3C0E78D5B2006B2466;
+			remoteInfo = "v8-arm";
+		};
 		897F76820E71B6AC007ACF34 /* PBXContainerItemProxy */ = {
 			isa = PBXContainerItemProxy;
 			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
@@ -307,6 +356,13 @@
 			remoteGlobalIDString = 8970F2EF0E719FB2006AE7B5;
 			remoteInfo = v8;
 		};
+		89EED40C12B69A0A0075BE1C /* PBXContainerItemProxy */ = {
+			isa = PBXContainerItemProxy;
+			containerPortal = 8915B8680E719336009C4E19 /* Project object */;
+			proxyType = 1;
+			remoteGlobalIDString = 897C77CB12B68E3D000767A8;
+			remoteInfo = "d8_shell-arm";
+		};
 /* End PBXContainerItemProxy section */
 
 /* Begin PBXFileReference section */
@@ -327,13 +383,68 @@
 		58950D5B0F55514900F3E8BA /* virtual-frame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame.h"; sourceTree = "<group>"; };
 		8900116B0E71CA2300F91F35 /* libraries.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = libraries.cc; sourceTree = "<group>"; };
 		893986D40F29020C007D5254 /* apiutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = apiutils.h; sourceTree = "<group>"; };
-		8939880B0F2A35FA007D5254 /* v8_shell */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = v8_shell; sourceTree = BUILT_PRODUCTS_DIR; };
+		8939880B0F2A35FA007D5254 /* d8 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = d8; sourceTree = BUILT_PRODUCTS_DIR; };
 		893988150F2A3686007D5254 /* d8-debug.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "d8-debug.cc"; path = "../src/d8-debug.cc"; sourceTree = "<group>"; };
 		893988320F2A3B8B007D5254 /* d8-js.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "d8-js.cc"; sourceTree = "<group>"; };
 		893A72230F7B0FF200303DD2 /* platform-posix.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "platform-posix.cc"; sourceTree = "<group>"; };
 		893A722A0F7B4A3200303DD2 /* dateparser-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "dateparser-inl.h"; sourceTree = "<group>"; };
 		893A722D0F7B4A7100303DD2 /* register-allocator-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "register-allocator-inl.h"; sourceTree = "<group>"; };
 		893A72320F7B4AD700303DD2 /* d8-debug.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "d8-debug.h"; path = "../src/d8-debug.h"; sourceTree = "<group>"; };
+		893E248112B14AD40083370F /* v8-preparser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "v8-preparser.h"; sourceTree = "<group>"; };
+		893E248212B14AD40083370F /* v8-testing.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "v8-testing.h"; sourceTree = "<group>"; };
+		893E248312B14AD40083370F /* v8stdint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = v8stdint.h; sourceTree = "<group>"; };
+		893E248412B14B3D0083370F /* ast-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "ast-inl.h"; sourceTree = "<group>"; };
+		893E248512B14B3D0083370F /* atomicops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = atomicops.h; sourceTree = "<group>"; };
+		893E248612B14B3D0083370F /* bignum-dtoa.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "bignum-dtoa.cc"; sourceTree = "<group>"; };
+		893E248712B14B3D0083370F /* bignum-dtoa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "bignum-dtoa.h"; sourceTree = "<group>"; };
+		893E248812B14B3D0083370F /* bignum.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = bignum.cc; sourceTree = "<group>"; };
+		893E248912B14B3D0083370F /* bignum.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bignum.h; sourceTree = "<group>"; };
+		893E248A12B14B3D0083370F /* cached-powers.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "cached-powers.cc"; sourceTree = "<group>"; };
+		893E248B12B14B3D0083370F /* deoptimizer.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = deoptimizer.cc; sourceTree = "<group>"; };
+		893E248C12B14B3D0083370F /* deoptimizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = deoptimizer.h; sourceTree = "<group>"; };
+		893E248D12B14B3D0083370F /* hydrogen-instructions.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "hydrogen-instructions.cc"; sourceTree = "<group>"; };
+		893E248E12B14B3D0083370F /* hydrogen-instructions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "hydrogen-instructions.h"; sourceTree = "<group>"; };
+		893E248F12B14B3D0083370F /* hydrogen.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = hydrogen.cc; sourceTree = "<group>"; };
+		893E249012B14B3D0083370F /* hydrogen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hydrogen.h; sourceTree = "<group>"; };
+		893E249112B14B3D0083370F /* jump-target-heavy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target-heavy.h"; sourceTree = "<group>"; };
+		893E249212B14B3D0083370F /* jump-target-light.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target-light.h"; sourceTree = "<group>"; };
+		893E249312B14B3D0083370F /* lithium-allocator.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "lithium-allocator.cc"; sourceTree = "<group>"; };
+		893E249412B14B3D0083370F /* lithium-allocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "lithium-allocator.h"; sourceTree = "<group>"; };
+		893E249512B14B3D0083370F /* preparse-data.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "preparse-data.cc"; sourceTree = "<group>"; };
+		893E249612B14B3D0083370F /* preparse-data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "preparse-data.h"; sourceTree = "<group>"; };
+		893E249712B14B3D0083370F /* preparser-api.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "preparser-api.cc"; sourceTree = "<group>"; };
+		893E249812B14B3D0083370F /* preparser.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = preparser.cc; sourceTree = "<group>"; };
+		893E249912B14B3D0083370F /* preparser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = preparser.h; sourceTree = "<group>"; };
+		893E249A12B14B3D0083370F /* runtime-profiler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "runtime-profiler.cc"; sourceTree = "<group>"; };
+		893E249B12B14B3D0083370F /* runtime-profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "runtime-profiler.h"; sourceTree = "<group>"; };
+		893E249C12B14B3D0083370F /* safepoint-table.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "safepoint-table.cc"; sourceTree = "<group>"; };
+		893E249D12B14B3D0083370F /* safepoint-table.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "safepoint-table.h"; sourceTree = "<group>"; };
+		893E249E12B14B3D0083370F /* scanner-base.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "scanner-base.cc"; sourceTree = "<group>"; };
+		893E249F12B14B3D0083370F /* scanner-base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "scanner-base.h"; sourceTree = "<group>"; };
+		893E24A012B14B3D0083370F /* simulator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = simulator.h; sourceTree = "<group>"; };
+		893E24A112B14B3D0083370F /* string-search.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "string-search.cc"; sourceTree = "<group>"; };
+		893E24A212B14B3D0083370F /* string-search.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "string-search.h"; sourceTree = "<group>"; };
+		893E24A312B14B3D0083370F /* strtod.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = strtod.cc; sourceTree = "<group>"; };
+		893E24A412B14B3D0083370F /* strtod.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = strtod.h; sourceTree = "<group>"; };
+		893E24A512B14B3D0083370F /* v8checks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = v8checks.h; sourceTree = "<group>"; };
+		893E24A612B14B3D0083370F /* v8globals.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = v8globals.h; sourceTree = "<group>"; };
+		893E24A712B14B3D0083370F /* v8utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = v8utils.h; sourceTree = "<group>"; };
+		893E24C612B14B510083370F /* deoptimizer-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "deoptimizer-arm.cc"; path = "arm/deoptimizer-arm.cc"; sourceTree = "<group>"; };
+		893E24C712B14B510083370F /* lithium-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-arm.cc"; path = "arm/lithium-arm.cc"; sourceTree = "<group>"; };
+		893E24C812B14B510083370F /* lithium-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-arm.h"; path = "arm/lithium-arm.h"; sourceTree = "<group>"; };
+		893E24C912B14B520083370F /* lithium-codegen-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-codegen-arm.cc"; path = "arm/lithium-codegen-arm.cc"; sourceTree = "<group>"; };
+		893E24CA12B14B520083370F /* lithium-codegen-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-codegen-arm.h"; path = "arm/lithium-codegen-arm.h"; sourceTree = "<group>"; };
+		893E24CB12B14B520083370F /* virtual-frame-arm-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "virtual-frame-arm-inl.h"; path = "arm/virtual-frame-arm-inl.h"; sourceTree = "<group>"; };
+		893E24CF12B14B780083370F /* atomicops_internals_x86_macosx.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = atomicops_internals_x86_macosx.h; sourceTree = "<group>"; };
+		893E24D012B14B8A0083370F /* deoptimizer-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "deoptimizer-ia32.cc"; path = "ia32/deoptimizer-ia32.cc"; sourceTree = "<group>"; };
+		893E24D112B14B8A0083370F /* lithium-codegen-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-codegen-ia32.cc"; path = "ia32/lithium-codegen-ia32.cc"; sourceTree = "<group>"; };
+		893E24D212B14B8A0083370F /* lithium-codegen-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-codegen-ia32.h"; path = "ia32/lithium-codegen-ia32.h"; sourceTree = "<group>"; };
+		893E24D312B14B8A0083370F /* lithium-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-ia32.cc"; path = "ia32/lithium-ia32.cc"; sourceTree = "<group>"; };
+		893E24D412B14B8A0083370F /* lithium-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-ia32.h"; path = "ia32/lithium-ia32.h"; sourceTree = "<group>"; };
+		893E24D812B14B9F0083370F /* externalize-string-extension.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "externalize-string-extension.cc"; path = "extensions/externalize-string-extension.cc"; sourceTree = "<group>"; };
+		893E24D912B14B9F0083370F /* externalize-string-extension.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "externalize-string-extension.h"; path = "extensions/externalize-string-extension.h"; sourceTree = "<group>"; };
+		893E24DA12B14B9F0083370F /* gc-extension.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "gc-extension.cc"; path = "extensions/gc-extension.cc"; sourceTree = "<group>"; };
+		893E24DB12B14B9F0083370F /* gc-extension.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "gc-extension.h"; path = "extensions/gc-extension.h"; sourceTree = "<group>"; };
 		8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "regexp-stack.cc"; sourceTree = "<group>"; };
 		8944AD0F0F1D4D3A0028D560 /* regexp-stack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "regexp-stack.h"; sourceTree = "<group>"; };
 		89471C7F0EB23EE400B6874B /* flag-definitions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "flag-definitions.h"; sourceTree = "<group>"; };
@@ -352,6 +463,7 @@
 		8964482B0E9C00F700E7C516 /* codegen-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-ia32.h"; path = "ia32/codegen-ia32.h"; sourceTree = "<group>"; };
 		896448BC0E9D530500E7C516 /* codegen-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-arm.h"; path = "arm/codegen-arm.h"; sourceTree = "<group>"; };
 		8970F2F00E719FB2006AE7B5 /* libv8.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libv8.a; sourceTree = BUILT_PRODUCTS_DIR; };
+		897C77D912B68E3D000767A8 /* d8-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "d8-arm"; sourceTree = BUILT_PRODUCTS_DIR; };
 		897F767A0E71B4CC007ACF34 /* v8_shell */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = v8_shell; sourceTree = BUILT_PRODUCTS_DIR; };
 		897FF0D40E719A8500D62E90 /* v8-debug.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "v8-debug.h"; sourceTree = "<group>"; };
 		897FF0D50E719A8500D62E90 /* v8.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = v8.h; sourceTree = "<group>"; };
@@ -414,7 +526,6 @@
 		897FF12C0E719B8F00D62E90 /* disasm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = disasm.h; sourceTree = "<group>"; };
 		897FF12D0E719B8F00D62E90 /* disassembler.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = disassembler.cc; sourceTree = "<group>"; };
 		897FF12E0E719B8F00D62E90 /* disassembler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = disassembler.h; sourceTree = "<group>"; };
-		897FF12F0E719B8F00D62E90 /* dtoa-config.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = "dtoa-config.c"; sourceTree = "<group>"; };
 		897FF1300E719B8F00D62E90 /* execution.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = execution.cc; sourceTree = "<group>"; };
 		897FF1310E719B8F00D62E90 /* execution.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = execution.h; sourceTree = "<group>"; };
 		897FF1320E719B8F00D62E90 /* factory.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = factory.cc; sourceTree = "<group>"; };
@@ -502,8 +613,6 @@
 		897FF1850E719B8F00D62E90 /* spaces-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "spaces-inl.h"; sourceTree = "<group>"; };
 		897FF1860E719B8F00D62E90 /* spaces.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = spaces.cc; sourceTree = "<group>"; };
 		897FF1870E719B8F00D62E90 /* spaces.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = spaces.h; sourceTree = "<group>"; };
-		897FF1880E719B8F00D62E90 /* string-search.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "string-search.cc"; sourceTree = "<group>"; };
-		897FF1880E719B8F00D62E90 /* string-search.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "string-search.h"; sourceTree = "<group>"; };
 		897FF1880E719B8F00D62E90 /* string-stream.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "string-stream.cc"; sourceTree = "<group>"; };
 		897FF1890E719B8F00D62E90 /* string-stream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "string-stream.h"; sourceTree = "<group>"; };
 		897FF18A0E719B8F00D62E90 /* stub-cache-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "stub-cache-arm.cc"; path = "arm/stub-cache-arm.cc"; sourceTree = "<group>"; };
@@ -603,7 +712,6 @@
 		9FA38BA81175B2D200C4CD55 /* jump-target-light-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target-light-inl.h"; sourceTree = "<group>"; };
 		9FA38BA91175B2D200C4CD55 /* liveedit.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = liveedit.cc; sourceTree = "<group>"; };
 		9FA38BAA1175B2D200C4CD55 /* liveedit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = liveedit.h; sourceTree = "<group>"; };
-		9FA38BAB1175B2D200C4CD55 /* powers-ten.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "powers-ten.h"; sourceTree = "<group>"; };
 		9FA38BAC1175B2D200C4CD55 /* splay-tree-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "splay-tree-inl.h"; sourceTree = "<group>"; };
 		9FA38BAD1175B2D200C4CD55 /* splay-tree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "splay-tree.h"; sourceTree = "<group>"; };
 		9FA38BAE1175B2D200C4CD55 /* type-info.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "type-info.cc"; sourceTree = "<group>"; };
@@ -649,6 +757,14 @@
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
+		897C77D412B68E3D000767A8 /* Frameworks */ = {
+			isa = PBXFrameworksBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				897C77DD12B68E6E000767A8 /* libv8-arm.a in Frameworks */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
 		897F76780E71B4CC007ACF34 /* Frameworks */ = {
 			isa = PBXFrameworksBuildPhase;
 			buildActionMask = 2147483647;
@@ -683,54 +799,14 @@
 			);
 			sourceTree = "<group>";
 		};
-		897FF0CF0E71996900D62E90 /* v8 */ = {
+		893E24E212B14BD20083370F /* C++ */ = {
 			isa = PBXGroup;
 			children = (
-				897FF0D10E71999E00D62E90 /* include */,
-				897FF1B30E719BCE00D62E90 /* samples */,
-				897FF0D00E71999800D62E90 /* src */,
-				897FF1B40E719BE800D62E90 /* tools */,
-			);
-			name = v8;
-			path = ..;
-			sourceTree = "<group>";
-		};
-		897FF0D00E71999800D62E90 /* src */ = {
-			isa = PBXGroup;
-			children = (
-				897FF0D70E719AB300D62E90 /* C++ */,
-				89A9C1630E71C8E300BE6CCA /* generated */,
-				897FF0D80E719ABA00D62E90 /* js */,
-				897FF0DE0E719B3400D62E90 /* third_party */,
-			);
-			path = src;
-			sourceTree = "<group>";
-		};
-		897FF0D10E71999E00D62E90 /* include */ = {
-			isa = PBXGroup;
-			children = (
-				897FF0D40E719A8500D62E90 /* v8-debug.h */,
-				9FA36F62116BA26500C4CD55 /* v8-profiler.h */,
-				897FF0D50E719A8500D62E90 /* v8.h */,
-			);
-			path = include;
-			sourceTree = "<group>";
-		};
-		897FF0D70E719AB300D62E90 /* C++ */ = {
-			isa = PBXGroup;
-			children = (
-				C68081B412251257001EAFE4 /* code-stubs-ia32.h */,
-				C68081B012251239001EAFE4 /* code-stubs-ia32.cc */,
-				C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */,
-				C68081AC1225120B001EAFE4 /* code-stubs-arm.h */,
-				897FF1750E719B8F00D62E90 /* SConscript */,
 				897FF0F60E719B8F00D62E90 /* accessors.cc */,
 				897FF0F70E719B8F00D62E90 /* accessors.h */,
 				897FF0F80E719B8F00D62E90 /* allocation.cc */,
 				897FF0F90E719B8F00D62E90 /* allocation.h */,
 				897FF0FA0E719B8F00D62E90 /* api.cc */,
-				C2D1E9711212F27B00187A52 /* objects-visiting.cc */,
-				C2D1E9721212F27B00187A52 /* objects-visiting.h */,
 				897FF0FB0E719B8F00D62E90 /* api.h */,
 				893986D40F29020C007D5254 /* apiutils.h */,
 				897FF0FC0E719B8F00D62E90 /* arguments.h */,
@@ -742,10 +818,15 @@
 				897FF1020E719B8F00D62E90 /* assembler-ia32.h */,
 				897FF1030E719B8F00D62E90 /* assembler.cc */,
 				897FF1040E719B8F00D62E90 /* assembler.h */,
-				C2BD4BD9120165A70046BF9F /* fixed-dtoa.cc */,
-				C2BD4BDA120165A70046BF9F /* fixed-dtoa.h */,
+				893E248412B14B3D0083370F /* ast-inl.h */,
 				897FF1050E719B8F00D62E90 /* ast.cc */,
 				897FF1060E719B8F00D62E90 /* ast.h */,
+				893E248512B14B3D0083370F /* atomicops.h */,
+				893E24CF12B14B780083370F /* atomicops_internals_x86_macosx.h */,
+				893E248612B14B3D0083370F /* bignum-dtoa.cc */,
+				893E248712B14B3D0083370F /* bignum-dtoa.h */,
+				893E248812B14B3D0083370F /* bignum.cc */,
+				893E248912B14B3D0083370F /* bignum.h */,
 				897FF1070E719B8F00D62E90 /* bootstrapper.cc */,
 				897FF1080E719B8F00D62E90 /* bootstrapper.h */,
 				897FF1090E719B8F00D62E90 /* builtins-arm.cc */,
@@ -753,6 +834,7 @@
 				897FF10B0E719B8F00D62E90 /* builtins.cc */,
 				897FF10C0E719B8F00D62E90 /* builtins.h */,
 				89A15C630EE4661A00B48DEB /* bytecodes-irregexp.h */,
+				893E248A12B14B3D0083370F /* cached-powers.cc */,
 				9FA38B9B1175B2D200C4CD55 /* cached-powers.h */,
 				897FF10D0E719B8F00D62E90 /* char-predicates-inl.h */,
 				897FF10E0E719B8F00D62E90 /* char-predicates.h */,
@@ -761,6 +843,10 @@
 				9F2B370E114FF62D007CDAF4 /* circular-queue-inl.h */,
 				9F2B370F114FF62D007CDAF4 /* circular-queue.cc */,
 				9F2B3710114FF62D007CDAF4 /* circular-queue.h */,
+				C68081AB1225120B001EAFE4 /* code-stubs-arm.cc */,
+				C68081AC1225120B001EAFE4 /* code-stubs-arm.h */,
+				C68081B012251239001EAFE4 /* code-stubs-ia32.cc */,
+				C68081B412251257001EAFE4 /* code-stubs-ia32.h */,
 				897FF1110E719B8F00D62E90 /* code-stubs.cc */,
 				897FF1120E719B8F00D62E90 /* code-stubs.h */,
 				897FF1130E719B8F00D62E90 /* code.h */,
@@ -803,6 +889,10 @@
 				898BD20D0EF6CC850068B00A /* debug-ia32.cc */,
 				897FF1280E719B8F00D62E90 /* debug.cc */,
 				897FF1290E719B8F00D62E90 /* debug.h */,
+				893E24C612B14B510083370F /* deoptimizer-arm.cc */,
+				893E24D012B14B8A0083370F /* deoptimizer-ia32.cc */,
+				893E248B12B14B3D0083370F /* deoptimizer.cc */,
+				893E248C12B14B3D0083370F /* deoptimizer.h */,
 				897FF12A0E719B8F00D62E90 /* disasm-arm.cc */,
 				897FF12B0E719B8F00D62E90 /* disasm-ia32.cc */,
 				897FF12C0E719B8F00D62E90 /* disasm.h */,
@@ -813,13 +903,16 @@
 				9FA38BA01175B2D200C4CD55 /* double.h */,
 				C2BD4BD5120165460046BF9F /* dtoa.cc */,
 				C2BD4BD6120165460046BF9F /* dtoa.h */,
-				897FF12F0E719B8F00D62E90 /* dtoa-config.c */,
 				897FF1300E719B8F00D62E90 /* execution.cc */,
 				897FF1310E719B8F00D62E90 /* execution.h */,
+				893E24D812B14B9F0083370F /* externalize-string-extension.cc */,
+				893E24D912B14B9F0083370F /* externalize-string-extension.h */,
 				897FF1320E719B8F00D62E90 /* factory.cc */,
 				897FF1330E719B8F00D62E90 /* factory.h */,
 				9FA38BA11175B2D200C4CD55 /* fast-dtoa.cc */,
 				9FA38BA21175B2D200C4CD55 /* fast-dtoa.h */,
+				C2BD4BD9120165A70046BF9F /* fixed-dtoa.cc */,
+				C2BD4BDA120165A70046BF9F /* fixed-dtoa.h */,
 				89471C7F0EB23EE400B6874B /* flag-definitions.h */,
 				897FF1350E719B8F00D62E90 /* flags.cc */,
 				897FF1360E719B8F00D62E90 /* flags.h */,
@@ -838,6 +931,8 @@
 				9FA38BA61175B2D200C4CD55 /* full-codegen.h */,
 				9F92FAA70F8F28AD0089F02C /* func-name-inferrer.cc */,
 				9F92FAA80F8F28AD0089F02C /* func-name-inferrer.h */,
+				893E24DA12B14B9F0083370F /* gc-extension.cc */,
+				893E24DB12B14B9F0083370F /* gc-extension.h */,
 				897FF13E0E719B8F00D62E90 /* global-handles.cc */,
 				897FF13F0E719B8F00D62E90 /* global-handles.h */,
 				897FF1400E719B8F00D62E90 /* globals.h */,
@@ -851,6 +946,10 @@
 				9F11D99F105AF0A300EBE5B2 /* heap-profiler.h */,
 				897FF1470E719B8F00D62E90 /* heap.cc */,
 				897FF1480E719B8F00D62E90 /* heap.h */,
+				893E248D12B14B3D0083370F /* hydrogen-instructions.cc */,
+				893E248E12B14B3D0083370F /* hydrogen-instructions.h */,
+				893E248F12B14B3D0083370F /* hydrogen.cc */,
+				893E249012B14B3D0083370F /* hydrogen.h */,
 				897FF1490E719B8F00D62E90 /* ic-arm.cc */,
 				897FF14A0E719B8F00D62E90 /* ic-ia32.cc */,
 				897FF14B0E719B8F00D62E90 /* ic-inl.h */,
@@ -862,19 +961,27 @@
 				897FF14F0E719B8F00D62E90 /* jsregexp.h */,
 				9FA38BCC1175B30400C4CD55 /* jump-target-arm.cc */,
 				895FA720107FFB15006F39D4 /* jump-target-heavy-inl.h */,
-				895FA720107FFB15006F39D4 /* jump-target-heavy-inl.h */,
-				895FA720107FFB15006F39D4 /* jump-target-heavy-inl.h */,
 				58950D4F0F55514900F3E8BA /* jump-target-heavy.cc */,
-				58950D4F0F55514900F3E8BA /* jump-target-heavy.cc */,
+				893E249112B14B3D0083370F /* jump-target-heavy.h */,
 				9FA38BC31175B2E500C4CD55 /* jump-target-ia32.cc */,
 				9FA38BA71175B2D200C4CD55 /* jump-target-inl.h */,
 				9FA38BA81175B2D200C4CD55 /* jump-target-light-inl.h */,
 				58950D4E0F55514900F3E8BA /* jump-target-light.cc */,
-				58950D4E0F55514900F3E8BA /* jump-target-light.cc */,
+				893E249212B14B3D0083370F /* jump-target-light.h */,
 				58950D500F55514900F3E8BA /* jump-target.cc */,
 				58950D510F55514900F3E8BA /* jump-target.h */,
 				897FF1500E719B8F00D62E90 /* list-inl.h */,
 				897FF1510E719B8F00D62E90 /* list.h */,
+				893E249312B14B3D0083370F /* lithium-allocator.cc */,
+				893E249412B14B3D0083370F /* lithium-allocator.h */,
+				893E24C712B14B510083370F /* lithium-arm.cc */,
+				893E24C812B14B510083370F /* lithium-arm.h */,
+				893E24C912B14B520083370F /* lithium-codegen-arm.cc */,
+				893E24CA12B14B520083370F /* lithium-codegen-arm.h */,
+				893E24D112B14B8A0083370F /* lithium-codegen-ia32.cc */,
+				893E24D212B14B8A0083370F /* lithium-codegen-ia32.h */,
+				893E24D312B14B8A0083370F /* lithium-ia32.cc */,
+				893E24D412B14B8A0083370F /* lithium-ia32.h */,
 				9FA38BA91175B2D200C4CD55 /* liveedit.cc */,
 				9FA38BAA1175B2D200C4CD55 /* liveedit.h */,
 				22A76C900FF259E600FDC694 /* log-inl.h */,
@@ -896,6 +1003,8 @@
 				897FF15F0E719B8F00D62E90 /* natives.h */,
 				897FF1600E719B8F00D62E90 /* objects-debug.cc */,
 				897FF1610E719B8F00D62E90 /* objects-inl.h */,
+				C2D1E9711212F27B00187A52 /* objects-visiting.cc */,
+				C2D1E9721212F27B00187A52 /* objects-visiting.h */,
 				897FF1620E719B8F00D62E90 /* objects.cc */,
 				897FF1630E719B8F00D62E90 /* objects.h */,
 				9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */,
@@ -909,7 +1018,11 @@
 				893A72230F7B0FF200303DD2 /* platform-posix.cc */,
 				897FF1690E719B8F00D62E90 /* platform-win32.cc */,
 				897FF16A0E719B8F00D62E90 /* platform.h */,
-				9FA38BAB1175B2D200C4CD55 /* powers-ten.h */,
+				893E249512B14B3D0083370F /* preparse-data.cc */,
+				893E249612B14B3D0083370F /* preparse-data.h */,
+				893E249712B14B3D0083370F /* preparser-api.cc */,
+				893E249812B14B3D0083370F /* preparser.cc */,
+				893E249912B14B3D0083370F /* preparser.h */,
 				897FF16B0E719B8F00D62E90 /* prettyprinter.cc */,
 				897FF16C0E719B8F00D62E90 /* prettyprinter.h */,
 				9F73E3AE114E61A100F84A5A /* profile-generator-inl.h */,
@@ -941,10 +1054,17 @@
 				58950D550F55514900F3E8BA /* register-allocator.h */,
 				897FF16F0E719B8F00D62E90 /* rewriter.cc */,
 				897FF1700E719B8F00D62E90 /* rewriter.h */,
+				893E249A12B14B3D0083370F /* runtime-profiler.cc */,
+				893E249B12B14B3D0083370F /* runtime-profiler.h */,
 				897FF1710E719B8F00D62E90 /* runtime.cc */,
 				897FF1720E719B8F00D62E90 /* runtime.h */,
+				893E249C12B14B3D0083370F /* safepoint-table.cc */,
+				893E249D12B14B3D0083370F /* safepoint-table.h */,
+				893E249E12B14B3D0083370F /* scanner-base.cc */,
+				893E249F12B14B3D0083370F /* scanner-base.h */,
 				897FF1730E719B8F00D62E90 /* scanner.cc */,
 				897FF1740E719B8F00D62E90 /* scanner.h */,
+				897FF1750E719B8F00D62E90 /* SConscript */,
 				897FF1760E719B8F00D62E90 /* scopeinfo.cc */,
 				897FF1770E719B8F00D62E90 /* scopeinfo.h */,
 				897FF1780E719B8F00D62E90 /* scopes.cc */,
@@ -956,6 +1076,7 @@
 				897FF17E0E719B8F00D62E90 /* simulator-arm.h */,
 				897FF17F0E719B8F00D62E90 /* simulator-ia32.cc */,
 				897FF1800E719B8F00D62E90 /* simulator-ia32.h */,
+				893E24A012B14B3D0083370F /* simulator.h */,
 				897FF1810E719B8F00D62E90 /* smart-pointer.h */,
 				897FF1820E719B8F00D62E90 /* snapshot-common.cc */,
 				897FF1830E719B8F00D62E90 /* snapshot-empty.cc */,
@@ -965,10 +1086,12 @@
 				897FF1870E719B8F00D62E90 /* spaces.h */,
 				9FA38BAC1175B2D200C4CD55 /* splay-tree-inl.h */,
 				9FA38BAD1175B2D200C4CD55 /* splay-tree.h */,
-				897FF1880E719B8F00D62E90 /* string-search.cc */,
-				897FF1890E719B8F00D62E90 /* string-search.h */,
+				893E24A112B14B3D0083370F /* string-search.cc */,
+				893E24A212B14B3D0083370F /* string-search.h */,
 				897FF1880E719B8F00D62E90 /* string-stream.cc */,
 				897FF1890E719B8F00D62E90 /* string-stream.h */,
+				893E24A312B14B3D0083370F /* strtod.cc */,
+				893E24A412B14B3D0083370F /* strtod.h */,
 				897FF18A0E719B8F00D62E90 /* stub-cache-arm.cc */,
 				897FF18B0E719B8F00D62E90 /* stub-cache-ia32.cc */,
 				897FF18C0E719B8F00D62E90 /* stub-cache.cc */,
@@ -990,23 +1113,25 @@
 				897FF19A0E719B8F00D62E90 /* v8-counters.h */,
 				897FF19B0E719B8F00D62E90 /* v8.cc */,
 				897FF19C0E719B8F00D62E90 /* v8.h */,
+				893E24A512B14B3D0083370F /* v8checks.h */,
+				893E24A612B14B3D0083370F /* v8globals.h */,
 				897FF19D0E719B8F00D62E90 /* v8threads.cc */,
 				897FF19E0E719B8F00D62E90 /* v8threads.h */,
+				893E24A712B14B3D0083370F /* v8utils.h */,
 				897FF19F0E719B8F00D62E90 /* variables.cc */,
 				897FF1A00E719B8F00D62E90 /* variables.h */,
 				897FF32F0FAA0ED200136CF6 /* version.cc */,
 				897FF3300FAA0ED200136CF6 /* version.h */,
+				893E24CB12B14B520083370F /* virtual-frame-arm-inl.h */,
 				9FA38BCD1175B30400C4CD55 /* virtual-frame-arm.cc */,
 				58950D570F55514900F3E8BA /* virtual-frame-arm.h */,
 				9FA38BB01175B2D200C4CD55 /* virtual-frame-heavy-inl.h */,
 				58950D580F55514900F3E8BA /* virtual-frame-heavy.cc */,
-				58950D580F55514900F3E8BA /* virtual-frame-heavy.cc */,
 				9FA38BC41175B2E500C4CD55 /* virtual-frame-ia32.cc */,
 				58950D590F55514900F3E8BA /* virtual-frame-ia32.h */,
 				9FA38BB11175B2D200C4CD55 /* virtual-frame-inl.h */,
 				9FA38BB21175B2D200C4CD55 /* virtual-frame-light-inl.h */,
 				58950D560F55514900F3E8BA /* virtual-frame-light.cc */,
-				58950D560F55514900F3E8BA /* virtual-frame-light.cc */,
 				58950D5A0F55514900F3E8BA /* virtual-frame.cc */,
 				58950D5B0F55514900F3E8BA /* virtual-frame.h */,
 				9FA37332116DD9F000C4CD55 /* vm-state-inl.h */,
@@ -1018,6 +1143,42 @@
 			name = "C++";
 			sourceTree = "<group>";
 		};
+		897FF0CF0E71996900D62E90 /* v8 */ = {
+			isa = PBXGroup;
+			children = (
+				897FF0D10E71999E00D62E90 /* include */,
+				897FF1B30E719BCE00D62E90 /* samples */,
+				897FF0D00E71999800D62E90 /* src */,
+				897FF1B40E719BE800D62E90 /* tools */,
+			);
+			name = v8;
+			path = ..;
+			sourceTree = "<group>";
+		};
+		897FF0D00E71999800D62E90 /* src */ = {
+			isa = PBXGroup;
+			children = (
+				893E24E212B14BD20083370F /* C++ */,
+				89A9C1630E71C8E300BE6CCA /* generated */,
+				897FF0D80E719ABA00D62E90 /* js */,
+				897FF0DE0E719B3400D62E90 /* third_party */,
+			);
+			path = src;
+			sourceTree = "<group>";
+		};
+		897FF0D10E71999E00D62E90 /* include */ = {
+			isa = PBXGroup;
+			children = (
+				893E248112B14AD40083370F /* v8-preparser.h */,
+				893E248212B14AD40083370F /* v8-testing.h */,
+				893E248312B14AD40083370F /* v8stdint.h */,
+				897FF0D40E719A8500D62E90 /* v8-debug.h */,
+				9FA36F62116BA26500C4CD55 /* v8-profiler.h */,
+				897FF0D50E719A8500D62E90 /* v8.h */,
+			);
+			path = include;
+			sourceTree = "<group>";
+		};
 		897FF0D80E719ABA00D62E90 /* js */ = {
 			isa = PBXGroup;
 			children = (
@@ -1080,10 +1241,11 @@
 			isa = PBXGroup;
 			children = (
 				897F767A0E71B4CC007ACF34 /* v8_shell */,
-				8939880B0F2A35FA007D5254 /* v8_shell */,
+				8939880B0F2A35FA007D5254 /* d8 */,
 				89F23C950E78D5B6006B2466 /* v8_shell-arm */,
 				89F23C870E78D5B2006B2466 /* libv8-arm.a */,
 				8970F2F00E719FB2006AE7B5 /* libv8.a */,
+				897C77D912B68E3D000767A8 /* d8-arm */,
 			);
 			name = Products;
 			sourceTree = "<group>";
@@ -1115,7 +1277,7 @@
 			);
 			name = d8_shell;
 			productName = v8_shell;
-			productReference = 8939880B0F2A35FA007D5254 /* v8_shell */;
+			productReference = 8939880B0F2A35FA007D5254 /* d8 */;
 			productType = "com.apple.product-type.tool";
 		};
 		8970F2EF0E719FB2006AE7B5 /* v8 */ = {
@@ -1135,6 +1297,24 @@
 			productReference = 8970F2F00E719FB2006AE7B5 /* libv8.a */;
 			productType = "com.apple.product-type.library.static";
 		};
+		897C77CB12B68E3D000767A8 /* d8_shell-arm */ = {
+			isa = PBXNativeTarget;
+			buildConfigurationList = 897C77D612B68E3D000767A8 /* Build configuration list for PBXNativeTarget "d8_shell-arm" */;
+			buildPhases = (
+				897C77CE12B68E3D000767A8 /* ShellScript */,
+				897C77CF12B68E3D000767A8 /* Sources */,
+				897C77D412B68E3D000767A8 /* Frameworks */,
+			);
+			buildRules = (
+			);
+			dependencies = (
+				897C77DC12B68E5D000767A8 /* PBXTargetDependency */,
+			);
+			name = "d8_shell-arm";
+			productName = v8_shell;
+			productReference = 897C77D912B68E3D000767A8 /* d8-arm */;
+			productType = "com.apple.product-type.tool";
+		};
 		897F76790E71B4CC007ACF34 /* v8_shell */ = {
 			isa = PBXNativeTarget;
 			buildConfigurationList = 897F767E0E71B4EA007ACF34 /* Build configuration list for PBXNativeTarget "v8_shell" */;
@@ -1193,7 +1373,14 @@
 			isa = PBXProject;
 			buildConfigurationList = 8915B86B0E719336009C4E19 /* Build configuration list for PBXProject "v8" */;
 			compatibilityVersion = "Xcode 3.1";
+			developmentRegion = English;
 			hasScannedForEncodings = 0;
+			knownRegions = (
+				English,
+				Japanese,
+				French,
+				German,
+			);
 			mainGroup = 8915B8660E719336009C4E19;
 			productRefGroup = 897FF1C00E719CB600D62E90 /* Products */;
 			projectDirPath = "";
@@ -1205,6 +1392,7 @@
 				893987FE0F2A35FA007D5254 /* d8_shell */,
 				89F23C3C0E78D5B2006B2466 /* v8-arm */,
 				89F23C880E78D5B6006B2466 /* v8_shell-arm */,
+				897C77CB12B68E3D000767A8 /* d8_shell-arm */,
 			);
 		};
 /* End PBXProject section */
@@ -1223,6 +1411,19 @@
 			shellPath = /bin/sh;
 			shellScript = "set -ex\nJS_FILES=\"d8.js\"\\\n\" macros.py\"\n\nV8ROOT=\"${SRCROOT}/..\"\n\nSRC_DIR=\"${V8ROOT}/src\"\n\nNATIVE_JS_FILES=\"\"\n\nfor i in ${JS_FILES} ; do\n  NATIVE_JS_FILES+=\"${SRC_DIR}/${i} \"\ndone\n\nV8_GENERATED_SOURCES_DIR=\"${CONFIGURATION_TEMP_DIR}/generated\"\nmkdir -p \"${V8_GENERATED_SOURCES_DIR}\"\n\nD8_CC=\"${V8_GENERATED_SOURCES_DIR}/d8-js.cc\"\nD8_EMPTY_CC=\"${V8_GENERATED_SOURCES_DIR}/d8-js-empty.cc\"\n\npython \"${V8ROOT}/tools/js2c.py\" \\\n  \"${D8_CC}.new\" \\\n  \"${D8_EMPTY_CC}.new\" \\\n  \"D8\" \\\n  ${NATIVE_JS_FILES}\n\n# Only use the new files if they're different from the existing files (if any),\n# preserving the existing files' timestamps when there are no changes.  This\n# minimizes unnecessary build activity for a no-change build.\n\nif ! diff -q \"${D8_CC}.new\" \"${D8_CC}\" >& /dev/null ; then\n  mv \"${D8_CC}.new\" \"${D8_CC}\"\nelse\n  rm \"${D8_CC}.new\"\nfi\n\nif ! diff -q \"${D8_EMPTY_CC}.new\" \"${D8_EMPTY_CC}\" >& /dev/null ; then\n  mv \"${D8_EMPTY_CC}.new\" \"${D8_EMPTY_CC}\"\nelse\n  rm \"${D8_EMPTY_CC}.new\"\nfi\n";
 		};
+		897C77CE12B68E3D000767A8 /* ShellScript */ = {
+			isa = PBXShellScriptBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+			);
+			inputPaths = (
+			);
+			outputPaths = (
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+			shellPath = /bin/sh;
+			shellScript = "set -ex\nJS_FILES=\"d8.js\"\\\n\" macros.py\"\n\nV8ROOT=\"${SRCROOT}/..\"\n\nSRC_DIR=\"${V8ROOT}/src\"\n\nNATIVE_JS_FILES=\"\"\n\nfor i in ${JS_FILES} ; do\n  NATIVE_JS_FILES+=\"${SRC_DIR}/${i} \"\ndone\n\nV8_GENERATED_SOURCES_DIR=\"${CONFIGURATION_TEMP_DIR}/generated\"\nmkdir -p \"${V8_GENERATED_SOURCES_DIR}\"\n\nD8_CC=\"${V8_GENERATED_SOURCES_DIR}/d8-js.cc\"\nD8_EMPTY_CC=\"${V8_GENERATED_SOURCES_DIR}/d8-js-empty.cc\"\n\npython \"${V8ROOT}/tools/js2c.py\" \\\n  \"${D8_CC}.new\" \\\n  \"${D8_EMPTY_CC}.new\" \\\n  \"D8\" \\\n  ${NATIVE_JS_FILES}\n\n# Only use the new files if they're different from the existing files (if any),\n# preserving the existing files' timestamps when there are no changes.  This\n# minimizes unnecessary build activity for a no-change build.\n\nif ! diff -q \"${D8_CC}.new\" \"${D8_CC}\" >& /dev/null ; then\n  mv \"${D8_CC}.new\" \"${D8_CC}\"\nelse\n  rm \"${D8_CC}.new\"\nfi\n\nif ! diff -q \"${D8_EMPTY_CC}.new\" \"${D8_EMPTY_CC}\" >& /dev/null ; then\n  mv \"${D8_EMPTY_CC}.new\" \"${D8_EMPTY_CC}\"\nelse\n  rm \"${D8_EMPTY_CC}.new\"\nfi\n";
+		};
 		89EA6FB50E71AA1F00F59E1B /* ShellScript */ = {
 			isa = PBXShellScriptBuildPhase;
 			buildActionMask = 2147483647;
@@ -1299,7 +1500,6 @@
 				89A88E000E71A6540043BA31 /* disasm-ia32.cc in Sources */,
 				89A88E010E71A6550043BA31 /* disassembler.cc in Sources */,
 				9FA38BBB1175B2D200C4CD55 /* diy-fp.cc in Sources */,
-				89A88E020E71A65A0043BA31 /* dtoa-config.c in Sources */,
 				89A88E030E71A65B0043BA31 /* execution.cc in Sources */,
 				89A88E040E71A65D0043BA31 /* factory.cc in Sources */,
 				9FA38BBC1175B2D200C4CD55 /* fast-dtoa.cc in Sources */,
@@ -1355,7 +1555,7 @@
 				89A88E1F0E71A6B40043BA31 /* snapshot-common.cc in Sources */,
 				89A88E200E71A6B60043BA31 /* snapshot-empty.cc in Sources */,
 				89A88E210E71A6B70043BA31 /* spaces.cc in Sources */,
-				89A88E220E71A6BC0043BA31 /* string-search.cc in Sources */,
+				89A88E220E71A6BC0043BA31 /* string-stream.cc in Sources */,
 				89A88E220E71A6BC0043BA31 /* string-stream.cc in Sources */,
 				89A88E230E71A6BE0043BA31 /* stub-cache-ia32.cc in Sources */,
 				89A88E240E71A6BF0043BA31 /* stub-cache.cc in Sources */,
@@ -1375,6 +1575,37 @@
 				58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */,
 				89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */,
 				C68081B112251239001EAFE4 /* code-stubs-ia32.cc in Sources */,
+				893E24B712B14B3D0083370F /* bignum-dtoa.cc in Sources */,
+				893E24B812B14B3D0083370F /* bignum.cc in Sources */,
+				893E24B912B14B3D0083370F /* cached-powers.cc in Sources */,
+				893E24BA12B14B3D0083370F /* deoptimizer.cc in Sources */,
+				893E24BB12B14B3D0083370F /* hydrogen-instructions.cc in Sources */,
+				893E24BC12B14B3D0083370F /* hydrogen.cc in Sources */,
+				893E24BD12B14B3D0083370F /* lithium-allocator.cc in Sources */,
+				893E24BE12B14B3D0083370F /* preparse-data.cc in Sources */,
+				893E24BF12B14B3D0083370F /* preparser-api.cc in Sources */,
+				893E24C012B14B3D0083370F /* preparser.cc in Sources */,
+				893E24C112B14B3D0083370F /* runtime-profiler.cc in Sources */,
+				893E24C212B14B3D0083370F /* safepoint-table.cc in Sources */,
+				893E24C312B14B3D0083370F /* scanner-base.cc in Sources */,
+				893E24C412B14B3D0083370F /* string-search.cc in Sources */,
+				893E24C512B14B3D0083370F /* strtod.cc in Sources */,
+				893E24D512B14B8A0083370F /* deoptimizer-ia32.cc in Sources */,
+				893E24D612B14B8A0083370F /* lithium-codegen-ia32.cc in Sources */,
+				893E24D712B14B8A0083370F /* lithium-ia32.cc in Sources */,
+				893E24DC12B14B9F0083370F /* externalize-string-extension.cc in Sources */,
+				893E24DD12B14B9F0083370F /* gc-extension.cc in Sources */,
+			);
+			runOnlyForDeploymentPostprocessing = 0;
+		};
+		897C77CF12B68E3D000767A8 /* Sources */ = {
+			isa = PBXSourcesBuildPhase;
+			buildActionMask = 2147483647;
+			files = (
+				897C77D012B68E3D000767A8 /* d8-debug.cc in Sources */,
+				897C77D112B68E3D000767A8 /* d8-js.cc in Sources */,
+				897C77D212B68E3D000767A8 /* d8-posix.cc in Sources */,
+				897C77D312B68E3D000767A8 /* d8.cc in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
@@ -1423,7 +1654,6 @@
 				89F23C9B0E78D5EE006B2466 /* disasm-arm.cc in Sources */,
 				89F23C540E78D5B2006B2466 /* disassembler.cc in Sources */,
 				9FA38BB41175B2D200C4CD55 /* diy-fp.cc in Sources */,
-				89F23C550E78D5B2006B2466 /* dtoa-config.c in Sources */,
 				89F23C560E78D5B2006B2466 /* execution.cc in Sources */,
 				89F23C570E78D5B2006B2466 /* factory.cc in Sources */,
 				9FA38BB51175B2D200C4CD55 /* fast-dtoa.cc in Sources */,
@@ -1480,7 +1710,7 @@
 				89F23C730E78D5B2006B2466 /* snapshot-common.cc in Sources */,
 				89F23C740E78D5B2006B2466 /* snapshot-empty.cc in Sources */,
 				89F23C750E78D5B2006B2466 /* spaces.cc in Sources */,
-				89F23C760E78D5B2006B2466 /* string-search.cc in Sources */,
+				89F23C760E78D5B2006B2466 /* string-stream.cc in Sources */,
 				89F23C760E78D5B2006B2466 /* string-stream.cc in Sources */,
 				89F23CA00E78D609006B2466 /* stub-cache-arm.cc in Sources */,
 				89F23C780E78D5B2006B2466 /* stub-cache.cc in Sources */,
@@ -1500,6 +1730,26 @@
 				58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */,
 				89F23C820E78D5B2006B2466 /* zone.cc in Sources */,
 				C68081AD1225120B001EAFE4 /* code-stubs-arm.cc in Sources */,
+				893E24A812B14B3D0083370F /* bignum-dtoa.cc in Sources */,
+				893E24A912B14B3D0083370F /* bignum.cc in Sources */,
+				893E24AA12B14B3D0083370F /* cached-powers.cc in Sources */,
+				893E24AB12B14B3D0083370F /* deoptimizer.cc in Sources */,
+				893E24AC12B14B3D0083370F /* hydrogen-instructions.cc in Sources */,
+				893E24AD12B14B3D0083370F /* hydrogen.cc in Sources */,
+				893E24AE12B14B3D0083370F /* lithium-allocator.cc in Sources */,
+				893E24AF12B14B3D0083370F /* preparse-data.cc in Sources */,
+				893E24B012B14B3D0083370F /* preparser-api.cc in Sources */,
+				893E24B112B14B3D0083370F /* preparser.cc in Sources */,
+				893E24B212B14B3D0083370F /* runtime-profiler.cc in Sources */,
+				893E24B312B14B3D0083370F /* safepoint-table.cc in Sources */,
+				893E24B412B14B3D0083370F /* scanner-base.cc in Sources */,
+				893E24B512B14B3D0083370F /* string-search.cc in Sources */,
+				893E24B612B14B3D0083370F /* strtod.cc in Sources */,
+				893E24CC12B14B520083370F /* deoptimizer-arm.cc in Sources */,
+				893E24CD12B14B520083370F /* lithium-arm.cc in Sources */,
+				893E24CE12B14B520083370F /* lithium-codegen-arm.cc in Sources */,
+				893E24DE12B14B9F0083370F /* externalize-string-extension.cc in Sources */,
+				893E24DF12B14B9F0083370F /* gc-extension.cc in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
@@ -1549,11 +1799,21 @@
 			target = 89F23C880E78D5B6006B2466 /* v8_shell-arm */;
 			targetProxy = 896FD03F0E78D735003DFB6A /* PBXContainerItemProxy */;
 		};
+		897C77DC12B68E5D000767A8 /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 89F23C3C0E78D5B2006B2466 /* v8-arm */;
+			targetProxy = 897C77DB12B68E5D000767A8 /* PBXContainerItemProxy */;
+		};
 		897F76830E71B6AC007ACF34 /* PBXTargetDependency */ = {
 			isa = PBXTargetDependency;
 			target = 8970F2EF0E719FB2006AE7B5 /* v8 */;
 			targetProxy = 897F76820E71B6AC007ACF34 /* PBXContainerItemProxy */;
 		};
+		89EED40D12B69A0A0075BE1C /* PBXTargetDependency */ = {
+			isa = PBXTargetDependency;
+			target = 897C77CB12B68E3D000767A8 /* d8_shell-arm */;
+			targetProxy = 89EED40C12B69A0A0075BE1C /* PBXContainerItemProxy */;
+		};
 /* End PBXTargetDependency section */
 
 /* Begin XCBuildConfiguration section */
@@ -1590,6 +1850,7 @@
 					DEBUG,
 					ENABLE_LOGGING_AND_PROFILING,
 					V8_ENABLE_CHECKS,
+					OBJECT_PRINT,
 					ENABLE_VMSTATE_TRACKING,
 				);
 				GCC_SYMBOLS_PRIVATE_EXTERN = YES;
@@ -1654,10 +1915,11 @@
 					V8_TARGET_ARCH_IA32,
 					DEBUG,
 					V8_ENABLE_CHECKS,
+					OBJECT_PRINT,
 					ENABLE_DEBUGGER_SUPPORT,
 				);
 				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = v8_shell;
+				PRODUCT_NAME = d8;
 			};
 			name = Debug;
 		};
@@ -1671,7 +1933,7 @@
 					ENABLE_DEBUGGER_SUPPORT,
 				);
 				HEADER_SEARCH_PATHS = ../src;
-				PRODUCT_NAME = v8_shell;
+				PRODUCT_NAME = d8;
 			};
 			name = Release;
 		};
@@ -1708,6 +1970,36 @@
 			};
 			name = Release;
 		};
+		897C77D712B68E3D000767A8 /* Debug */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				GCC_PREPROCESSOR_DEFINITIONS = (
+					"$(GCC_PREPROCESSOR_DEFINITIONS)",
+					V8_TARGET_ARCH_IA32,
+					DEBUG,
+					V8_ENABLE_CHECKS,
+					OBJECT_PRINT,
+					ENABLE_DEBUGGER_SUPPORT,
+				);
+				HEADER_SEARCH_PATHS = ../src;
+				PRODUCT_NAME = "d8-arm";
+			};
+			name = Debug;
+		};
+		897C77D812B68E3D000767A8 /* Release */ = {
+			isa = XCBuildConfiguration;
+			buildSettings = {
+				GCC_PREPROCESSOR_DEFINITIONS = (
+					"$(GCC_PREPROCESSOR_DEFINITIONS)",
+					V8_TARGET_ARCH_IA32,
+					NDEBUG,
+					ENABLE_DEBUGGER_SUPPORT,
+				);
+				HEADER_SEARCH_PATHS = ../src;
+				PRODUCT_NAME = "d8-arm";
+			};
+			name = Release;
+		};
 		897F767C0E71B4CC007ACF34 /* Debug */ = {
 			isa = XCBuildConfiguration;
 			buildSettings = {
@@ -1826,6 +2118,15 @@
 			defaultConfigurationIsVisible = 0;
 			defaultConfigurationName = Release;
 		};
+		897C77D612B68E3D000767A8 /* Build configuration list for PBXNativeTarget "d8_shell-arm" */ = {
+			isa = XCConfigurationList;
+			buildConfigurations = (
+				897C77D712B68E3D000767A8 /* Debug */,
+				897C77D812B68E3D000767A8 /* Release */,
+			);
+			defaultConfigurationIsVisible = 0;
+			defaultConfigurationName = Release;
+		};
 		897F767E0E71B4EA007ACF34 /* Build configuration list for PBXNativeTarget "v8_shell" */ = {
 			isa = XCConfigurationList;
 			buildConfigurations = (
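
Note on the d8-js shell script build phase added above: js2c.py writes the generated d8-js.cc and d8-js-empty.cc to ".new" files first, and the script moves a ".new" file over the existing output only when the contents actually differ, so a no-change build preserves timestamps and avoids rebuilding dependent targets. A minimal Python sketch of the same write-if-changed idiom (the helper name and paths below are illustrative, not part of the patch):

    import filecmp
    import os
    import shutil

    def replace_if_changed(new_path, final_path):
        # Install new_path over final_path only when the contents differ;
        # otherwise discard the fresh copy so final_path keeps its old
        # timestamp and downstream build steps are not re-run.
        if os.path.exists(final_path) and filecmp.cmp(new_path, final_path,
                                                      shallow=False):
            os.remove(new_path)
        else:
            shutil.move(new_path, final_path)

    # Mirrors the script's handling of D8_CC and D8_EMPTY_CC:
    # replace_if_changed("generated/d8-js.cc.new", "generated/d8-js.cc")
    # replace_if_changed("generated/d8-js-empty.cc.new", "generated/d8-js-empty.cc")
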
diff --git a/tools/visual_studio/debug.vsprops b/tools/visual_studio/debug.vsprops
index 5e3555a..60b79fe 100644
--- a/tools/visual_studio/debug.vsprops
+++ b/tools/visual_studio/debug.vsprops
@@ -7,7 +7,7 @@
 	<Tool
 		Name="VCCLCompilerTool"
 		Optimization="0"
-		PreprocessorDefinitions="DEBUG;_DEBUG;ENABLE_DISASSEMBLER;V8_ENABLE_CHECKS"
+		PreprocessorDefinitions="DEBUG;_DEBUG;ENABLE_DISASSEMBLER;V8_ENABLE_CHECKS;OBJECT_PRINT"
 		RuntimeLibrary="1"
 	/>
 	<Tool
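
Note on the debug.vsprops hunk above: PreprocessorDefinitions is a semicolon-delimited list; a stray comma between two names would fuse them into a single bogus token, so neither macro would reliably take effect in the build. A quick, purely illustrative sanity check over such an attribute value:

    # Illustrative only: flag comma-fused names in a define list.
    defs = "DEBUG;_DEBUG;ENABLE_DISASSEMBLER;V8_ENABLE_CHECKS;OBJECT_PRINT"
    bad = [d for d in defs.split(";") if "," in d]
    assert not bad, "malformed defines: %s" % bad
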
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index 902faff..e53b3fc 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -1,1196 +1,1280 @@
 <?xml version="1.0" encoding="Windows-1252"?>
 <VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_base"
-	ProjectGUID="{EC8B7909-62AF-470D-A75D-E1D89C837142}"
-	RootNamespace="v8_base"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<Filter
-			Name="src"
-			>
-			<File
-				RelativePath="..\..\src\accessors.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\accessors.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\allocation.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\allocation.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\api.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\api.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arguments.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\assembler-ia32-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\assembler-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\assembler-ia32.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-irregexp-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-stack.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\assembler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\assembler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ast-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ast.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ast.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bignum.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bignum.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bignum-dtoa.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bignum-dtoa.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bootstrapper.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bootstrapper.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\builtins-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\builtins.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\builtins.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bytecodes-irregexp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cached-powers.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cached-powers.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\char-predicates-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\char-predicates.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\checks.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\checks.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\circular-queue-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\circular-queue.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\circular-queue.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\code-stubs.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\code-stubs.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\code-stubs-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\code-stubs-ia32.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\code.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\codegen-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\codegen-ia32.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\codegen-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\codegen.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\codegen.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\compilation-cache.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\compilation-cache.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\compiler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\compiler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\contexts.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\contexts.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\conversions-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\conversions.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\conversions.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\counters.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\counters.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\cpu-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cpu.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cpu-profiler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cpu-profiler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cpu-profiler-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\data-flow.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\data-flow.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\dateparser.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\dateparser.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug-agent.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug-agent.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\debug-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\disassembler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\disassembler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\diy-fp.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\diy-fp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\double.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\dtoa.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\dtoa.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\execution.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\execution.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\factory.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\factory.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\fast-dtoa.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\fast-dtoa.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\fixed-dtoa.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\fixed-dtoa.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\flags.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\flags.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frame-element.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frame-element.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\frames-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\frames-ia32.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frames-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frames.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frames.h"
-				>
-			</File>
-                        <File
-                                RelativePath="..\..\src\ia32\full-codegen-ia32.cc"
-                                >
-                        </File>
-                        <File
-                                RelativePath="..\..\src\full-codegen.cc"
-                                >
-                        </File>
-                        <File
-                                RelativePath="..\..\src\full-codegen.h"
-                                >
-                        </File>
-			<File
-				RelativePath="..\..\src\func-name-inferrer.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\func-name-inferrer.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\global-handles.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\global-handles.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\globals.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\handles-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\handles.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\handles.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\hashmap.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\hashmap.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap-profiler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap-profiler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\ic-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ic-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ic.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ic.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\interceptors.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\interpreter-irregexp.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\interpreter-irregexp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target-heavy-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target-heavy.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\jump-target-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target-heavy.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jsregexp.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jsregexp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\list-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\list.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\liveedit.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\liveedit.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log-utils.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log-utils.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\macro-assembler-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\macro-assembler-ia32.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\macro-assembler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\mark-compact.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\mark-compact.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\memory.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\messages.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\messages.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\natives.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\objects-debug.cc"
-				>
-				<FileConfiguration
-					Name="Release|Win32"
-					ExcludedFromBuild="true"
-					>
-					<Tool
-						Name="VCCLCompilerTool"
-					/>
-				</FileConfiguration>
-			</File>
-			<File
-				RelativePath="..\..\src\objects-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\objects-visiting.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\objects-visiting.h"
-				>
-			</File>
+  ProjectType="Visual C++"
+  Version="8.00"
+  Name="v8_base"
+  ProjectGUID="{EC8B7909-62AF-470D-A75D-E1D89C837142}"
+  RootNamespace="v8_base"
+  Keyword="Win32Proj"
+  >
+  <Platforms>
+    <Platform
+      Name="Win32"
+    />
+  </Platforms>
+  <ToolFiles>
+  </ToolFiles>
+  <Configurations>
+    <Configuration
+      Name="Debug|Win32"
+      ConfigurationType="4"
+      InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\debug.vsprops"
+      >
+      <Tool
+        Name="VCPreBuildEventTool"
+      />
+      <Tool
+        Name="VCCustomBuildTool"
+      />
+      <Tool
+        Name="VCXMLDataGeneratorTool"
+      />
+      <Tool
+        Name="VCWebServiceProxyGeneratorTool"
+      />
+      <Tool
+        Name="VCMIDLTool"
+      />
+      <Tool
+        Name="VCCLCompilerTool"
+      />
+      <Tool
+        Name="VCManagedResourceCompilerTool"
+      />
+      <Tool
+        Name="VCResourceCompilerTool"
+      />
+      <Tool
+        Name="VCPreLinkEventTool"
+      />
+      <Tool
+        Name="VCLibrarianTool"
+      />
+      <Tool
+        Name="VCALinkTool"
+      />
+      <Tool
+        Name="VCXDCMakeTool"
+      />
+      <Tool
+        Name="VCBscMakeTool"
+      />
+      <Tool
+        Name="VCFxCopTool"
+      />
+      <Tool
+        Name="VCPostBuildEventTool"
+      />
+    </Configuration>
+    <Configuration
+      Name="Release|Win32"
+      ConfigurationType="4"
+      InheritedPropertySheets=".\common.vsprops;.\ia32.vsprops;.\release.vsprops"
+      >
+      <Tool
+        Name="VCPreBuildEventTool"
+      />
+      <Tool
+        Name="VCCustomBuildTool"
+      />
+      <Tool
+        Name="VCXMLDataGeneratorTool"
+      />
+      <Tool
+        Name="VCWebServiceProxyGeneratorTool"
+      />
+      <Tool
+        Name="VCMIDLTool"
+      />
+      <Tool
+        Name="VCCLCompilerTool"
+      />
+      <Tool
+        Name="VCManagedResourceCompilerTool"
+      />
+      <Tool
+        Name="VCResourceCompilerTool"
+      />
+      <Tool
+        Name="VCPreLinkEventTool"
+      />
+      <Tool
+        Name="VCLibrarianTool"
+      />
+      <Tool
+        Name="VCALinkTool"
+      />
+      <Tool
+        Name="VCXDCMakeTool"
+      />
+      <Tool
+        Name="VCBscMakeTool"
+      />
+      <Tool
+        Name="VCFxCopTool"
+      />
+      <Tool
+        Name="VCPostBuildEventTool"
+      />
+    </Configuration>
+  </Configurations>
+  <References>
+  </References>
+  <Files>
+    <Filter
+      Name="src"
+      >
+      <File
+        RelativePath="..\..\src\accessors.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\accessors.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\allocation.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\allocation.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\api.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\api.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\atomicops_internals_x86_msvc.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arguments.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\assembler-ia32-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\assembler-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\assembler-ia32.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-irregexp-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-stack.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\assembler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\assembler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ast-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ast.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ast.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bignum.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bignum.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bignum-dtoa.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bignum-dtoa.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bootstrapper.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bootstrapper.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\builtins-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\builtins.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\builtins.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bytecodes-irregexp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cached-powers.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cached-powers.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\char-predicates-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\char-predicates.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\checks.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\checks.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\circular-queue-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\circular-queue.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\circular-queue.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\code-stubs.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\code-stubs.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\code-stubs-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\code-stubs-ia32.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\code.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\codegen-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\codegen-ia32.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\codegen-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\codegen.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\codegen.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\compilation-cache.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\compilation-cache.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\compiler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\compiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\contexts.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\contexts.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\conversions-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\conversions.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\conversions.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\counters.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\counters.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\cpu-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cpu.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cpu-profiler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cpu-profiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cpu-profiler-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\data-flow.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\data-flow.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\dateparser.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\dateparser.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\debug-agent.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\debug-agent.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\debug-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\debug.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\debug.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\deoptimizer-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\deoptimizer.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\deoptimizer.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\disassembler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\disassembler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\diy-fp.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\diy-fp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\double.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\dtoa.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\dtoa.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\execution.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\execution.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\factory.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\factory.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\fast-dtoa.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\fast-dtoa.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\fixed-dtoa.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\fixed-dtoa.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\flags.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\flags.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frame-element.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frame-element.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\frames-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\frames-ia32.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frames-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frames.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frames.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\full-codegen-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\full-codegen.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\full-codegen.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\func-name-inferrer.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\func-name-inferrer.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\global-handles.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\global-handles.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\globals.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\handles-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\handles.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\handles.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hashmap.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hashmap.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap-profiler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap-profiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hydrogen.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hydrogen.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hydrogen-instructions.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hydrogen-instructions.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\ic-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ic-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ic.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ic.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\interceptors.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\interpreter-irregexp.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\interpreter-irregexp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target-heavy-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target-heavy.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\jump-target-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target-heavy.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jsregexp.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jsregexp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\list-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\list.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\lithium-allocator.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\lithium-allocator.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\lithium-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\lithium-ia32.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\lithium-codegen-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\lithium-codegen-ia32.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\liveedit.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\liveedit.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log-utils.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log-utils.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\macro-assembler-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\macro-assembler-ia32.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\macro-assembler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\mark-compact.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\mark-compact.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\memory.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\messages.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\messages.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\natives.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects-debug.cc"
+        >
+        <FileConfiguration
+          Name="Release|Win32"
+          ExcludedFromBuild="true"
+          >
+          <Tool
+            Name="VCCLCompilerTool"
+          />
+        </FileConfiguration>
+      </File>
+      <File
+        RelativePath="..\..\src\objects-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects-visiting.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects-visiting.h"
+        >
+      </File>
 
-			<File
-				RelativePath="..\..\src\objects.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\objects.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\oprofile-agent.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\oprofile-agent.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\parser.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\parser.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\preparser.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\preparser.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\preparse-data.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\preparse-data.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\profile-generator.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\profile-generator.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\profile-generator-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\platform-win32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\platform.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\prettyprinter.cc"
-				>
-				<FileConfiguration
-					Name="Release|Win32"
-					ExcludedFromBuild="true"
-					>
-					<Tool
-						Name="VCCLCompilerTool"
-					/>
-				</FileConfiguration>
-			</File>
-			<File
-				RelativePath="..\..\src\prettyprinter.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\property.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\property.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\regexp-macro-assembler-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\regexp-macro-assembler-ia32.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-irregexp.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-irregexp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-tracer.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-tracer.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-stack.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\register-allocator.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\register-allocator.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\register-allocator-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\rewriter.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\rewriter.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\runtime.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\runtime.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scanner-base.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scanner-base.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scanner.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scanner.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scopeinfo.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scopeinfo.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scopes.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scopes.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\serialize.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\serialize.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\shell.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\snapshot-common.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\snapshot.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\spaces-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\spaces.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\spaces.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string-search.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string-search.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string-stream.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string-stream.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\strtod.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\strtod.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\stub-cache-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\stub-cache.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\stub-cache.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\token.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\token.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\top.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\top.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\type-info.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\type-info.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\unbound-queue-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\unbound-queue.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\unicode-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\unicode.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\utils.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\utils.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8-counters.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8-counters.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8checks.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8globals.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8threads.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8threads.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8utils.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\variables.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\variables.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\version.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\version.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame-heavy-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\virtual-frame-ia32.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ia32\virtual-frame-ia32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame-heavy.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\vm-state-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\vm-state.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\zone-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\zone.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\zone.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\extensions\externalize-string-extension.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\extensions\externalize-string-extension.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\extensions\gc-extension.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\extensions\gc-extension.h"
-				>
-			</File>
-			<Filter
-				Name="third party"
-				>
-				<File
-					RelativePath="..\..\src\ia32\disasm-ia32.cc"
-					>
-				</File>
-				<File
-					RelativePath="..\..\src\disasm.h"
-					>
-				</File>
-			</Filter>
-			<Filter
-				Name="generated files"
-				>
-				<File
-					RelativePath="..\..\src\unicode.cc"
-					>
-				</File>
-			</Filter>
-		</Filter>
-		<Filter
-			Name="include"
-			>
-			<File
-				RelativePath="..\..\include\v8-debug.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\include\v8-profiler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\include\v8.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\include\v8stdint.h"
-				>
-			</File>
-		</Filter>
-	</Files>
-	<Globals>
-	</Globals>
+      <File
+        RelativePath="..\..\src\objects.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\oprofile-agent.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\oprofile-agent.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\parser.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\parser.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\preparser.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\preparser.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\preparse-data.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\preparse-data.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\profile-generator.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\profile-generator.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\profile-generator-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\platform-win32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\platform.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\prettyprinter.cc"
+        >
+        <FileConfiguration
+          Name="Release|Win32"
+          ExcludedFromBuild="true"
+          >
+          <Tool
+            Name="VCCLCompilerTool"
+          />
+        </FileConfiguration>
+      </File>
+      <File
+        RelativePath="..\..\src\prettyprinter.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\property.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\property.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\regexp-macro-assembler-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\regexp-macro-assembler-ia32.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-irregexp.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-irregexp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-tracer.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-tracer.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-stack.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\register-allocator.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\register-allocator.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\register-allocator-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\rewriter.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\rewriter.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\runtime.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\runtime.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\runtime-profiler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\runtime-profiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\safepoint-table.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\safepoint-table.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scanner-base.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scanner-base.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scanner.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scanner.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scopeinfo.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scopeinfo.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scopes.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scopes.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\serialize.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\serialize.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\shell.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\snapshot-common.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\snapshot.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\spaces-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\spaces.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\spaces.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\string-search.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\string-search.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\string-stream.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\string-stream.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\strtod.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\strtod.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\stub-cache-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\stub-cache.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\stub-cache.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\token.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\token.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\top.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\top.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\type-info.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\type-info.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\unbound-queue-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\unbound-queue.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\unicode-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\unicode.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\utils.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\utils.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8-counters.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8-counters.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8checks.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8globals.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8threads.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8threads.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8utils.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\variables.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\variables.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\version.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\version.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame-heavy-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\virtual-frame-ia32.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ia32\virtual-frame-ia32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame-heavy.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\vm-state-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\vm-state.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\zone-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\zone.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\zone.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\extensions\externalize-string-extension.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\extensions\externalize-string-extension.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\extensions\gc-extension.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\extensions\gc-extension.h"
+        >
+      </File>
+      <Filter
+        Name="third party"
+        >
+        <File
+          RelativePath="..\..\src\ia32\disasm-ia32.cc"
+          >
+        </File>
+        <File
+          RelativePath="..\..\src\disasm.h"
+          >
+        </File>
+      </Filter>
+      <Filter
+        Name="generated files"
+        >
+        <File
+          RelativePath="..\..\src\unicode.cc"
+          >
+        </File>
+      </Filter>
+    </Filter>
+    <Filter
+      Name="include"
+      >
+      <File
+        RelativePath="..\..\include\v8-debug.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\include\v8-profiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\include\v8-testing.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\include\v8.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\include\v8stdint.h"
+        >
+      </File>
+    </Filter>
+  </Files>
+  <Globals>
+  </Globals>
 </VisualStudioProject>
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index b87fdf8..1054958 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -1,1166 +1,1234 @@
 <?xml version="1.0" encoding="Windows-1252"?>
 <VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_base"
-	ProjectGUID="{EC8B7909-62AF-470D-A75D-E1D89C837142}"
-	RootNamespace="v8_base"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="Win32"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|Win32"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\debug.vsprops;.\arm.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|Win32"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\release.vsprops;.\arm.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<Filter
-			Name="src"
-			>
-			<File
-				RelativePath="..\..\src\accessors.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\accessors.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\allocation.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\allocation.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\api.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\api.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arguments.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\assembler-arm-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\assembler-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\assembler-arm.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-stack.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-irregexp-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\assembler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\assembler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ast-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ast.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ast.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bootstrapper.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bootstrapper.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\builtins-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bignum.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bignum.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bignum-dtoa.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bignum-dtoa.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\builtins.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\builtins.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bytecodes-irregexp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cached-powers.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cached-powers.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\char-predicates-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\char-predicates.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\checks.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\checks.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\circular-queue-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\circular-queue.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\circular-queue.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\code-stubs.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\code-stubs.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\code-stubs-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\code-stubs-arm.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\code.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\codegen-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\codegen-arm.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\codegen-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\codegen.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\codegen.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\compilation-cache.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\compilation-cache.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\compiler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\compiler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\constants-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\constants-arm.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\contexts.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\contexts.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\conversions-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\conversions.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\conversions.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\counters.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\counters.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\cpu-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cpu.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cpu-profiler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cpu-profiler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cpu-profiler-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\data-flow.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\data-flow.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\dateparser.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\dateparser.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug-agent.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug-agent.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\debug-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\disassembler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\disassembler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\diy-fp.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\diy-fp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\double.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\dtoa.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\dtoa.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\execution.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\execution.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\factory.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\factory.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\fast-dtoa.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\fast-dtoa.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\fixed-dtoa.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\fixed-dtoa.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\flags.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\flags.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\flow-graph.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\flow-graph.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frame-element.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frame-element.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\frames-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\frames-arm.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frames-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frames.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frames.h"
-				>
-			</File>
-                        <File
-                                RelativePath="..\..\src\arm\full-codegen-arm.cc"
-                                >
-                        </File>
-                        <File
-                                RelativePath="..\..\src\full-codegen.cc"
-                                >
-                        </File>
-                        <File
-                                RelativePath="..\..\src\full-codegen.h"
-                                >
-                        </File>
-			<File
-				RelativePath="..\..\src\func-name-inferrer.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\func-name-inferrer.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\global-handles.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\global-handles.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\globals.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\handles-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\handles.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\handles.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\hashmap.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\hashmap.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap-profiler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap-profiler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\ic-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ic-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ic.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ic.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\interceptors.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\interpreter-irregexp.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\interpreter-irregexp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target-light-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target-light.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\jump-target-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target-light.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jsregexp.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jsregexp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\list-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\list.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\liveedit.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\liveedit.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log-utils.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log-utils.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\macro-assembler-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\macro-assembler-arm.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\macro-assembler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\mark-compact.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\mark-compact.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\memory.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\messages.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\messages.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\natives.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\objects-debug.cc"
-				>
-				<FileConfiguration
-					Name="Release|Win32"
-					ExcludedFromBuild="true"
-					>
-					<Tool
-						Name="VCCLCompilerTool"
-					/>
-				</FileConfiguration>
-			</File>
-			<File
-				RelativePath="..\..\src\objects-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\objects-visiting.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\objects-visiting.h"
-				>
-			<File
-				RelativePath="..\..\src\objects.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\objects.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\oprofile-agent.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\oprofile-agent.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\parser.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\parser.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\profile-generator.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\profile-generator.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\profile-generator-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\platform-win32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\platform.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\prettyprinter.cc"
-				>
-				<FileConfiguration
-					Name="Release|Win32"
-					ExcludedFromBuild="true"
-					>
-					<Tool
-						Name="VCCLCompilerTool"
-					/>
-				</FileConfiguration>
-			</File>
-			<File
-				RelativePath="..\..\src\prettyprinter.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\property.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\property.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\regexp-macro-assembler-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\regexp-macro-assembler-arm.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-irregexp.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-irregexp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-tracer.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-tracer.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-stack.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\register-allocator.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\register-allocator.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\register-allocator-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\rewriter.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\rewriter.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\runtime.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\runtime.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scanner.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scanner.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scopeinfo.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scopeinfo.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scopes.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scopes.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\serialize.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\serialize.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\shell.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\snapshot-common.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\snapshot.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\simulator-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\simulator-arm.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\spaces-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\spaces.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\spaces.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string-search.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string-search.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string-stream.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string-stream.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\strtod.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\strtod.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\stub-cache-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\stub-cache.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\stub-cache.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\token.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\token.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\top.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\top.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\type-info.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\type-info.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\unbound-queue-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\unbound-queue.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\unicode-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\unicode.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\utils.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\utils.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8-counters.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8-counters.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8threads.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8threads.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\variables.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\variables.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\version.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\version.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame-light-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\virtual-frame-arm-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\virtual-frame-arm.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arm\virtual-frame-arm.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame-light.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\vm-state-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\vm-state.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\zone-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\zone.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\zone.h"
-				>
-			</File>
-			<Filter
-				Name="third party"
-				>
-				<File
-					RelativePath="..\..\src\arm\disasm-arm.cc"
-					>
-				</File>
-				<File
-					RelativePath="..\..\src\disasm.h"
-					>
-				</File>
-			</Filter>
-			<Filter
-				Name="generated files"
-				>
-				<File
-					RelativePath="..\..\src\unicode.cc"
-					>
-				</File>
-			</Filter>
-		</Filter>
-		<Filter
-			Name="include"
-			>
-			<File
-				RelativePath="..\..\include\v8-debug.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\include\v8-profiler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\include\v8.h"
-				>
-			</File>
-		</Filter>
-	</Files>
-	<Globals>
-	</Globals>
+  ProjectType="Visual C++"
+  Version="8.00"
+  Name="v8_base"
+  ProjectGUID="{EC8B7909-62AF-470D-A75D-E1D89C837142}"
+  RootNamespace="v8_base"
+  Keyword="Win32Proj"
+  >
+  <Platforms>
+    <Platform
+      Name="Win32"
+    />
+  </Platforms>
+  <ToolFiles>
+  </ToolFiles>
+  <Configurations>
+    <Configuration
+      Name="Debug|Win32"
+      ConfigurationType="4"
+      InheritedPropertySheets=".\common.vsprops;.\debug.vsprops;.\arm.vsprops"
+      >
+      <Tool
+        Name="VCPreBuildEventTool"
+      />
+      <Tool
+        Name="VCCustomBuildTool"
+      />
+      <Tool
+        Name="VCXMLDataGeneratorTool"
+      />
+      <Tool
+        Name="VCWebServiceProxyGeneratorTool"
+      />
+      <Tool
+        Name="VCMIDLTool"
+      />
+      <Tool
+        Name="VCCLCompilerTool"
+      />
+      <Tool
+        Name="VCManagedResourceCompilerTool"
+      />
+      <Tool
+        Name="VCResourceCompilerTool"
+      />
+      <Tool
+        Name="VCPreLinkEventTool"
+      />
+      <Tool
+        Name="VCLibrarianTool"
+      />
+      <Tool
+        Name="VCALinkTool"
+      />
+      <Tool
+        Name="VCXDCMakeTool"
+      />
+      <Tool
+        Name="VCBscMakeTool"
+      />
+      <Tool
+        Name="VCFxCopTool"
+      />
+      <Tool
+        Name="VCPostBuildEventTool"
+      />
+    </Configuration>
+    <Configuration
+      Name="Release|Win32"
+      ConfigurationType="4"
+      InheritedPropertySheets=".\common.vsprops;.\release.vsprops;.\arm.vsprops"
+      >
+      <Tool
+        Name="VCPreBuildEventTool"
+      />
+      <Tool
+        Name="VCCustomBuildTool"
+      />
+      <Tool
+        Name="VCXMLDataGeneratorTool"
+      />
+      <Tool
+        Name="VCWebServiceProxyGeneratorTool"
+      />
+      <Tool
+        Name="VCMIDLTool"
+      />
+      <Tool
+        Name="VCCLCompilerTool"
+      />
+      <Tool
+        Name="VCManagedResourceCompilerTool"
+      />
+      <Tool
+        Name="VCResourceCompilerTool"
+      />
+      <Tool
+        Name="VCPreLinkEventTool"
+      />
+      <Tool
+        Name="VCLibrarianTool"
+      />
+      <Tool
+        Name="VCALinkTool"
+      />
+      <Tool
+        Name="VCXDCMakeTool"
+      />
+      <Tool
+        Name="VCBscMakeTool"
+      />
+      <Tool
+        Name="VCFxCopTool"
+      />
+      <Tool
+        Name="VCPostBuildEventTool"
+      />
+    </Configuration>
+  </Configurations>
+  <References>
+  </References>
+  <Files>
+    <Filter
+      Name="src"
+      >
+      <File
+        RelativePath="..\..\src\accessors.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\accessors.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\allocation.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\allocation.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\api.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\api.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\atomicops_internals_x86_msvc.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arguments.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\assembler-arm-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\assembler-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\assembler-arm.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-stack.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-irregexp-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\assembler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\assembler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ast-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ast.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ast.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bootstrapper.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bootstrapper.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\builtins-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bignum.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bignum.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bignum-dtoa.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bignum-dtoa.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\builtins.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\builtins.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bytecodes-irregexp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cached-powers.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cached-powers.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\char-predicates-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\char-predicates.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\checks.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\checks.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\circular-queue-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\circular-queue.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\circular-queue.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\code-stubs.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\code-stubs.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\code-stubs-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\code-stubs-arm.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\code.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\codegen-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\codegen-arm.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\codegen-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\codegen.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\codegen.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\compilation-cache.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\compilation-cache.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\compiler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\compiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\constants-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\constants-arm.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\contexts.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\contexts.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\conversions-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\conversions.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\conversions.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\counters.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\counters.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\cpu-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cpu.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cpu-profiler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cpu-profiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cpu-profiler-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\data-flow.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\data-flow.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\dateparser.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\dateparser.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\debug-agent.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\debug-agent.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\debug-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\debug.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\debug.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\deoptimizer-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\deoptimizer.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\deoptimizer.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\disassembler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\disassembler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\diy-fp.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\diy-fp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\double.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\dtoa.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\dtoa.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\execution.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\execution.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\factory.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\factory.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\fast-dtoa.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\fast-dtoa.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\fixed-dtoa.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\fixed-dtoa.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\flags.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\flags.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\flow-graph.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\flow-graph.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frame-element.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frame-element.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\frames-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\frames-arm.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frames-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frames.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frames.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\full-codegen-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\full-codegen.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\full-codegen.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\func-name-inferrer.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\func-name-inferrer.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\global-handles.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\global-handles.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\globals.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\handles-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\handles.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\handles.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hashmap.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hashmap.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hydrogen.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hydrogen.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap-profiler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap-profiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\ic-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ic-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ic.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ic.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\interceptors.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\interpreter-irregexp.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\interpreter-irregexp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target-light-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target-light.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\jump-target-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target-light.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jsregexp.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jsregexp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\list-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\list.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\lithium-allocator.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\lithium-allocator.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\lithium-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\lithium-arm.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\lithium-codegen-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\lithium-codegen-arm.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\liveedit.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\liveedit.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log-utils.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log-utils.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\macro-assembler-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\macro-assembler-arm.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\macro-assembler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\mark-compact.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\mark-compact.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\memory.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\messages.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\messages.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\natives.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects-debug.cc"
+        >
+        <FileConfiguration
+          Name="Release|Win32"
+          ExcludedFromBuild="true"
+          >
+          <Tool
+            Name="VCCLCompilerTool"
+          />
+        </FileConfiguration>
+      </File>
+      <File
+        RelativePath="..\..\src\objects-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects-visiting.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects-visiting.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\oprofile-agent.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\oprofile-agent.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\parser.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\parser.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\profile-generator.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\profile-generator.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\profile-generator-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\platform-win32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\platform.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\prettyprinter.cc"
+        >
+        <FileConfiguration
+          Name="Release|Win32"
+          ExcludedFromBuild="true"
+          >
+          <Tool
+            Name="VCCLCompilerTool"
+          />
+        </FileConfiguration>
+      </File>
+      <File
+        RelativePath="..\..\src\prettyprinter.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\property.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\property.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\regexp-macro-assembler-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\regexp-macro-assembler-arm.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-irregexp.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-irregexp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-tracer.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-tracer.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-stack.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\register-allocator.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\register-allocator.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\register-allocator-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\rewriter.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\rewriter.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\runtime.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\runtime.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\runtime-profiler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\runtime-profiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\safepoint-table.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\safepoint-table.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scanner.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scanner.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scopeinfo.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scopeinfo.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scopes.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scopes.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\serialize.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\serialize.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\shell.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\snapshot-common.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\snapshot.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\simulator-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\simulator-arm.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\spaces-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\spaces.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\spaces.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\string-search.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\string-search.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\string-stream.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\string-stream.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\strtod.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\strtod.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\stub-cache-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\stub-cache.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\stub-cache.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\token.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\token.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\top.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\top.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\type-info.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\type-info.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\unbound-queue-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\unbound-queue.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\unicode-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\unicode.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\utils.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\utils.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8-counters.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8-counters.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8threads.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8threads.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\variables.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\variables.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\version.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\version.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame-light-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\virtual-frame-arm-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\virtual-frame-arm.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arm\virtual-frame-arm.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame-light.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\vm-state-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\vm-state.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\zone-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\zone.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\zone.h"
+        >
+      </File>
+      <Filter
+        Name="third party"
+        >
+        <File
+          RelativePath="..\..\src\arm\disasm-arm.cc"
+          >
+        </File>
+        <File
+          RelativePath="..\..\src\disasm.h"
+          >
+        </File>
+      </Filter>
+      <Filter
+        Name="generated files"
+        >
+        <File
+          RelativePath="..\..\src\unicode.cc"
+          >
+        </File>
+      </Filter>
+    </Filter>
+    <Filter
+      Name="include"
+      >
+      <File
+        RelativePath="..\..\include\v8-debug.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\include\v8-profiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\include\v8-testing.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\include\v8.h"
+        >
+      </File>
+    </Filter>
+  </Files>
+  <Globals>
+  </Globals>
 </VisualStudioProject>
diff --git a/tools/visual_studio/v8_base_x64.vcproj b/tools/visual_studio/v8_base_x64.vcproj
index 6d27472..28f299e 100644
--- a/tools/visual_studio/v8_base_x64.vcproj
+++ b/tools/visual_studio/v8_base_x64.vcproj
@@ -1,1148 +1,1200 @@
 <?xml version="1.0" encoding="Windows-1252"?>
 <VisualStudioProject
-	ProjectType="Visual C++"
-	Version="8.00"
-	Name="v8_base"
-	ProjectGUID="{EC8B7909-62AF-470D-A75D-E1D89C837142}"
-	RootNamespace="v8_base"
-	Keyword="Win32Proj"
-	>
-	<Platforms>
-		<Platform
-			Name="x64"
-		/>
-	</Platforms>
-	<ToolFiles>
-	</ToolFiles>
-	<Configurations>
-		<Configuration
-			Name="Debug|x64"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\debug.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-		<Configuration
-			Name="Release|x64"
-			ConfigurationType="4"
-			InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\release.vsprops"
-			>
-			<Tool
-				Name="VCPreBuildEventTool"
-			/>
-			<Tool
-				Name="VCCustomBuildTool"
-			/>
-			<Tool
-				Name="VCXMLDataGeneratorTool"
-			/>
-			<Tool
-				Name="VCWebServiceProxyGeneratorTool"
-			/>
-			<Tool
-				Name="VCMIDLTool"
-			/>
-			<Tool
-				Name="VCCLCompilerTool"
-			/>
-			<Tool
-				Name="VCManagedResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCResourceCompilerTool"
-			/>
-			<Tool
-				Name="VCPreLinkEventTool"
-			/>
-			<Tool
-				Name="VCLibrarianTool"
-			/>
-			<Tool
-				Name="VCALinkTool"
-			/>
-			<Tool
-				Name="VCXDCMakeTool"
-			/>
-			<Tool
-				Name="VCBscMakeTool"
-			/>
-			<Tool
-				Name="VCFxCopTool"
-			/>
-			<Tool
-				Name="VCPostBuildEventTool"
-			/>
-		</Configuration>
-	</Configurations>
-	<References>
-	</References>
-	<Files>
-		<Filter
-			Name="src"
-			>
-			<File
-				RelativePath="..\..\src\accessors.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\accessors.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\allocation.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\allocation.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\api.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\api.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\arguments.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\assembler-x64-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\assembler-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\assembler-x64.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-irregexp-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-stack.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\assembler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\assembler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ast-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ast.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ast.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bignum.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bignum.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bignum-dtoa.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bignum-dtoa.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bootstrapper.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bootstrapper.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\builtins-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\builtins.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\builtins.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\bytecodes-irregexp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cached-powers.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cached-powers.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\char-predicates-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\char-predicates.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\checks.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\checks.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\circular-queue-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\circular-queue.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\circular-queue.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\code-stubs.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\code-stubs.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\code-stubs-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\code-stubs-x64.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\code.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\codegen-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\codegen-x64.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\codegen-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\codegen.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\codegen.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\compilation-cache.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\compilation-cache.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\compiler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\compiler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\contexts.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\contexts.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\conversions-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\conversions.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\conversions.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\counters.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\counters.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\cpu-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cpu.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cpu-profiler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cpu-profiler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\cpu-profiler-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\data-flow.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\data-flow.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\dateparser.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\dateparser.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug-agent.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug-agent.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\debug-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\debug.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\disassembler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\disassembler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\diy-fp.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\diy-fp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\double.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\dtoa.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\dtoa.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\execution.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\execution.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\factory.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\factory.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\fast-dtoa.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\fast-dtoa.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\fixed-dtoa.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\fixed-dtoa.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\flags.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\flags.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\flow-graph.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\flow-graph.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frame-element.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frame-element.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\frames-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\frames-x64.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frames-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frames.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\frames.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\full-codegen-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\full-codegen.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\full-codegen.h"
-				>
-			</File>
-			<File
+  ProjectType="Visual C++"
+  Version="8.00"
+  Name="v8_base"
+  ProjectGUID="{EC8B7909-62AF-470D-A75D-E1D89C837142}"
+  RootNamespace="v8_base"
+  Keyword="Win32Proj"
+  >
+  <Platforms>
+    <Platform
+      Name="x64"
+    />
+  </Platforms>
+  <ToolFiles>
+  </ToolFiles>
+  <Configurations>
+    <Configuration
+      Name="Debug|x64"
+      ConfigurationType="4"
+      InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\debug.vsprops"
+      >
+      <Tool
+        Name="VCPreBuildEventTool"
+      />
+      <Tool
+        Name="VCCustomBuildTool"
+      />
+      <Tool
+        Name="VCXMLDataGeneratorTool"
+      />
+      <Tool
+        Name="VCWebServiceProxyGeneratorTool"
+      />
+      <Tool
+        Name="VCMIDLTool"
+      />
+      <Tool
+        Name="VCCLCompilerTool"
+      />
+      <Tool
+        Name="VCManagedResourceCompilerTool"
+      />
+      <Tool
+        Name="VCResourceCompilerTool"
+      />
+      <Tool
+        Name="VCPreLinkEventTool"
+      />
+      <Tool
+        Name="VCLibrarianTool"
+      />
+      <Tool
+        Name="VCALinkTool"
+      />
+      <Tool
+        Name="VCXDCMakeTool"
+      />
+      <Tool
+        Name="VCBscMakeTool"
+      />
+      <Tool
+        Name="VCFxCopTool"
+      />
+      <Tool
+        Name="VCPostBuildEventTool"
+      />
+    </Configuration>
+    <Configuration
+      Name="Release|x64"
+      ConfigurationType="4"
+      InheritedPropertySheets=".\common.vsprops;.\x64.vsprops;.\release.vsprops"
+      >
+      <Tool
+        Name="VCPreBuildEventTool"
+      />
+      <Tool
+        Name="VCCustomBuildTool"
+      />
+      <Tool
+        Name="VCXMLDataGeneratorTool"
+      />
+      <Tool
+        Name="VCWebServiceProxyGeneratorTool"
+      />
+      <Tool
+        Name="VCMIDLTool"
+      />
+      <Tool
+        Name="VCCLCompilerTool"
+      />
+      <Tool
+        Name="VCManagedResourceCompilerTool"
+      />
+      <Tool
+        Name="VCResourceCompilerTool"
+      />
+      <Tool
+        Name="VCPreLinkEventTool"
+      />
+      <Tool
+        Name="VCLibrarianTool"
+      />
+      <Tool
+        Name="VCALinkTool"
+      />
+      <Tool
+        Name="VCXDCMakeTool"
+      />
+      <Tool
+        Name="VCBscMakeTool"
+      />
+      <Tool
+        Name="VCFxCopTool"
+      />
+      <Tool
+        Name="VCPostBuildEventTool"
+      />
+    </Configuration>
+  </Configurations>
+  <References>
+  </References>
+  <Files>
+    <Filter
+      Name="src"
+      >
+      <File
+        RelativePath="..\..\src\accessors.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\accessors.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\allocation.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\allocation.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\api.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\api.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\arguments.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\assembler-x64-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\assembler-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\assembler-x64.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-irregexp-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-stack.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\assembler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\assembler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ast-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ast.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ast.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\atomicops_internals_x86_msvc.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bignum.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bignum.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bignum-dtoa.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bignum-dtoa.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bootstrapper.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bootstrapper.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\builtins-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\builtins.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\builtins.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\bytecodes-irregexp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cached-powers.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cached-powers.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\char-predicates-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\char-predicates.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\checks.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\checks.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\circular-queue-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\circular-queue.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\circular-queue.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\code-stubs.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\code-stubs.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\code-stubs-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\code-stubs-x64.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\code.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\codegen-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\codegen-x64.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\codegen-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\codegen.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\codegen.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\compilation-cache.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\compilation-cache.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\compiler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\compiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\contexts.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\contexts.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\conversions-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\conversions.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\conversions.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\counters.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\counters.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\cpu-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cpu.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cpu-profiler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cpu-profiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\cpu-profiler-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\data-flow.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\data-flow.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\dateparser.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\dateparser.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\debug-agent.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\debug-agent.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\debug-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\debug.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\debug.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\deoptimizer-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\deoptimizer.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\deoptimizer.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\disassembler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\disassembler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\diy-fp.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\diy-fp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\double.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\dtoa.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\dtoa.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\execution.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\execution.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\factory.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\factory.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\fast-dtoa.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\fast-dtoa.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\fixed-dtoa.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\fixed-dtoa.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\flags.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\flags.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\flow-graph.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\flow-graph.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frame-element.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frame-element.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\frames-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\frames-x64.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frames-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frames.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\frames.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\full-codegen-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\full-codegen.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\full-codegen.h"
+        >
+      </File>
+      <File
 
-				RelativePath="..\..\src\func-name-inferrer.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\func-name-inferrer.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\global-handles.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\global-handles.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\globals.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\handles-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\handles.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\handles.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\hashmap.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\hashmap.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap-profiler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\heap-profiler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\ic-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ic-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ic.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\ic.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\interceptors.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\interpreter-irregexp.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\interpreter-irregexp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target-heavy-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target-heavy.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\jump-target-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jump-target-heavy.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jsregexp.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\jsregexp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\list-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\list.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\liveedit.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\liveedit.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log-utils.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\log-utils.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\macro-assembler-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\macro-assembler-x64.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\macro-assembler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\mark-compact.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\mark-compact.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\memory.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\messages.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\messages.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\natives.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\objects-debug.cc"
-				>
-				<FileConfiguration
-					Name="Release|x64"
-					ExcludedFromBuild="true"
-					>
-					<Tool
-						Name="VCCLCompilerTool"
-					/>
-				</FileConfiguration>
-			</File>
-			<File
-				RelativePath="..\..\src\objects-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\objects-visiting.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\objects-visiting.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\objects.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\objects.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\oprofile-agent.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\oprofile-agent.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\parser.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\parser.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\profile-generator.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\profile-generator.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\profile-generator-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\platform-win32.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\platform.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\prettyprinter.cc"
-				>
-				<FileConfiguration
-					Name="Release|x64"
-					ExcludedFromBuild="true"
-					>
-					<Tool
-						Name="VCCLCompilerTool"
-					/>
-				</FileConfiguration>
-			</File>
-			<File
-				RelativePath="..\..\src\prettyprinter.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\property.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\property.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\regexp-macro-assembler-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\regexp-macro-assembler-x64.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-irregexp.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-irregexp.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-tracer.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-macro-assembler-tracer.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\regexp-stack.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\register-allocator.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\register-allocator.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\register-allocator-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\rewriter.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\rewriter.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\runtime.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\runtime.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scanner.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scanner.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scopeinfo.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scopeinfo.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scopes.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\scopes.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\serialize.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\serialize.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\shell.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\snapshot-common.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\snapshot.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\spaces-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\spaces.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\spaces.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string-search.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string-search.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string-stream.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\string-stream.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\strtod.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\strtod.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\stub-cache-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\stub-cache.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\stub-cache.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\token.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\token.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\top.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\top.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\type-info.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\type-info.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\unbound-queue-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\unbound-queue.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\unicode-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\unicode.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\utils.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\utils.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8-counters.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8-counters.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8threads.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\v8threads.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\variables.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\variables.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\version.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\version.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame-heavy-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\virtual-frame-x64.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\x64\virtual-frame-x64.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\virtual-frame-heavy.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\vm-state-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\vm-state.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\zone-inl.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\zone.cc"
-				>
-			</File>
-			<File
-				RelativePath="..\..\src\zone.h"
-				>
-			</File>
-			<Filter
-				Name="third party"
-				>
-				<File
-					RelativePath="..\..\src\x64\disasm-x64.cc"
-					>
-				</File>
-				<File
-					RelativePath="..\..\src\disasm.h"
-					>
-				</File>
-			</Filter>
-			<Filter
-				Name="generated files"
-				>
-				<File
-					RelativePath="..\..\src\unicode.cc"
-					>
-				</File>
-			</Filter>
-		</Filter>
-		<Filter
-			Name="include"
-			>
-			<File
-				RelativePath="..\..\include\v8-debug.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\include\v8-profiler.h"
-				>
-			</File>
-			<File
-				RelativePath="..\..\include\v8.h"
-				>
-			</File>
-		</Filter>
-	</Files>
-	<Globals>
-	</Globals>
+        RelativePath="..\..\src\func-name-inferrer.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\func-name-inferrer.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\global-handles.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\global-handles.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\globals.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\handles-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\handles.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\handles.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hashmap.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hashmap.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hydrogen.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\hydrogen.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap-profiler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\heap-profiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\ic-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ic-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ic.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\ic.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\interceptors.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\interpreter-irregexp.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\interpreter-irregexp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target-heavy-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target-heavy.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\jump-target-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jump-target-heavy.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jsregexp.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\jsregexp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\list-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\list.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\lithium-allocator.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\lithium-allocator.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\liveedit.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\liveedit.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log-utils.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\log-utils.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\macro-assembler-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\macro-assembler-x64.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\macro-assembler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\mark-compact.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\mark-compact.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\memory.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\messages.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\messages.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\natives.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects-debug.cc"
+        >
+        <FileConfiguration
+          Name="Release|x64"
+          ExcludedFromBuild="true"
+          >
+          <Tool
+            Name="VCCLCompilerTool"
+          />
+        </FileConfiguration>
+      </File>
+      <File
+        RelativePath="..\..\src\objects-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects-visiting.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects-visiting.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\objects.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\oprofile-agent.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\oprofile-agent.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\parser.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\parser.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\profile-generator.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\profile-generator.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\profile-generator-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\platform-win32.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\platform.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\prettyprinter.cc"
+        >
+        <FileConfiguration
+          Name="Release|x64"
+          ExcludedFromBuild="true"
+          >
+          <Tool
+            Name="VCCLCompilerTool"
+          />
+        </FileConfiguration>
+      </File>
+      <File
+        RelativePath="..\..\src\prettyprinter.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\property.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\property.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\regexp-macro-assembler-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\regexp-macro-assembler-x64.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-irregexp.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-irregexp.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-tracer.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-macro-assembler-tracer.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\regexp-stack.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\register-allocator.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\register-allocator.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\register-allocator-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\rewriter.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\rewriter.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\runtime.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\runtime.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\runtime-profiler.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\runtime-profiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\safepoint-table.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\safepoint-table.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scanner.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scanner.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scopeinfo.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scopeinfo.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scopes.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\scopes.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\serialize.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\serialize.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\shell.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\snapshot-common.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\snapshot.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\spaces-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\spaces.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\spaces.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\string-search.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\string-search.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\string-stream.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\string-stream.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\strtod.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\strtod.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\stub-cache-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\stub-cache.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\stub-cache.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\token.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\token.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\top.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\top.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\type-info.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\type-info.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\unbound-queue-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\unbound-queue.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\unicode-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\unicode.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\utils.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\utils.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8-counters.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8-counters.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8threads.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\v8threads.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\variables.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\variables.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\version.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\version.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame-heavy-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\virtual-frame-x64.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\x64\virtual-frame-x64.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\virtual-frame-heavy.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\vm-state-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\vm-state.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\zone-inl.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\zone.cc"
+        >
+      </File>
+      <File
+        RelativePath="..\..\src\zone.h"
+        >
+      </File>
+      <Filter
+        Name="third party"
+        >
+        <File
+          RelativePath="..\..\src\x64\disasm-x64.cc"
+          >
+        </File>
+        <File
+          RelativePath="..\..\src\disasm.h"
+          >
+        </File>
+      </Filter>
+      <Filter
+        Name="generated files"
+        >
+        <File
+          RelativePath="..\..\src\unicode.cc"
+          >
+        </File>
+      </Filter>
+    </Filter>
+    <Filter
+      Name="include"
+      >
+      <File
+        RelativePath="..\..\include\v8-debug.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\include\v8-profiler.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\include\v8-testing.h"
+        >
+      </File>
+      <File
+        RelativePath="..\..\include\v8.h"
+        >
+      </File>
+    </Filter>
+  </Files>
+  <Globals>
+  </Globals>
 </VisualStudioProject>
diff --git a/tools/visual_studio/v8_shell_sample.vcproj b/tools/visual_studio/v8_shell_sample.vcproj
index b1e5f01..4eb38bf 100644
--- a/tools/visual_studio/v8_shell_sample.vcproj
+++ b/tools/visual_studio/v8_shell_sample.vcproj
@@ -37,6 +37,7 @@
 			/>
 			<Tool
 				Name="VCCLCompilerTool"
+        AdditionalIncludeDirectories="$(ProjectDir)\..\..\include"
 			/>
 			<Tool
 				Name="VCManagedResourceCompilerTool"
@@ -95,6 +96,7 @@
 			/>
 			<Tool
 				Name="VCCLCompilerTool"
+        AdditionalIncludeDirectories="$(ProjectDir)\..\..\include"
 			/>
 			<Tool
 				Name="VCManagedResourceCompilerTool"
diff --git a/tools/visual_studio/v8_shell_sample_arm.vcproj b/tools/visual_studio/v8_shell_sample_arm.vcproj
index a14c91a..b4260e0 100644
--- a/tools/visual_studio/v8_shell_sample_arm.vcproj
+++ b/tools/visual_studio/v8_shell_sample_arm.vcproj
@@ -37,6 +37,7 @@
 			/>
 			<Tool
 				Name="VCCLCompilerTool"
+        AdditionalIncludeDirectories="$(ProjectDir)\..\..\include"
 			/>
 			<Tool
 				Name="VCManagedResourceCompilerTool"
@@ -95,6 +96,7 @@
 			/>
 			<Tool
 				Name="VCCLCompilerTool"
+        AdditionalIncludeDirectories="$(ProjectDir)\..\..\include"
 			/>
 			<Tool
 				Name="VCManagedResourceCompilerTool"
diff --git a/tools/visual_studio/v8_shell_sample_x64.vcproj b/tools/visual_studio/v8_shell_sample_x64.vcproj
index 44d7b12..9ba6703 100644
--- a/tools/visual_studio/v8_shell_sample_x64.vcproj
+++ b/tools/visual_studio/v8_shell_sample_x64.vcproj
@@ -37,7 +37,8 @@
 			/>
 			<Tool
 				Name="VCCLCompilerTool"
-			/>
+        AdditionalIncludeDirectories="$(ProjectDir)\..\..\include"
+        />
 			<Tool
 				Name="VCManagedResourceCompilerTool"
 			/>
@@ -95,6 +96,7 @@
 			/>
 			<Tool
 				Name="VCCLCompilerTool"
+        AdditionalIncludeDirectories="$(ProjectDir)\..\..\include"
 			/>
 			<Tool
 				Name="VCManagedResourceCompilerTool"