Version 3.2.7

Disabled the original 'classic' V8 code generator.  Crankshaft is now the default on all platforms.

Changed the heap profiler to use more descriptive names.

Performance and stability improvements to isolates on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@7491 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index b5b4423..cfd18fa 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,13 @@
+2011-04-04: Version 3.2.7
+
+        Disabled the original 'classic' V8 code generator.  Crankshaft is
+        now the default on all platforms.
+
+        Changed the heap profiler to use more descriptive names.
+
+        Performance and stability improvements to isolates on all platforms.
+
+
 2011-03-30: Version 3.2.6
 
         Fixed xcode build warning in shell.cc (out of order initialization).
diff --git a/SConstruct b/SConstruct
index 2287c80..d92dd02 100644
--- a/SConstruct
+++ b/SConstruct
@@ -27,6 +27,7 @@
 
 import platform
 import re
+import subprocess
 import sys
 import os
 from os.path import join, dirname, abspath
@@ -145,6 +146,9 @@
       # Use visibility=default to disable this.
       'CXXFLAGS':     ['-fvisibility=hidden']
     },
+    'strictaliasing:off': {
+      'CCFLAGS':      ['-fno-strict-aliasing']
+    },
     'mode:debug': {
       'CCFLAGS':      ['-g', '-O0'],
       'CPPDEFINES':   ['ENABLE_DISASSEMBLER', 'DEBUG'],
@@ -826,8 +830,16 @@
   sys.exit(1)
 
 
-def GuessToolchain(os):
-  tools = Environment()['TOOLS']
+def GuessOS(env):
+  return utils.GuessOS()
+
+
+def GuessArch(env):
+  return utils.GuessArchitecture()
+
+
+def GuessToolchain(env):
+  tools = env['TOOLS']
   if 'gcc' in tools:
     return 'gcc'
   elif 'msvc' in tools:
@@ -836,7 +848,9 @@
     return None
 
 
-def GuessVisibility(os, toolchain):
+def GuessVisibility(env):
+  os = env['os']
+  toolchain = env['toolchain']
   if (os == 'win32' or os == 'cygwin') and toolchain == 'gcc':
     # MinGW / Cygwin can't do it.
     return 'default'
@@ -846,28 +860,41 @@
     return 'hidden'
 
 
-OS_GUESS = utils.GuessOS()
-TOOLCHAIN_GUESS = GuessToolchain(OS_GUESS)
-ARCH_GUESS = utils.GuessArchitecture()
-VISIBILITY_GUESS = GuessVisibility(OS_GUESS, TOOLCHAIN_GUESS)
+def GuessStrictAliasing(env):
+  # There seems to be a problem with gcc 4.5.x.
+  # See http://code.google.com/p/v8/issues/detail?id=884
+  # It can be worked around by disabling strict aliasing.
+  toolchain = env['toolchain']
+  if toolchain == 'gcc':
+    env = Environment(tools=['gcc'])
+    # The gcc version should be available in env['CCVERSION'],
+    # but when scons detects msvc this value is not set.
+    version = subprocess.Popen([env['CC'], '-dumpversion'],
+        stdout=subprocess.PIPE).communicate()[0]
+    if version.find('4.5') == 0:
+      return 'off'
+  return 'default'
 
 
-SIMPLE_OPTIONS = {
-  'toolchain': {
-    'values': ['gcc', 'msvc'],
-    'default': TOOLCHAIN_GUESS,
-    'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS
+PLATFORM_OPTIONS = {
+  'arch': {
+    'values': ['arm', 'ia32', 'x64', 'mips'],
+    'guess': GuessArch,
+    'help': 'the architecture to build for'
   },
   'os': {
     'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
-    'default': OS_GUESS,
-    'help': 'the os to build for (%s)' % OS_GUESS
+    'guess': GuessOS,
+    'help': 'the os to build for'
   },
-  'arch': {
-    'values':['arm', 'ia32', 'x64', 'mips'],
-    'default': ARCH_GUESS,
-    'help': 'the architecture to build for (%s)' % ARCH_GUESS
-  },
+  'toolchain': {
+    'values': ['gcc', 'msvc'],
+    'guess': GuessToolchain,
+    'help': 'the toolchain to use'
+  }
+}
+
+SIMPLE_OPTIONS = {
   'regexp': {
     'values': ['native', 'interpreted'],
     'default': 'native',
@@ -981,8 +1008,13 @@
   },
   'visibility': {
     'values': ['default', 'hidden'],
-    'default': VISIBILITY_GUESS,
-    'help': 'shared library symbol visibility (%s)' % VISIBILITY_GUESS
+    'guess': GuessVisibility,
+    'help': 'shared library symbol visibility'
+  },
+  'strictaliasing': {
+    'values': ['default', 'off'],
+    'guess': GuessStrictAliasing,
+    'help': 'assume strict aliasing while optimizing'
   },
   'pgo': {
     'values': ['off', 'instrument', 'optimize'],
@@ -1001,6 +1033,22 @@
   }
 }
 
+ALL_OPTIONS = dict(PLATFORM_OPTIONS, **SIMPLE_OPTIONS)
+
+
+def AddOptions(options, result):
+  guess_env = Environment(options=result)
+  for (name, option) in options.iteritems():
+    if 'guess' in option:
+      # Option has a guess function
+      guess = option.get('guess')
+      default = guess(guess_env)
+    else:
+      # Option has a fixed default
+      default = option.get('default')
+    help = '%s (%s)' % (option.get('help'), ", ".join(option['values']))
+    result.Add(name, help, default)
+
 
 def GetOptions():
   result = Options()
@@ -1009,12 +1057,23 @@
   result.Add('cache', 'directory to use for scons build cache', '')
   result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '')
   result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '')
-  for (name, option) in SIMPLE_OPTIONS.iteritems():
-    help = '%s (%s)' % (name, ", ".join(option['values']))
-    result.Add(name, help, option.get('default'))
+  AddOptions(PLATFORM_OPTIONS, result)
+  AddOptions(SIMPLE_OPTIONS, result)
   return result
 
 
+def GetTools(opts):
+  env = Environment(options=opts)
+  os = env['os']
+  toolchain = env['toolchain']
+  if os == 'win32' and toolchain == 'gcc':
+    return ['mingw']
+  elif os == 'win32' and toolchain == 'msvc':
+    return ['msvc', 'mslink', 'mslib', 'msvs']
+  else:
+    return ['default']
+
+
 def GetVersionComponents():
   MAJOR_VERSION_PATTERN = re.compile(r"#define\s+MAJOR_VERSION\s+(.*)")
   MINOR_VERSION_PATTERN = re.compile(r"#define\s+MINOR_VERSION\s+(.*)")
@@ -1094,8 +1153,8 @@
     print env['arch']
     print env['simulator']
     Abort("Option unalignedaccesses only supported for the ARM architecture.")
-  for (name, option) in SIMPLE_OPTIONS.iteritems():
-    if (not option.get('default')) and (name not in ARGUMENTS):
+  for (name, option) in ALL_OPTIONS.iteritems():
+    if (name not in env):
       message = ("A value for option %s must be specified (%s)." %
           (name, ", ".join(option['values'])))
       Abort(message)
@@ -1225,9 +1284,9 @@
   return overrides
 
 
-def BuildSpecific(env, mode, env_overrides):
+def BuildSpecific(env, mode, env_overrides, tools):
   options = {'mode': mode}
-  for option in SIMPLE_OPTIONS:
+  for option in ALL_OPTIONS:
     options[option] = env[option]
   PostprocessOptions(options, env['os'])
 
@@ -1281,7 +1340,7 @@
   (object_files, shell_files, mksnapshot, preparser_files) = env.SConscript(
     join('src', 'SConscript'),
     build_dir=join('obj', target_id),
-    exports='context',
+    exports='context tools',
     duplicate=False
   )
 
@@ -1308,21 +1367,21 @@
   context.library_targets.append(library)
   context.library_targets.append(preparser_library)
 
-  d8_env = Environment()
+  d8_env = Environment(tools=tools)
   d8_env.Replace(**context.flags['d8'])
   context.ApplyEnvOverrides(d8_env)
   shell = d8_env.Program('d8' + suffix, object_files + shell_files)
   context.d8_targets.append(shell)
 
   for sample in context.samples:
-    sample_env = Environment()
+    sample_env = Environment(tools=tools)
     sample_env.Replace(**context.flags['sample'])
     sample_env.Prepend(LIBS=[library_name])
     context.ApplyEnvOverrides(sample_env)
     sample_object = sample_env.SConscript(
       join('samples', 'SConscript'),
       build_dir=join('obj', 'sample', sample, target_id),
-      exports='sample context',
+      exports='sample context tools',
       duplicate=False
     )
     sample_name = sample + suffix
@@ -1335,7 +1394,7 @@
   cctest_program = cctest_env.SConscript(
     join('test', 'cctest', 'SConscript'),
     build_dir=join('obj', 'test', target_id),
-    exports='context object_files',
+    exports='context object_files tools',
     duplicate=False
   )
   context.cctest_targets.append(cctest_program)
@@ -1350,7 +1409,7 @@
     exports='context',
     duplicate=False
   )
-  preparser_name = join('obj', 'preparser', target_id, 'preparser' + suffix)
+  preparser_name = join('obj', 'preparser', target_id, 'preparser')
   preparser_program = preparser_env.Program(preparser_name, preparser_object);
   preparser_env.Depends(preparser_program, preparser_library)
   context.preparser_targets.append(preparser_program)
@@ -1360,7 +1419,9 @@
 
 def Build():
   opts = GetOptions()
-  env = Environment(options=opts)
+  tools = GetTools(opts)
+  env = Environment(options=opts, tools=tools)
+
   Help(opts.GenerateHelpText(env))
   VerifyOptions(env)
   env_overrides = ParseEnvOverrides(env['env'], env['importenv'])
@@ -1375,7 +1436,7 @@
   d8s = []
   modes = SplitList(env['mode'])
   for mode in modes:
-    context = BuildSpecific(env.Copy(), mode, env_overrides)
+    context = BuildSpecific(env.Copy(), mode, env_overrides, tools)
     libraries += context.library_targets
     mksnapshots += context.mksnapshot_targets
     cctests += context.cctest_targets
diff --git a/include/v8-preparser.h b/include/v8-preparser.h
index 9425f7d..7baac94 100644
--- a/include/v8-preparser.h
+++ b/include/v8-preparser.h
@@ -73,7 +73,7 @@
       : data_(data), size_(size) { }
 
   // Create a PreParserData value where stack_overflow reports true.
-  static PreParserData StackOverflow() { return PreParserData(NULL, 0); }
+  static PreParserData StackOverflow() { return PreParserData(0, NULL); }
   // Whether the pre-parser stopped due to a stack overflow.
   // If this is the case, size() and data() should not be used.
 
diff --git a/include/v8.h b/include/v8.h
index 62d1085..a990fc2 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -1653,6 +1653,11 @@
   V8EXPORT Local<Object> Clone();
 
   /**
+   * Returns the context in which the object was created.
+   */
+  V8EXPORT Local<Context> CreationContext();
+
+  /**
    * Set the backing store of the indexed properties to be managed by the
    * embedding layer. Access to the indexed properties will follow the rules
    * spelled out in CanvasPixelArray.
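The new CreationContext() accessor lets an embedder recover the context in which an object was created. A minimal usage sketch against this API (the helper name CallInCreationContext is hypothetical, not part of the patch):

  #include <v8.h>

  // Hypothetical helper: invoke a callback inside the context that created
  // the given object, using the new Object::CreationContext() accessor.
  void CallInCreationContext(v8::Handle<v8::Object> object,
                             v8::Handle<v8::Function> callback) {
    v8::HandleScope handle_scope;
    v8::Local<v8::Context> context = object->CreationContext();
    v8::Context::Scope context_scope(context);  // enter the creation context
    callback->Call(object, 0, NULL);            // call with no arguments
  }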
diff --git a/preparser/preparser-process.cc b/preparser/preparser-process.cc
index 26dfc42..fb6e386 100644
--- a/preparser/preparser-process.cc
+++ b/preparser/preparser-process.cc
@@ -27,105 +27,64 @@
 
 #include <stdlib.h>
 #include <stdarg.h>
+#include <stdio.h>
+
 #include "../include/v8stdint.h"
 #include "../include/v8-preparser.h"
-#include "unicode-inl.h"
 
-enum ResultCode { kSuccess = 0, kErrorReading = 1, kErrorWriting = 2 };
-
-namespace v8 {
-namespace internal {
-
-// THIS FILE IS PROOF-OF-CONCEPT ONLY.
-// The final goal is a stand-alone preparser library.
+// This file is only used for testing the stand-alone preparser
+// library.
+// The first (and only) argument must be the path of a JavaScript file.
+// This file is preparsed and the resulting preparser data is written
+// to stdout. Diagnostic output is written to stderr.
+// The file must contain only ASCII characters (UTF-8 isn't supported).
+// The file is read into memory, so it should have a reasonable size.
 
 
-class UTF8InputStream : public v8::UnicodeInputStream {
+// Adapts an ASCII string to the UnicodeInputStream interface.
+class AsciiInputStream : public v8::UnicodeInputStream {
  public:
-  UTF8InputStream(uint8_t* buffer, size_t length)
+  AsciiInputStream(uint8_t* buffer, size_t length)
       : buffer_(buffer),
-        offset_(0),
-        pos_(0),
-        end_offset_(static_cast<int>(length)) { }
+        end_offset_(static_cast<int>(length)),
+        offset_(0) { }
 
-  virtual ~UTF8InputStream() { }
+  virtual ~AsciiInputStream() { }
 
   virtual void PushBack(int32_t ch) {
-    // Pushback assumes that the character pushed back is the
-    // one that was most recently read, and jumps back in the
-    // UTF-8 stream by the length of that character's encoding.
-    offset_ -= unibrow::Utf8::Length(ch);
-    pos_--;
+    offset_--;
 #ifdef DEBUG
-    if (static_cast<unsigned>(ch) <= unibrow::Utf8::kMaxOneByteChar) {
-      if (ch != buffer_[offset_]) {
-        fprintf(stderr, "Invalid pushback: '%c'.", ch);
-        exit(1);
-      }
-    } else {
-      unsigned tmp = 0;
-      if (static_cast<unibrow::uchar>(ch) !=
-          unibrow::Utf8::CalculateValue(buffer_ + offset_,
-                                        end_offset_ - offset_,
-                                        &tmp)) {
-        fprintf(stderr, "Invalid pushback: 0x%x.", ch);
-        exit(1);
-      }
+    if (offset_ < 0 ||
+        (ch != ((offset_ >= end_offset_) ? -1 : buffer_[offset_]))) {
+      fprintf(stderr, "Invalid pushback: '%c' at offset %d.", ch, offset_);
+      exit(1);
     }
 #endif
   }
 
   virtual int32_t Next() {
-    if (offset_ == end_offset_) return -1;
-    uint8_t first_char = buffer_[offset_];
-    if (first_char <= unibrow::Utf8::kMaxOneByteChar) {
-      pos_++;
-      offset_++;
-      return static_cast<int32_t>(first_char);
+    if (offset_ >= end_offset_) {
+      offset_++;  // Increment anyway to allow symmetric pushbacks.
+      return -1;
     }
-    unibrow::uchar codepoint =
-        unibrow::Utf8::CalculateValue(buffer_ + offset_,
-                                      end_offset_ - offset_,
-                                      &offset_);
-    pos_++;
-    return static_cast<int32_t>(codepoint);
+    uint8_t next_char = buffer_[offset_];
+#ifdef DEBUG
+    if (next_char > 0x7fu) {
+      fprintf(stderr, "Non-ASCII character in input: '%c'.", next_char);
+      exit(1);
+    }
+#endif
+    offset_++;
+    return static_cast<int32_t>(next_char);
   }
 
  private:
   const uint8_t* buffer_;
-  unsigned offset_;
-  unsigned pos_;
-  unsigned end_offset_;
+  const int end_offset_;
+  int offset_;
 };
 
 
-// Write a number to dest in network byte order.
-void WriteUInt32(FILE* dest, uint32_t value, bool* ok) {
-  for (int i = 3; i >= 0; i--) {
-    uint8_t byte = static_cast<uint8_t>(value >> (i << 3));
-    int result = fputc(byte, dest);
-    if (result == EOF) {
-      *ok = false;
-      return;
-    }
-  }
-}
-
-// Read number from FILE* in network byte order.
-uint32_t ReadUInt32(FILE* source, bool* ok) {
-  uint32_t n = 0;
-  for (int i = 0; i < 4; i++) {
-    int c = fgetc(source);
-    if (c == EOF) {
-      *ok = false;
-      return 0;
-    }
-    n = (n << 8) + static_cast<uint32_t>(c);
-  }
-  return n;
-}
-
-
 bool ReadBuffer(FILE* source, void* buffer, size_t length) {
   size_t actually_read = fread(buffer, 1, length, source);
   return (actually_read == length);
@@ -150,57 +109,61 @@
 };
 
 
-// Preparse input and output result on stdout.
-int PreParseIO(FILE* input) {
-  fprintf(stderr, "LOG: Enter parsing loop\n");
-  bool ok = true;
-  uint32_t length = ReadUInt32(input, &ok);
-  fprintf(stderr, "LOG: Input length: %d\n", length);
-  if (!ok) return kErrorReading;
-  ScopedPointer<uint8_t> buffer(new uint8_t[length]);
-
-  if (!ReadBuffer(input, *buffer, length)) {
-    return kErrorReading;
-  }
-  UTF8InputStream input_buffer(*buffer, static_cast<size_t>(length));
-
-  v8::PreParserData data =
-      v8::Preparse(&input_buffer, 64 * 1024 * sizeof(void*));  // NOLINT
-  if (data.stack_overflow()) {
-    fprintf(stderr, "LOG: Stack overflow\n");
+int main(int argc, char* argv[]) {
+  // Check for filename argument.
+  if (argc < 2) {
+    fprintf(stderr, "ERROR: No filename on command line.\n");
     fflush(stderr);
-    // Report stack overflow error/no-preparser-data.
-    WriteUInt32(stdout, 0, &ok);
-    if (!ok) return kErrorWriting;
-    return 0;
+    return EXIT_FAILURE;
+  }
+  const char* filename = argv[1];
+
+  // Open JS file.
+  FILE* input = fopen(filename, "rb");
+  if (input == NULL) {
+    perror("ERROR: Error opening file");
+    fflush(stderr);
+    return EXIT_FAILURE;
   }
 
+  // Find length of JS file.
+  if (fseek(input, 0, SEEK_END) != 0) {
+    perror("ERROR: Error during seek");
+    fflush(stderr);
+    return EXIT_FAILURE;
+  }
+  size_t length = static_cast<size_t>(ftell(input));
+  rewind(input);
+
+  // Read JS file into memory buffer.
+  ScopedPointer<uint8_t> buffer(new uint8_t[length]);
+  if (!ReadBuffer(input, *buffer, length)) {
+    perror("ERROR: Reading file");
+    fflush(stderr);
+    return EXIT_FAILURE;
+  }
+  fclose(input);
+
+  // Preparse input file.
+  AsciiInputStream input_buffer(*buffer, length);
+  size_t kMaxStackSize = 64 * 1024 * sizeof(void*);  // NOLINT
+  v8::PreParserData data = v8::Preparse(&input_buffer, kMaxStackSize);
+
+  // Fail if stack overflow.
+  if (data.stack_overflow()) {
+    fprintf(stderr, "ERROR: Stack overflow\n");
+    fflush(stderr);
+    return EXIT_FAILURE;
+  }
+
+  // Print preparser data to stdout.
   uint32_t size = data.size();
   fprintf(stderr, "LOG: Success, data size: %u\n", size);
   fflush(stderr);
-  WriteUInt32(stdout, size, &ok);
-  if (!ok) return kErrorWriting;
   if (!WriteBuffer(stdout, data.data(), size)) {
-    return kErrorWriting;
+    perror("ERROR: Writing data");
+    return EXIT_FAILURE;
   }
-  return 0;
-}
 
-} }  // namespace v8::internal
-
-
-int main(int argc, char* argv[]) {
-  FILE* input = stdin;
-  if (argc > 1) {
-    char* arg = argv[1];
-    input = fopen(arg, "rb");
-    if (input == NULL) return EXIT_FAILURE;
-  }
-  int status = 0;
-  do {
-    status = v8::internal::PreParseIO(input);
-  } while (status == 0);
-  fprintf(stderr, "EXIT: Failure %d\n", status);
-  fflush(stderr);
-  return EXIT_FAILURE;
+  return EXIT_SUCCESS;
 }
diff --git a/samples/shell.cc b/samples/shell.cc
index 0710d46..aebccc6 100644
--- a/samples/shell.cc
+++ b/samples/shell.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -112,6 +112,7 @@
         v8::Handle<v8::String> source = ReadFile(arg);
         if (source.IsEmpty()) {
           printf("Error reading '%s'\n", arg);
+          continue;
         }
         if (!ExecuteString(source, file_name, false, true)) {
           ExitShell(1);
diff --git a/src/SConscript b/src/SConscript
index 3b9968e..e5f4e32 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -31,6 +31,7 @@
 sys.path.append(join(root_dir, 'tools'))
 import js2c
 Import('context')
+Import('tools')
 
 
 SOURCES = {
@@ -325,7 +326,7 @@
 
 
 def ConfigureObjectFiles():
-  env = Environment()
+  env = Environment(tools=tools)
   env.Replace(**context.flags['v8'])
   context.ApplyEnvOverrides(env)
   env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
diff --git a/src/accessors.cc b/src/accessors.cc
index e33b4d7..ee7636e 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -568,172 +568,6 @@
 // Accessors::FunctionArguments
 //
 
-static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
-  if (slot_index >= 0) {
-    const int offset = JavaScriptFrameConstants::kLocal0Offset;
-    return frame->fp() + offset - (slot_index * kPointerSize);
-  } else {
-    const int offset = JavaScriptFrameConstants::kReceiverOffset;
-    return frame->caller_sp() + offset + (slot_index * kPointerSize);
-  }
-}
-
-
-// We can't intermix stack decoding and allocations because
-// deoptimization infrastracture is not GC safe.
-// Thus we build a temporary structure in malloced space.
-class SlotRef BASE_EMBEDDED {
- public:
-  enum SlotRepresentation {
-    UNKNOWN,
-    TAGGED,
-    INT32,
-    DOUBLE,
-    LITERAL
-  };
-
-  SlotRef()
-      : addr_(NULL), representation_(UNKNOWN) { }
-
-  SlotRef(Address addr, SlotRepresentation representation)
-      : addr_(addr), representation_(representation) { }
-
-  explicit SlotRef(Object* literal)
-      : literal_(literal), representation_(LITERAL) { }
-
-  Handle<Object> GetValue() {
-    switch (representation_) {
-      case TAGGED:
-        return Handle<Object>(Memory::Object_at(addr_));
-
-      case INT32: {
-        int value = Memory::int32_at(addr_);
-        if (Smi::IsValid(value)) {
-          return Handle<Object>(Smi::FromInt(value));
-        } else {
-          return Isolate::Current()->factory()->NewNumberFromInt(value);
-        }
-      }
-
-      case DOUBLE: {
-        double value = Memory::double_at(addr_);
-        return Isolate::Current()->factory()->NewNumber(value);
-      }
-
-      case LITERAL:
-        return literal_;
-
-      default:
-        UNREACHABLE();
-        return Handle<Object>::null();
-    }
-  }
-
- private:
-  Address addr_;
-  Handle<Object> literal_;
-  SlotRepresentation representation_;
-};
-
-
-static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
-                                          DeoptimizationInputData* data,
-                                          JavaScriptFrame* frame) {
-  Translation::Opcode opcode =
-      static_cast<Translation::Opcode>(iterator->Next());
-
-  switch (opcode) {
-    case Translation::BEGIN:
-    case Translation::FRAME:
-      // Peeled off before getting here.
-      break;
-
-    case Translation::ARGUMENTS_OBJECT:
-      // This can be only emitted for local slots not for argument slots.
-      break;
-
-    case Translation::REGISTER:
-    case Translation::INT32_REGISTER:
-    case Translation::DOUBLE_REGISTER:
-    case Translation::DUPLICATE:
-      // We are at safepoint which corresponds to call.  All registers are
-      // saved by caller so there would be no live registers at this
-      // point. Thus these translation commands should not be used.
-      break;
-
-    case Translation::STACK_SLOT: {
-      int slot_index = iterator->Next();
-      Address slot_addr = SlotAddress(frame, slot_index);
-      return SlotRef(slot_addr, SlotRef::TAGGED);
-    }
-
-    case Translation::INT32_STACK_SLOT: {
-      int slot_index = iterator->Next();
-      Address slot_addr = SlotAddress(frame, slot_index);
-      return SlotRef(slot_addr, SlotRef::INT32);
-    }
-
-    case Translation::DOUBLE_STACK_SLOT: {
-      int slot_index = iterator->Next();
-      Address slot_addr = SlotAddress(frame, slot_index);
-      return SlotRef(slot_addr, SlotRef::DOUBLE);
-    }
-
-    case Translation::LITERAL: {
-      int literal_index = iterator->Next();
-      return SlotRef(data->LiteralArray()->get(literal_index));
-    }
-  }
-
-  UNREACHABLE();
-  return SlotRef();
-}
-
-
-
-
-
-static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
-                                           int inlined_frame_index,
-                                           Vector<SlotRef>* args_slots) {
-  AssertNoAllocation no_gc;
-  int deopt_index = AstNode::kNoNumber;
-  DeoptimizationInputData* data =
-      static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
-  TranslationIterator it(data->TranslationByteArray(),
-                         data->TranslationIndex(deopt_index)->value());
-  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
-  ASSERT(opcode == Translation::BEGIN);
-  int frame_count = it.Next();
-  USE(frame_count);
-  ASSERT(frame_count > inlined_frame_index);
-  int frames_to_skip = inlined_frame_index;
-  while (true) {
-    opcode = static_cast<Translation::Opcode>(it.Next());
-    // Skip over operands to advance to the next opcode.
-    it.Skip(Translation::NumberOfOperandsFor(opcode));
-    if (opcode == Translation::FRAME) {
-      if (frames_to_skip == 0) {
-        // We reached the frame corresponding to the inlined function
-        // in question.  Process the translation commands for the
-        // arguments.
-        //
-        // Skip the translation command for the receiver.
-        it.Skip(Translation::NumberOfOperandsFor(
-            static_cast<Translation::Opcode>(it.Next())));
-        // Compute slots for arguments.
-        for (int i = 0; i < args_slots->length(); ++i) {
-          (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
-        }
-        return;
-      }
-      frames_to_skip--;
-    }
-  }
-
-  UNREACHABLE();
-}
-
 
 static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
     JavaScriptFrame* frame,
@@ -742,7 +576,9 @@
   Factory* factory = Isolate::Current()->factory();
   int args_count = inlined_function->shared()->formal_parameter_count();
   ScopedVector<SlotRef> args_slots(args_count);
-  ComputeSlotMappingForArguments(frame, inlined_frame_index, &args_slots);
+  SlotRef::ComputeSlotMappingForArguments(frame,
+                                          inlined_frame_index,
+                                          &args_slots);
   Handle<JSObject> arguments =
       factory->NewArgumentsObject(inlined_function, args_count);
   Handle<FixedArray> array = factory->NewFixedArray(args_count);
diff --git a/src/api.cc b/src/api.cc
index 0d5e4f0..b0d287d 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1302,7 +1302,7 @@
   }
   // Copy the data to align it.
   unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
-  i::MemCopy(deserialized_data, data, length);
+  i::OS::MemCopy(deserialized_data, data, length);
 
   return new i::ScriptDataImpl(
       i::Vector<unsigned>(deserialized_data, deserialized_data_length));
@@ -2879,6 +2879,33 @@
 }
 
 
+static i::Context* GetCreationContext(i::JSObject* object) {
+  i::Object* constructor = object->map()->constructor();
+  i::JSFunction* function;
+  if (!constructor->IsJSFunction()) {
+    // API functions have null as a constructor,
+    // but any JSFunction knows its context immediately.
+    ASSERT(object->IsJSFunction() &&
+           i::JSFunction::cast(object)->shared()->IsApiFunction());
+    function = i::JSFunction::cast(object);
+  } else {
+    function = i::JSFunction::cast(constructor);
+  }
+  return function->context()->global_context();
+}
+
+
+Local<v8::Context> v8::Object::CreationContext() {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate,
+             "v8::Object::CreationContext()", return Local<v8::Context>());
+  ENTER_V8(isolate);
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Context* context = GetCreationContext(*self);
+  return Utils::ToLocal(i::Handle<i::Context>(context));
+}
+
+
 int v8::Object::GetIdentityHash() {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::GetIdentityHash()", return 0);
@@ -4175,9 +4202,11 @@
 
     // Call ResetDateCache(0 but expect no exceptions:
     bool caught_exception = false;
-    i::Handle<i::Object> result =
-        i::Execution::TryCall(func, isolate->js_builtins_object(), 0, NULL,
-        &caught_exception);
+    i::Execution::TryCall(func,
+                          isolate->js_builtins_object(),
+                          0,
+                          NULL,
+                          &caught_exception);
   }
 }
 
@@ -4246,7 +4275,9 @@
   ENTER_V8(isolate);
   int real_length = length > 0 ? length : 0;
   i::Handle<i::JSArray> obj = isolate->factory()->NewJSArray(real_length);
-  obj->set_length(*isolate->factory()->NewNumberFromInt(real_length));
+  i::Handle<i::Object> length_obj =
+      isolate->factory()->NewNumberFromInt(real_length);
+  obj->set_length(*length_obj);
   return Utils::ToLocal(obj);
 }
 
diff --git a/src/arguments.h b/src/arguments.h
index c80548f..a7a30e2 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -99,8 +99,17 @@
   Object* values_[3];
 };
 
-#define RUNTIME_CALLING_CONVENTION Arguments args, Isolate* isolate
-#define RUNTIME_GET_ISOLATE ASSERT(isolate == Isolate::Current())
+
+#define DECLARE_RUNTIME_FUNCTION(Type, Name)    \
+Type Name(Arguments args, Isolate* isolate)
+
+
+#define RUNTIME_FUNCTION(Type, Name)            \
+Type Name(Arguments args, Isolate* isolate)
+
+
+#define RUNTIME_ARGUMENTS(isolate, args) args, isolate
+
 
 } }  // namespace v8::internal
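The macros above replace the old RUNTIME_CALLING_CONVENTION / RUNTIME_GET_ISOLATE pair by making the isolate an explicit parameter. A rough sketch of how they read in use (Runtime_Example is a hypothetical name, not something added by this patch):

  // Declaration, e.g. in a header:
  DECLARE_RUNTIME_FUNCTION(MaybeObject*, Runtime_Example);

  // Definition: args and isolate arrive as explicit parameters, so the body
  // no longer needs Isolate::Current().
  RUNTIME_FUNCTION(MaybeObject*, Runtime_Example) {
    ASSERT(args.length() == 1);
    return Smi::FromInt(args.length());
  }

  // Forwarding a call keeps the (args, isolate) order consistent:
  //   Runtime_Example(RUNTIME_ARGUMENTS(isolate, args));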
 
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index bd76d9a..3e19a45 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -223,9 +223,9 @@
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     StaticVisitor::VisitPointer(heap, target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
-    StaticVisitor::VisitCodeTarget(this);
+    StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
-    StaticVisitor::VisitGlobalPropertyCell(this);
+    StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -234,7 +234,7 @@
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
-    StaticVisitor::VisitDebugTarget(this);
+    StaticVisitor::VisitDebugTarget(heap, this);
 #endif
   } else if (mode == RelocInfo::RUNTIME_ENTRY) {
     StaticVisitor::VisitRuntimeEntry(this);
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index be34df9..49b1975 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -44,11 +44,12 @@
 namespace v8 {
 namespace internal {
 
-CpuFeatures::CpuFeatures()
-    : supported_(0),
-      enabled_(0),
-      found_by_runtime_probing_(0) {
-}
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+
 
 #ifdef __arm__
 static uint64_t CpuFeaturesImpliedByCompiler() {
@@ -70,7 +71,11 @@
 #endif  // def __arm__
 
 
-void CpuFeatures::Probe(bool portable) {
+void CpuFeatures::Probe() {
+  ASSERT(!initialized_);
+#ifdef DEBUG
+  initialized_ = true;
+#endif
 #ifndef __arm__
   // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
   if (FLAG_enable_vfp3) {
@@ -81,7 +86,7 @@
     supported_ |= 1u << ARMv7;
   }
 #else  // def __arm__
-  if (portable && Serializer::enabled()) {
+  if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     supported_ |= CpuFeaturesImpliedByCompiler();
     return;  // No features if we might serialize.
@@ -98,8 +103,6 @@
     supported_ |= 1u << ARMv7;
     found_by_runtime_probing_ |= 1u << ARMv7;
   }
-
-  if (!portable) found_by_runtime_probing_ = 0;
 #endif
 }
 
@@ -268,8 +271,8 @@
 static const int kMinimalBufferSize = 4*KB;
 
 
-Assembler::Assembler(void* buffer, int buffer_size)
-    : AssemblerBase(Isolate::Current()),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+    : AssemblerBase(arg_isolate),
       positions_recorder_(this),
       allow_peephole_optimization_(false),
       emit_debug_code_(FLAG_debug_code) {
@@ -715,7 +718,7 @@
         *instr ^= kMovMvnFlip;
         return true;
       } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
-        if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+        if (CpuFeatures::IsSupported(ARMv7)) {
           if (imm32 < 0x10000) {
             *instr ^= kMovwLeaveCCFlip;
             *instr |= EncodeMovwImmediate(imm32);
@@ -779,7 +782,7 @@
     // condition code additional instruction conventions can be used.
     if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
       if (must_use_constant_pool() ||
-          !Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+          !CpuFeatures::IsSupported(ARMv7)) {
         // mov instruction will be an ldr from constant pool (one instruction).
         return true;
       } else {
@@ -822,7 +825,7 @@
       Condition cond = Instruction::ConditionField(instr);
       if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
         if (x.must_use_constant_pool() ||
-            !isolate()->cpu_features()->IsSupported(ARMv7)) {
+            !CpuFeatures::IsSupported(ARMv7)) {
           RecordRelocInfo(x.rmode_, x.imm32_);
           ldr(rd, MemOperand(pc, 0), cond);
         } else {
@@ -1265,7 +1268,7 @@
                      const Operand& src,
                      Condition cond) {
   // v6 and above.
-  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+  ASSERT(CpuFeatures::IsSupported(ARMv7));
   ASSERT(!dst.is(pc) && !src.rm_.is(pc));
   ASSERT((satpos >= 0) && (satpos <= 31));
   ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
@@ -1293,7 +1296,7 @@
                      int width,
                      Condition cond) {
   // v7 and above.
-  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+  ASSERT(CpuFeatures::IsSupported(ARMv7));
   ASSERT(!dst.is(pc) && !src.is(pc));
   ASSERT((lsb >= 0) && (lsb <= 31));
   ASSERT((width >= 1) && (width <= (32 - lsb)));
@@ -1313,7 +1316,7 @@
                      int width,
                      Condition cond) {
   // v7 and above.
-  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+  ASSERT(CpuFeatures::IsSupported(ARMv7));
   ASSERT(!dst.is(pc) && !src.is(pc));
   ASSERT((lsb >= 0) && (lsb <= 31));
   ASSERT((width >= 1) && (width <= (32 - lsb)));
@@ -1328,7 +1331,7 @@
 //   bfc dst, #lsb, #width
 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
   // v7 and above.
-  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+  ASSERT(CpuFeatures::IsSupported(ARMv7));
   ASSERT(!dst.is(pc));
   ASSERT((lsb >= 0) && (lsb <= 31));
   ASSERT((width >= 1) && (width <= (32 - lsb)));
@@ -1347,7 +1350,7 @@
                     int width,
                     Condition cond) {
   // v7 and above.
-  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+  ASSERT(CpuFeatures::IsSupported(ARMv7));
   ASSERT(!dst.is(pc) && !src.is(pc));
   ASSERT((lsb >= 0) && (lsb <= 31));
   ASSERT((width >= 1) && (width <= (32 - lsb)));
@@ -1619,7 +1622,7 @@
 
 void Assembler::ldrd(Register dst1, Register dst2,
                      const MemOperand& src, Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(ARMv7));
+  ASSERT(CpuFeatures::IsEnabled(ARMv7));
   ASSERT(src.rm().is(no_reg));
   ASSERT(!dst1.is(lr));  // r14.
   ASSERT_EQ(0, dst1.code() % 2);
@@ -1634,7 +1637,7 @@
   ASSERT(!src1.is(lr));  // r14.
   ASSERT_EQ(0, src1.code() % 2);
   ASSERT_EQ(src1.code() + 1, src2.code());
-  ASSERT(isolate()->cpu_features()->IsEnabled(ARMv7));
+  ASSERT(CpuFeatures::IsEnabled(ARMv7));
   addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
 }
 
@@ -1870,7 +1873,7 @@
   // Instruction details available in ARM DDI 0406A, A8-628.
   // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1011(11-8) | offset
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int u = 1;
   if (offset < 0) {
     offset = -offset;
@@ -1912,7 +1915,7 @@
   // Instruction details available in ARM DDI 0406A, A8-628.
   // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1010(11-8) | offset
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int u = 1;
   if (offset < 0) {
     offset = -offset;
@@ -1956,7 +1959,7 @@
   // Instruction details available in ARM DDI 0406A, A8-786.
   // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
   // Vsrc(15-12) | 1011(11-8) | (offset/4)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int u = 1;
   if (offset < 0) {
     offset = -offset;
@@ -1997,7 +2000,7 @@
   // Instruction details available in ARM DDI 0406A, A8-786.
   // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1010(11-8) | (offset/4)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int u = 1;
   if (offset < 0) {
     offset = -offset;
@@ -2043,7 +2046,7 @@
 // Only works for little endian floating point formats.
 // We don't support VFP on the mixed endian floating point platform.
 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
-  ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
 
   // VMOV can accept an immediate of the form:
   //
@@ -2096,7 +2099,7 @@
                      const Condition cond) {
   // Dd = immediate
   // Instruction details available in ARM DDI 0406B, A8-640.
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
 
   uint32_t enc;
   if (FitsVMOVDoubleImmediate(imm, &enc)) {
@@ -2133,7 +2136,7 @@
                      const Condition cond) {
   // Sd = Sm
   // Instruction details available in ARM DDI 0406B, A8-642.
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int sd, d, sm, m;
   dst.split_code(&sd, &d);
   src.split_code(&sm, &m);
@@ -2146,7 +2149,7 @@
                      const Condition cond) {
   // Dd = Dm
   // Instruction details available in ARM DDI 0406B, A8-642.
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0xB*B20 |
        dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
 }
@@ -2160,7 +2163,7 @@
   // Instruction details available in ARM DDI 0406A, A8-646.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(!src1.is(pc) && !src2.is(pc));
   emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
        src1.code()*B12 | 0xB*B8 | B4 | dst.code());
@@ -2175,7 +2178,7 @@
   // Instruction details available in ARM DDI 0406A, A8-646.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(!dst1.is(pc) && !dst2.is(pc));
   emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
        dst1.code()*B12 | 0xB*B8 | B4 | src.code());
@@ -2189,7 +2192,7 @@
   // Instruction details available in ARM DDI 0406A, A8-642.
   // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(!src.is(pc));
   int sn, n;
   dst.split_code(&sn, &n);
@@ -2204,7 +2207,7 @@
   // Instruction details available in ARM DDI 0406A, A8-642.
   // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(!dst.is(pc));
   int sn, n;
   src.split_code(&sn, &n);
@@ -2329,7 +2332,7 @@
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
 }
 
@@ -2338,7 +2341,7 @@
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
 }
 
@@ -2347,7 +2350,7 @@
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
 }
 
@@ -2356,7 +2359,7 @@
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
 }
 
@@ -2365,7 +2368,7 @@
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
 }
 
@@ -2374,7 +2377,7 @@
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
 }
 
@@ -2383,7 +2386,7 @@
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
 }
 
@@ -2413,7 +2416,7 @@
   // Instruction details available in ARM DDI 0406A, A8-536.
   // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
        dst.code()*B12 | 0x5*B9 | B8 | src2.code());
 }
@@ -2428,7 +2431,7 @@
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
        dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
 }
@@ -2443,7 +2446,7 @@
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
        dst.code()*B12 | 0x5*B9 | B8 | src2.code());
 }
@@ -2458,7 +2461,7 @@
   // Instruction details available in ARM DDI 0406A, A8-584.
   // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
        dst.code()*B12 | 0x5*B9 | B8 | src2.code());
 }
@@ -2471,7 +2474,7 @@
   // Instruction details available in ARM DDI 0406A, A8-570.
   // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
        src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
 }
@@ -2484,7 +2487,7 @@
   // Instruction details available in ARM DDI 0406A, A8-570.
   // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(src2 == 0.0);
   emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
        src1.code()*B12 | 0x5*B9 | B8 | B6);
@@ -2495,7 +2498,7 @@
   // Instruction details available in ARM DDI 0406A, A8-652.
   // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
   // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0xE*B20 |  B16 |
        dst.code()*B12 | 0xA*B8 | B4);
 }
@@ -2505,7 +2508,7 @@
   // Instruction details available in ARM DDI 0406A, A8-652.
   // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
   // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0xF*B20 |  B16 |
        dst.code()*B12 | 0xA*B8 | B4);
 }
@@ -2516,7 +2519,7 @@
                       const Condition cond) {
   // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
        dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
 }
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 91e6244..c9f8cfe 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -468,58 +468,97 @@
 
 // CpuFeatures keeps track of which features are supported by the target CPU.
 // Supported features must be enabled by a Scope before use.
-class CpuFeatures {
+class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  void Probe(bool portable);
+  static void Probe();
 
   // Check whether a feature is supported by the target CPU.
-  bool IsSupported(CpuFeature f) const {
+  static bool IsSupported(CpuFeature f) {
+    ASSERT(initialized_);
     if (f == VFP3 && !FLAG_enable_vfp3) return false;
     return (supported_ & (1u << f)) != 0;
   }
 
+#ifdef DEBUG
   // Check whether a feature is currently enabled.
-  bool IsEnabled(CpuFeature f) const {
-    return (enabled_ & (1u << f)) != 0;
+  static bool IsEnabled(CpuFeature f) {
+    ASSERT(initialized_);
+    Isolate* isolate = Isolate::UncheckedCurrent();
+    if (isolate == NULL) {
+      // When no isolate is available, work as if we're running in
+      // release mode.
+      return IsSupported(f);
+    }
+    unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
+    return (enabled & (1u << f)) != 0;
   }
+#endif
 
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
    public:
-    explicit Scope(CpuFeature f)
-        : cpu_features_(Isolate::Current()->cpu_features()),
-          isolate_(Isolate::Current()) {
-      ASSERT(cpu_features_->IsSupported(f));
+    explicit Scope(CpuFeature f) {
+      unsigned mask = 1u << f;
+      ASSERT(CpuFeatures::IsSupported(f));
       ASSERT(!Serializer::enabled() ||
-             (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
-      old_enabled_ = cpu_features_->enabled_;
-      cpu_features_->enabled_ |= 1u << f;
+             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+      isolate_ = Isolate::UncheckedCurrent();
+      old_enabled_ = 0;
+      if (isolate_ != NULL) {
+        old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
+        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+      }
     }
     ~Scope() {
-      ASSERT_EQ(Isolate::Current(), isolate_);
-      cpu_features_->enabled_ = old_enabled_;
+      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+      if (isolate_ != NULL) {
+        isolate_->set_enabled_cpu_features(old_enabled_);
+      }
     }
    private:
-    unsigned old_enabled_;
-    CpuFeatures* cpu_features_;
     Isolate* isolate_;
+    unsigned old_enabled_;
 #else
    public:
     explicit Scope(CpuFeature f) {}
 #endif
   };
 
+  class TryForceFeatureScope BASE_EMBEDDED {
+   public:
+    explicit TryForceFeatureScope(CpuFeature f)
+        : old_supported_(CpuFeatures::supported_) {
+      if (CanForce()) {
+        CpuFeatures::supported_ |= (1u << f);
+      }
+    }
+
+    ~TryForceFeatureScope() {
+      if (CanForce()) {
+        CpuFeatures::supported_ = old_supported_;
+      }
+    }
+
+   private:
+    static bool CanForce() {
+      // It's only safe to temporarily force support of CPU features
+      // when there's only a single isolate, which is guaranteed when
+      // the serializer is enabled.
+      return Serializer::enabled();
+    }
+
+    const unsigned old_supported_;
+  };
+
  private:
-  CpuFeatures();
-
-  unsigned supported_;
-  unsigned enabled_;
-  unsigned found_by_runtime_probing_;
-
-  friend class Isolate;
+#ifdef DEBUG
+  static bool initialized_;
+#endif
+  static unsigned supported_;
+  static unsigned found_by_runtime_probing_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
 };
@@ -564,7 +603,7 @@
   // for code generation and assumes its size to be buffer_size. If the buffer
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
-  Assembler(void* buffer, int buffer_size);
+  Assembler(Isolate* isolate, void* buffer, int buffer_size);
   ~Assembler();
 
   // Overrides the default provided by FLAG_debug_code.
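With CpuFeatures turned into an AllStatic class, feature checks no longer go through isolate->cpu_features(). A hypothetical sketch of guarding VFP3 code with the static interface (function and register choices are illustrative only):

  // Emit a VFP3 add only when the target CPU supports it; the Scope records
  // the enablement so debug-mode IsEnabled() asserts can check it.
  void EmitVfpAddIfSupported(Assembler* assm) {
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      assm->vadd(d0, d1, d2);
    } else {
      // Fall back to a core-register or runtime path here.
    }
  }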
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index f401cfd..9cca536 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1173,9 +1173,11 @@
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  // Probe the CPU to set the supported features, because this builtin
-  // may be called before the initialization performs CPU setup.
-  masm->isolate()->cpu_features()->Probe(false);
+  CpuFeatures::TryForceFeatureScope scope(VFP3);
+  if (!CpuFeatures::IsSupported(VFP3)) {
+    __ Abort("Unreachable code: Cannot optimize without VFP3 support.");
+    return;
+  }
 
   // Lookup the function in the JavaScript frame and push it as an
   // argument to the on-stack replacement function.
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 441adfe..328b519 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -502,7 +502,7 @@
                                    FloatingPointHelper::Destination destination,
                                    Register scratch1,
                                    Register scratch2) {
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
     __ vmov(d7.high(), scratch1);
@@ -570,7 +570,7 @@
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
 
   // Handle loading a double from a heap number.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+  if (CpuFeatures::IsSupported(VFP3) &&
       destination == kVFPRegisters) {
     CpuFeatures::Scope scope(VFP3);
     // Load the double from tagged HeapNumber to double register.
@@ -585,7 +585,7 @@
 
   // Handle loading a double from a smi.
   __ bind(&is_smi);
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // Convert smi to double using VFP instructions.
     __ SmiUntag(scratch1, object);
@@ -676,7 +676,7 @@
 
   __ JumpIfNotSmi(object, &obj_is_not_smi);
   __ SmiUntag(scratch1, object);
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     __ vmov(single_scratch, scratch1);
     __ vcvt_f64_s32(double_dst, single_scratch);
@@ -744,7 +744,7 @@
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
 
   // Load the number.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // Load the double value.
     __ sub(scratch1, object, Operand(kHeapObjectTag));
@@ -818,7 +818,7 @@
 
   // Object is a heap number.
   // Convert the floating point value to a 32-bit integer.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     SwVfpRegister single_scratch = double_scratch.low();
     // Load the double value.
@@ -1153,7 +1153,7 @@
   }
 
   // Lhs is a smi, rhs is a number.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     // Convert lhs to a double in d7.
     CpuFeatures::Scope scope(VFP3);
     __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
@@ -1193,7 +1193,7 @@
   }
 
   // Rhs is a smi, lhs is a heap number.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // Load the double from lhs, tagged HeapNumber r1, to d7.
     __ sub(r7, lhs, Operand(kHeapObjectTag));
@@ -1373,7 +1373,7 @@
 
   // Both are heap numbers.  Load them up then jump to the code we have
   // for that.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     __ sub(r7, rhs, Operand(kHeapObjectTag));
     __ vldr(d6, r7, HeapNumber::kValueOffset);
@@ -1463,7 +1463,7 @@
   Label load_result_from_cache;
   if (!object_is_smi) {
     __ JumpIfSmi(object, &is_smi);
-    if (isolate->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       __ CheckMap(object,
                   scratch1,
@@ -1597,7 +1597,7 @@
   // The arguments have been converted to doubles and stored in d6 and d7, if
   // VFP3 is supported, or in r0, r1, r2, and r3.
   Isolate* isolate = masm->isolate();
-  if (isolate->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     __ bind(&lhs_not_nan);
     CpuFeatures::Scope scope(VFP3);
     Label no_nan;
@@ -1707,7 +1707,7 @@
 // The stub returns zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
   // This stub uses VFP3 instructions.
-  ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
 
   Label false_result;
   Label not_heap_number;
@@ -1794,7 +1794,7 @@
     const Builtins::JavaScript& builtin) {
   Label slow, slow_reverse, do_the_call;
   bool use_fp_registers =
-      Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+      CpuFeatures::IsSupported(VFP3) &&
       Token::MOD != op_;
 
   ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
@@ -1811,7 +1811,7 @@
 
     // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
     // using registers d7 and d6 for the double values.
-    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
       __ vmov(s15, r7);
@@ -1907,7 +1907,7 @@
       __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
       }
 
-      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+      if (CpuFeatures::IsSupported(VFP3)) {
         CpuFeatures::Scope scope(VFP3);
         // Convert smi in r0 to double in d7.
         __ mov(r7, Operand(r0, ASR, kSmiTagSize));
@@ -1964,7 +1964,7 @@
       __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
       }
 
-      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+      if (CpuFeatures::IsSupported(VFP3)) {
         CpuFeatures::Scope scope(VFP3);
         // Convert smi in r1 to double in d6.
         __ mov(r7, Operand(r1, ASR, kSmiTagSize));
@@ -2177,7 +2177,7 @@
       // The code below for writing into heap numbers isn't capable of writing
       // the register as an unsigned int so we go to slow case if we hit this
       // case.
-      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+      if (CpuFeatures::IsSupported(VFP3)) {
         __ b(mi, &result_not_a_smi);
       } else {
         __ b(mi, &slow);
@@ -2225,7 +2225,7 @@
   // result.
   __ mov(r0, Operand(r5));
 
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
     CpuFeatures::Scope scope(VFP3);
     __ vmov(s0, r2);
@@ -3077,7 +3077,7 @@
       // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
       // depending on whether VFP3 is available or not.
       FloatingPointHelper::Destination destination =
-          Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+          CpuFeatures::IsSupported(VFP3) &&
           op_ != Token::MOD ?
           FloatingPointHelper::kVFPRegisters :
           FloatingPointHelper::kCoreRegisters;
@@ -3190,7 +3190,7 @@
           // The code below for writing into heap numbers isn't capable of
           // writing the register as an unsigned int so we go to slow case if we
           // hit this case.
-          if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+          if (CpuFeatures::IsSupported(VFP3)) {
             __ b(mi, &result_not_a_smi);
           } else {
             __ b(mi, not_numbers);
@@ -3229,7 +3229,7 @@
       // result.
       __ mov(r0, Operand(r5));
 
-      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+      if (CpuFeatures::IsSupported(VFP3)) {
         // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
         // mentioned above SHR needs to always produce a positive result.
         CpuFeatures::Scope scope(VFP3);
@@ -3358,7 +3358,7 @@
     // Jump to type transition if they are not. The registers r0 and r1 (right
     // and left) are preserved for the runtime call.
     FloatingPointHelper::Destination destination =
-        Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+        CpuFeatures::IsSupported(VFP3) &&
         op_ != Token::MOD ?
         FloatingPointHelper::kVFPRegisters :
         FloatingPointHelper::kCoreRegisters;
@@ -3545,7 +3545,7 @@
           // to return a heap number if we can.
           // The non vfp3 code does not support this special case, so jump to
           // runtime if we don't support it.
-          if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+          if (CpuFeatures::IsSupported(VFP3)) {
             __ b(mi,
                  (result_type_ <= TRBinaryOpIC::INT32) ? &transition
                                                        : &return_heap_number);
@@ -3571,7 +3571,7 @@
       __ Ret();
 
       __ bind(&return_heap_number);
-      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+      if (CpuFeatures::IsSupported(VFP3)) {
         CpuFeatures::Scope scope(VFP3);
         heap_number_result = r5;
         GenerateHeapResultAllocation(masm,
@@ -3806,7 +3806,7 @@
   const Register cache_entry = r0;
   const bool tagged = (argument_type_ == TAGGED);
 
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     if (tagged) {
       // Argument is a number and is on stack and in r0.
@@ -3894,7 +3894,7 @@
        __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
     }
     __ Ret();
-  }  // if (Isolate::Current()->cpu_features()->IsSupported(VFP3))
+  }  // if (CpuFeatures::IsSupported(VFP3))
 
   __ bind(&calculate);
   if (tagged) {
@@ -3903,7 +3903,7 @@
         ExternalReference(RuntimeFunction(), masm->isolate());
     __ TailCallExternalReference(runtime_function, 1, 1);
   } else {
-    if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) UNREACHABLE();
+    if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
     CpuFeatures::Scope scope(VFP3);
 
     Label no_update;
@@ -4102,7 +4102,7 @@
       __ mov(r0, Operand(r2));
     }
 
-    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
       CpuFeatures::Scope scope(VFP3);
       __ vmov(s0, r1);
@@ -4143,7 +4143,7 @@
 void MathPowStub::Generate(MacroAssembler* masm) {
   Label call_runtime;
 
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
 
     Label base_not_smi;
@@ -6807,7 +6807,7 @@
 
   // Inlining the double comparison and falling back to the general compare
   // stub if NaN is involved or VFP3 is unsupported.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
 
     // Load left and right operand
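
The hunks above replace the per-isolate Isolate::Current()->cpu_features()->IsSupported(...) queries with the static CpuFeatures::IsSupported(...) helper. A minimal sketch of the resulting guarded-emission pattern, assuming the V8-internal ARM assembler headers; the helper name EmitLoadHeapNumberValue below is hypothetical and only illustrative:

    // Load the double value of a heap number, preferring VFP3 when available.
    static void EmitLoadHeapNumberValue(MacroAssembler* masm,
                                        Register heap_number,
                                        Register scratch) {
      if (CpuFeatures::IsSupported(VFP3)) {   // static query, no Isolate needed
        CpuFeatures::Scope scope(VFP3);       // permit VFP3 instruction emission
        masm->sub(scratch, heap_number, Operand(kHeapObjectTag));
        masm->vldr(d0, scratch, HeapNumber::kValueOffset);
      } else {
        // Core-register fallback: load both words of the double directly.
        masm->Ldrd(r0, r1, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
      }
    }
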
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 1dde255..2b1ce4c 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -235,7 +235,7 @@
         operands_type_(TRBinaryOpIC::UNINITIALIZED),
         result_type_(TRBinaryOpIC::UNINITIALIZED),
         name_(NULL) {
-    use_vfp3_ = Isolate::Current()->cpu_features()->IsSupported(VFP3);
+    use_vfp3_ = CpuFeatures::IsSupported(VFP3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 91c4747..7b3ea14 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -770,7 +770,7 @@
     true_target->Branch(eq);
 
     // Slow case.
-    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       // Implements the slow case by using ToBooleanStub.
       // The ToBooleanStub takes a single argument, and
@@ -967,8 +967,7 @@
 void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
   ASSERT(Token::IsBitOp(op_));
 
-  if ((op_ == Token::SHR) &&
-      !Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
     // >>> requires an unsigned to double conversion and the non VFP code
     // does not support this conversion.
     __ b(cond, entry_label());
@@ -1072,7 +1071,7 @@
 void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
                                                    Register heap_number,
                                                    Register scratch) {
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     __ vmov(s0, answer);
     if (op_ == Token::SHR) {
@@ -1142,7 +1141,7 @@
         // SHR is special because it is required to produce a positive answer.
         __ cmp(int32, Operand(0, RelocInfo::NONE));
       }
-      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+      if (CpuFeatures::IsSupported(VFP3)) {
         __ b(mi, &result_not_a_smi);
       } else {
         // Non VFP code cannot convert from unsigned to double, so fall back
@@ -4617,7 +4616,7 @@
   Load(args->at(0));
   Load(args->at(1));
 
-  if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (!CpuFeatures::IsSupported(VFP3)) {
     frame_->CallRuntime(Runtime::kMath_pow, 2);
     frame_->EmitPush(r0);
   } else {
@@ -4771,7 +4770,7 @@
   ASSERT(args->length() == 1);
   Load(args->at(0));
 
-  if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (!CpuFeatures::IsSupported(VFP3)) {
     frame_->CallRuntime(Runtime::kMath_sqrt, 1);
     frame_->EmitPush(r0);
   } else {
@@ -5360,9 +5359,10 @@
   // Convert 32 random bits in r0 to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-    __ PrepareCallCFunction(0, r1);
-    __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    __ PrepareCallCFunction(1, r0);
+    __ mov(r0, Operand(ExternalReference::isolate_address()));
+    __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
     CpuFeatures::Scope scope(VFP3);
     // 0x41300000 is the top half of 1.0 x 2^20 as a double.
@@ -5380,10 +5380,11 @@
     __ vstr(d7, r0, HeapNumber::kValueOffset);
     frame_->EmitPush(r4);
   } else {
+    __ PrepareCallCFunction(2, r0);
     __ mov(r0, Operand(r4));
-    __ PrepareCallCFunction(1, r1);
+    __ mov(r1, Operand(ExternalReference::isolate_address()));
     __ CallCFunction(
-        ExternalReference::fill_heap_number_with_random_function(isolate()), 1);
+        ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
     frame_->EmitPush(r0);
   }
 }
@@ -5674,7 +5675,7 @@
 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
   Load(args->at(0));
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                  TranscendentalCacheStub::TAGGED);
     frame_->SpillAllButCopyTOSToR0();
@@ -5689,7 +5690,7 @@
 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
   Load(args->at(0));
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     TranscendentalCacheStub stub(TranscendentalCache::COS,
                                  TranscendentalCacheStub::TAGGED);
     frame_->SpillAllButCopyTOSToR0();
@@ -5704,7 +5705,7 @@
 void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
   Load(args->at(0));
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                  TranscendentalCacheStub::TAGGED);
     frame_->SpillAllButCopyTOSToR0();
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 0f5bf56..5bd2029 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -42,11 +42,12 @@
 namespace internal {
 
 void CPU::Setup() {
-  CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
-  cpu_features->Probe(true);
-  if (!cpu_features->IsSupported(VFP3) || Serializer::enabled()) {
-    V8::DisableCrankshaft();
-  }
+  CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+  return CpuFeatures::IsSupported(VFP3);
 }
 
 
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 3a3dcf0..f0a6937 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -586,14 +586,16 @@
 
   // Allocate a new deoptimizer object.
   // Pass four arguments in r0 to r3 and fifth argument on stack.
-  __ PrepareCallCFunction(5, r5);
+  __ PrepareCallCFunction(6, r5);
   __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ mov(r1, Operand(type()));  // bailout type,
   // r2: bailout id already loaded.
   // r3: code address or 0 already loaded.
   __ str(r4, MemOperand(sp, 0 * kPointerSize));  // Fp-to-sp delta.
+  __ mov(r5, Operand(ExternalReference::isolate_address()));
+  __ str(r5, MemOperand(sp, 1 * kPointerSize));  // Isolate.
   // Call Deoptimizer::New().
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 5);
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
 
   // Preserve "deoptimizer" object in register r0 and get the input
   // frame descriptor pointer to r1 (deoptimizer->input_);
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 4aa8d6a..d6846c8 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -136,7 +136,7 @@
  public:
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
-  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kLastParameterOffset = +2 * kPointerSize;
   static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
 
   // Caller SP-relative.
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 088ba58..3267951 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -562,7 +562,7 @@
 void FullCodeGenerator::DoTest(Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // Emit the inlined tests assumed by the stub.
     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -1622,27 +1622,26 @@
       break;
   }
 
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
   if (expr->is_compound()) {
     { AccumulatorValueContext context(this);
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          PrepareForBailout(expr->target(), TOS_REG);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
       }
     }
 
-    // For property compound assignments we need another deoptimization
-    // point after the property load.
-    if (property != NULL) {
-      PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
-    }
-
     Token::Value op = expr->binary_op();
     __ push(r0);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
@@ -2352,16 +2351,6 @@
       }
     }
   } else {
-    // Call to some other expression.  If the expression is an anonymous
-    // function literal not called in a loop, mark it as one that should
-    // also use the fast code generator.
-    FunctionLiteral* lit = fun->AsFunctionLiteral();
-    if (lit != NULL &&
-        lit->name()->Equals(isolate()->heap()->empty_string()) &&
-        loop_depth() == 0) {
-      lit->set_try_full_codegen(true);
-    }
-
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(fun);
     }
@@ -2543,11 +2532,75 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
-  // used in a few functions in runtime.js which should not normally be hit by
-  // this compiler.
+  if (FLAG_debug_code) __ AbortIfSmi(r0);
+
+  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
+  __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ b(ne, if_true);
+
+  // Check for fast case object. Generate false result for slow case object.
+  __ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+  __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+  __ cmp(r2, ip);
+  __ b(eq, if_false);
+
+  // Look for valueOf symbol in the descriptor array, and indicate false if
+  // found. The type is not checked, so if it is a transition it is a false
+  // negative.
+  __ ldr(r4, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
+  __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
+  // r4: descriptor array
+  // r3: length of descriptor array
+  // Calculate the end of the descriptor array.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kPointerSize == 4);
+  __ add(r2, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+  // Calculate location of the first key name.
+  __ add(r4,
+         r4,
+         Operand(FixedArray::kHeaderSize - kHeapObjectTag +
+                 DescriptorArray::kFirstIndex * kPointerSize));
+  // Loop through all the keys in the descriptor array. If one of these is the
+  // symbol valueOf, the result is false.
+  Label entry, loop;
+  // The use of ip to store the valueOf symbol assumes that it is not otherwise
+  // used in the loop below.
+  __ mov(ip, Operand(FACTORY->value_of_symbol()));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ ldr(r3, MemOperand(r4, 0));
+  __ cmp(r3, ip);
+  __ b(eq, if_false);
+  __ add(r4, r4, Operand(kPointerSize));
+  __ bind(&entry);
+  __ cmp(r4, Operand(r2));
+  __ b(ne, &loop);
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If not, the result is false.
+  __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
+  __ tst(r2, Operand(kSmiTagMask));
+  __ b(eq, if_false);
+  __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ ldr(r3, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+  __ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+  __ cmp(r2, r3);
+  __ b(ne, if_false);
+
+  // Set the bit in the map to indicate that it has been checked safe for
+  // default valueOf, and set the result to true.
+  __ ldrb(r2, FieldMemOperand(r4, Map::kBitField2Offset));
+  __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ strb(r2, FieldMemOperand(r4, Map::kBitField2Offset));
+  __ jmp(if_true);
+
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  __ jmp(if_false);
   context()->Plug(if_true, if_false);
 }
 
@@ -2802,9 +2855,10 @@
   // Convert 32 random bits in r0 to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
-  if (isolate()->cpu_features()->IsSupported(VFP3)) {
-    __ PrepareCallCFunction(0, r1);
-    __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    __ PrepareCallCFunction(1, r0);
+    __ mov(r0, Operand(ExternalReference::isolate_address()));
+    __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
     CpuFeatures::Scope scope(VFP3);
     // 0x41300000 is the top half of 1.0 x 2^20 as a double.
@@ -2822,10 +2876,11 @@
     __ vstr(d7, r0, HeapNumber::kValueOffset);
     __ mov(r0, r4);
   } else {
+    __ PrepareCallCFunction(2, r0);
     __ mov(r0, Operand(r4));
-    __ PrepareCallCFunction(1, r1);
+    __ mov(r1, Operand(ExternalReference::isolate_address()));
     __ CallCFunction(
-        ExternalReference::fill_heap_number_with_random_function(isolate()), 1);
+        ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
   }
 
   context()->Plug(r0);
@@ -3827,7 +3882,11 @@
 
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
-  PrepareForBailout(expr->increment(), TOS_REG);
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailout(expr->increment(), TOS_REG);
+  }
 
   // Call ToNumber only if operand is not a smi.
   Label no_conversion;
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 5d31473..d69f0d5 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1723,14 +1723,21 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  LLoadGlobal* result = new LLoadGlobal();
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new LLoadGlobalCell;
   return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
 
 
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), r0);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
   if (instr->check_hole_value()) {
     LOperand* temp = TempRegister();
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 77aabaf..32ec0f8 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -119,7 +119,8 @@
   V(LoadElements)                               \
   V(LoadExternalArrayPointer)                   \
   V(LoadFunctionPrototype)                      \
-  V(LoadGlobal)                                 \
+  V(LoadGlobalCell)                             \
+  V(LoadGlobalGeneric)                          \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadKeyedSpecializedArrayElement)           \
@@ -1259,10 +1260,25 @@
 };
 
 
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadGlobalGeneric(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* global_object() { return inputs_[0]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
 };
 
 
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 75406cf..7a3ab38 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -2163,7 +2163,7 @@
 }
 
 
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
   __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
@@ -2175,6 +2175,18 @@
 }
 
 
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(r0));
+  ASSERT(ToRegister(instr->result()).is(r0));
+
+  __ mov(r2, Operand(instr->name()));
+  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
+                                             : RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  CallCode(ic, mode, instr);
+}
+
+
 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
   Register value = ToRegister(instr->InputAt(0));
   Register scratch = scratch0();
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 3a1a8b6..2ba98f4 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -39,11 +39,14 @@
 namespace v8 {
 namespace internal {
 
-MacroAssembler::MacroAssembler(void* buffer, int size)
-    : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+    : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true),
-      code_object_(HEAP->undefined_value()) {
+      allow_stub_calls_(true) {
+  if (isolate() != NULL) {
+    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+                                  isolate());
+  }
 }
 
 
@@ -292,7 +295,7 @@
 
   } else if (!src2.is_single_instruction() &&
              !src2.must_use_constant_pool() &&
-             Isolate::Current()->cpu_features()->IsSupported(ARMv7) &&
+             CpuFeatures::IsSupported(ARMv7) &&
              IsPowerOf2(src2.immediate() + 1)) {
     ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);
 
@@ -305,7 +308,7 @@
 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                           Condition cond) {
   ASSERT(lsb < 32);
-  if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7)) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     and_(dst, src1, Operand(mask), LeaveCC, cond);
     if (lsb != 0) {
@@ -320,7 +323,7 @@
 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                           Condition cond) {
   ASSERT(lsb < 32);
-  if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7)) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     and_(dst, src1, Operand(mask), LeaveCC, cond);
     int shift_up = 32 - lsb - width;
@@ -348,7 +351,7 @@
   ASSERT(lsb + width < 32);
   ASSERT(!scratch.is(dst));
   if (width == 0) return;
-  if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7)) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     bic(dst, dst, Operand(mask));
     and_(scratch, src, Operand((1 << width) - 1));
@@ -362,7 +365,7 @@
 
 void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
   ASSERT(lsb < 32);
-  if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7)) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     bic(dst, dst, Operand(mask));
   } else {
@@ -373,7 +376,7 @@
 
 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                           Condition cond) {
-  if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7)) {
     ASSERT(!dst.is(pc) && !src.rm().is(pc));
     ASSERT((satpos >= 0) && (satpos <= 31));
 
@@ -619,7 +622,7 @@
   ASSERT_EQ(dst1.code() + 1, dst2.code());
 
   // Generate two ldr instructions if ldrd is not available.
-  if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7)) {
     CpuFeatures::Scope scope(ARMv7);
     ldrd(dst1, dst2, src, cond);
   } else {
@@ -644,7 +647,7 @@
   ASSERT_EQ(src1.code() + 1, src2.code());
 
   // Generate two str instructions if strd is not available.
-  if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7)) {
     CpuFeatures::Scope scope(ARMv7);
     strd(src1, src2, dst, cond);
   } else {
@@ -1799,9 +1802,10 @@
   bind(&delete_allocated_handles);
   str(r5, MemOperand(r7, kLimitOffset));
   mov(r4, r0);
-  PrepareCallCFunction(0, r5);
+  PrepareCallCFunction(1, r5);
+  mov(r0, Operand(ExternalReference::isolate_address()));
   CallCFunction(
-      ExternalReference::delete_handle_scope_extensions(isolate()), 0);
+      ExternalReference::delete_handle_scope_extensions(isolate()), 1);
   mov(r0, r4);
   jmp(&leave_exit_frame);
 
@@ -1902,7 +1906,7 @@
                                     Register scratch2,
                                     DwVfpRegister double_scratch,
                                     Label *not_int32) {
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     sub(scratch, source, Operand(kHeapObjectTag));
     vldr(double_scratch, scratch, HeapNumber::kValueOffset);
@@ -1998,7 +2002,7 @@
                                      Register scratch1,
                                      Register scratch2,
                                      CheckForInexactConversion check_inexact) {
-  ASSERT(Isolate::Current()->cpu_features()->IsSupported(VFP3));
+  ASSERT(CpuFeatures::IsSupported(VFP3));
   CpuFeatures::Scope scope(VFP3);
   Register prev_fpscr = scratch1;
   Register scratch = scratch2;
@@ -2156,7 +2160,7 @@
 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                          Register src,
                                          int num_least_bits) {
-  if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7)) {
     ubfx(dst, src, kSmiTagSize, num_least_bits);
   } else {
     mov(dst, Operand(src, ASR, kSmiTagSize));
@@ -2797,9 +2801,6 @@
 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
   int frame_alignment = ActivationFrameAlignment();
 
-  // Reserve space for Isolate address which is always passed as last parameter
-  num_arguments += 1;
-
   // Up to four simple arguments are passed in registers r0..r3.
   int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
                                0 : num_arguments - kRegisterPassedArguments;
@@ -2836,19 +2837,6 @@
                                          ExternalReference function_reference,
                                          Register scratch,
                                          int num_arguments) {
-  // Push Isolate address as the last argument.
-  if (num_arguments < kRegisterPassedArguments) {
-    Register arg_to_reg[] = {r0, r1, r2, r3};
-    Register r = arg_to_reg[num_arguments];
-    mov(r, Operand(ExternalReference::isolate_address()));
-  } else {
-    int stack_passed_arguments = num_arguments - kRegisterPassedArguments;
-    // Push Isolate address on the stack after the arguments.
-    mov(scratch, Operand(ExternalReference::isolate_address()));
-    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
-  }
-  num_arguments += 1;
-
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -2911,7 +2899,7 @@
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
-      masm_(address, size_ + Assembler::kGap) {
+      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
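
With the hunks above, PrepareCallCFunction/CallCFunction no longer append the Isolate* implicitly; callers that need it now load ExternalReference::isolate_address() themselves and include it in the argument count. A sketch of the updated call sequence, mirroring the pattern used in other hunks of this patch (assumed to sit inside a code generator where __ expands to masm-> and isolate() is available):

    __ PrepareCallCFunction(2, r0);                             // two explicit C arguments
    __ mov(r0, Operand(r4));                                    // first argument: the heap number
    __ mov(r1, Operand(ExternalReference::isolate_address()));  // second argument: the isolate
    __ CallCFunction(
        ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
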
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 2b81c08..ab5efb0 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -90,7 +90,11 @@
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
-  MacroAssembler(void* buffer, int size);
+  // The isolate parameter can be NULL if the macro assembler should
+  // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller never to invoke such functions on the
+  // macro assembler.
+  MacroAssembler(Isolate* isolate, void* buffer, int size);
 
   // Jump, Call, and Ret pseudo instructions implementing inter-working.
   void Jump(Register target, Condition cond = al);
@@ -781,7 +785,10 @@
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
 
-  Handle<Object> CodeObject() { return code_object_; }
+  Handle<Object> CodeObject() {
+    ASSERT(!code_object_.is_null());
+    return code_object_;
+  }
 
 
   // ---------------------------------------------------------------------------
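
The constructor now takes the Isolate explicitly, and CodeObject() asserts that it was set. A sketch of typical call sites after this change, mirrored from other hunks in this patch (buffer and buffer_size in the second example are assumed, not taken from the patch):

    // Isolate-bound assembler, as used by the stub and builtin generators.
    MacroAssembler masm(Isolate::Current(), NULL, 256);

    // A NULL isolate is permitted per the comment above, but isolate-dependent
    // helpers such as CodeObject() must then not be called on the assembler.
    MacroAssembler patcher(NULL, buffer, buffer_size);
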
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 8d540d4..4bd8c80 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -116,7 +116,7 @@
 RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
     Mode mode,
     int registers_to_save)
-    : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+    : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
       mode_(mode),
       num_registers_(registers_to_save),
       num_saved_registers_(registers_to_save),
@@ -347,7 +347,7 @@
     __ sub(current_input_offset(), r2, end_of_input_address());
   } else {
     ASSERT(mode_ == UC16);
-    int argument_count = 3;
+    int argument_count = 4;
     __ PrepareCallCFunction(argument_count, r2);
 
     // r0 - offset of start of capture
@@ -358,6 +358,7 @@
     //   r0: Address byte_offset1 - Address captured substring's start.
     //   r1: Address byte_offset2 - Address of current character position.
     //   r2: size_t byte_length - length of capture in bytes(!)
+    //   r3: Isolate* isolate
 
     // Address of start of capture.
     __ add(r0, r0, Operand(end_of_input_address()));
@@ -367,6 +368,8 @@
     __ mov(r4, Operand(r1));
     // Address of current input position.
     __ add(r1, current_input_offset(), Operand(end_of_input_address()));
+    // Isolate.
+    __ mov(r3, Operand(ExternalReference::isolate_address()));
 
     ExternalReference function =
         ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
@@ -778,10 +781,11 @@
     Label grow_failed;
 
     // Call GrowStack(backtrack_stackpointer(), &stack_base)
-    static const int num_arguments = 2;
+    static const int num_arguments = 3;
     __ PrepareCallCFunction(num_arguments, r0);
     __ mov(r0, backtrack_stackpointer());
     __ add(r1, frame_pointer(), Operand(kStackHighEnd));
+    __ mov(r2, Operand(ExternalReference::isolate_address()));
     ExternalReference grow_stack =
         ExternalReference::re_grow_stack(masm_->isolate());
     __ CallCFunction(grow_stack, num_arguments);
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 9936ac0..a71a4c5 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -953,7 +953,7 @@
                             Register fval,
                             Register scratch1,
                             Register scratch2) {
-  if (masm->isolate()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     __ vmov(s0, ival);
     __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
@@ -2048,7 +2048,7 @@
   //  -- sp[argc * 4]           : receiver
   // -----------------------------------
 
-  if (!masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+  if (!CpuFeatures::IsSupported(VFP3)) {
       return heap()->undefined_value();
   }
 
@@ -3509,7 +3509,7 @@
       __ ldr(value, MemOperand(r3, key, LSL, 1));
       break;
     case kExternalFloatArray:
-      if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+      if (CpuFeatures::IsSupported(VFP3)) {
         CpuFeatures::Scope scope(VFP3);
         __ add(r2, r3, Operand(key, LSL, 1));
         __ vldr(s0, r2, 0);
@@ -3548,7 +3548,7 @@
     // Now we can use r0 for the result as key is not needed any more.
     __ mov(r0, r5);
 
-    if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       __ vmov(s0, value);
       __ vcvt_f64_s32(d0, s0);
@@ -3563,7 +3563,7 @@
     // The test is different for unsigned int values. Since we need
     // the value to be in the range of a positive smi, we can't
     // handle either of the top two bits being set in the value.
-    if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       Label box_int, done;
       __ tst(value, Operand(0xC0000000));
@@ -3627,7 +3627,7 @@
   } else if (array_type == kExternalFloatArray) {
     // For the floating-point array type, we need to always allocate a
     // HeapNumber.
-    if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       // Allocate a HeapNumber for the result. Don't use r0 and r1 as
       // AllocateHeapNumber clobbers all registers - also when jumping due to
@@ -3820,7 +3820,7 @@
     // The WebGL specification leaves the behavior of storing NaN and
     // +/-Infinity into integer arrays basically undefined. For more
     // reproducible behavior, convert these to zero.
-    if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
 
       if (array_type == kExternalFloatArray) {
diff --git a/src/assembler.cc b/src/assembler.cc
index 0322747..ff48772 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -1002,7 +1002,7 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 ExternalReference ExternalReference::debug_break(Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug::Break)));
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));
 }
 
 
diff --git a/src/ast.cc b/src/ast.cc
index 8434357..9a263a5 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -77,20 +77,23 @@
       var_(NULL),  // Will be set by the call to BindTo.
       is_this_(var->is_this()),
       inside_with_(false),
-      is_trivial_(false) {
+      is_trivial_(false),
+      position_(RelocInfo::kNoPosition) {
   BindTo(var);
 }
 
 
 VariableProxy::VariableProxy(Handle<String> name,
                              bool is_this,
-                             bool inside_with)
+                             bool inside_with,
+                             int position)
   : name_(name),
     var_(NULL),
     is_this_(is_this),
     inside_with_(inside_with),
-    is_trivial_(false) {
-  // names must be canonicalized for fast equality checks
+    is_trivial_(false),
+    position_(position) {
+  // Names must be canonicalized for fast equality checks.
   ASSERT(name->IsSymbol());
 }
 
@@ -622,24 +625,21 @@
 
 
 bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
-                               Handle<String> name) {
+                               LookupResult* lookup) {
   target_ = Handle<JSFunction>::null();
   cell_ = Handle<JSGlobalPropertyCell>::null();
-  LookupResult lookup;
-  global->Lookup(*name, &lookup);
-  if (lookup.IsProperty() &&
-      lookup.type() == NORMAL &&
-      lookup.holder() == *global) {
-    cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(&lookup));
-    if (cell_->value()->IsJSFunction()) {
-      Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
-      // If the function is in new space we assume it's more likely to
-      // change and thus prefer the general IC code.
-      if (!HEAP->InNewSpace(*candidate) &&
-          CanCallWithoutIC(candidate, arguments()->length())) {
-        target_ = candidate;
-        return true;
-      }
+  ASSERT(lookup->IsProperty() &&
+         lookup->type() == NORMAL &&
+         lookup->holder() == *global);
+  cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(lookup));
+  if (cell_->value()->IsJSFunction()) {
+    Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
+    // If the function is in new space we assume it's more likely to
+    // change and thus prefer the general IC code.
+    if (!HEAP->InNewSpace(*candidate) &&
+        CanCallWithoutIC(candidate, arguments()->length())) {
+      target_ = candidate;
+      return true;
     }
   }
   return false;
diff --git a/src/ast.h b/src/ast.h
index e9a06ec..d8bc18e 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -1135,6 +1135,7 @@
   Variable* var() const { return var_; }
   bool is_this() const { return is_this_; }
   bool inside_with() const { return inside_with_; }
+  int position() const { return position_; }
 
   void MarkAsTrivial() { is_trivial_ = true; }
 
@@ -1147,8 +1148,12 @@
   bool is_this_;
   bool inside_with_;
   bool is_trivial_;
+  int position_;
 
-  VariableProxy(Handle<String> name, bool is_this, bool inside_with);
+  VariableProxy(Handle<String> name,
+                bool is_this,
+                bool inside_with,
+                int position = RelocInfo::kNoPosition);
   explicit VariableProxy(bool is_this);
 
   friend class Scope;
@@ -1316,7 +1321,7 @@
   Handle<JSGlobalPropertyCell> cell() { return cell_; }
 
   bool ComputeTarget(Handle<Map> type, Handle<String> name);
-  bool ComputeGlobalTarget(Handle<GlobalObject> global, Handle<String> name);
+  bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
 
   // Bailout support.
   int ReturnId() const { return return_id_; }
@@ -1743,7 +1748,6 @@
         contains_loops_(contains_loops),
         function_token_position_(RelocInfo::kNoPosition),
         inferred_name_(HEAP->empty_string()),
-        try_full_codegen_(false),
         pretenure_(false) { }
 
   DECLARE_NODE_TYPE(FunctionLiteral)
@@ -1781,9 +1785,6 @@
     inferred_name_ = inferred_name;
   }
 
-  bool try_full_codegen() { return try_full_codegen_; }
-  void set_try_full_codegen(bool flag) { try_full_codegen_ = flag; }
-
   bool pretenure() { return pretenure_; }
   void set_pretenure(bool value) { pretenure_ = value; }
 
@@ -1803,7 +1804,6 @@
   bool strict_mode_;
   int function_token_position_;
   Handle<String> inferred_name_;
-  bool try_full_codegen_;
   bool pretenure_;
 };
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 9c9bac7..a30ffc0 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -400,19 +400,22 @@
 
   // Please note that the prototype property for function instances must be
   // writable.
-  global_context()->set_function_instance_map(
-      *CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE));
+  Handle<Map> function_instance_map =
+      CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
+  global_context()->set_function_instance_map(*function_instance_map);
 
   // Functions with this map will not have a 'prototype' property, and
   // can not be used as constructors.
+  Handle<Map> function_without_prototype_map =
+      CreateFunctionMap(DONT_ADD_PROTOTYPE);
   global_context()->set_function_without_prototype_map(
-      *CreateFunctionMap(DONT_ADD_PROTOTYPE));
+      *function_without_prototype_map);
 
   // Allocate the function map. This map is temporary, used only for processing
   // of builtins.
   // Later the map is replaced with writable prototype map, allocated below.
-  global_context()->set_function_map(
-      *CreateFunctionMap(ADD_READONLY_PROTOTYPE));
+  Handle<Map> function_map = CreateFunctionMap(ADD_READONLY_PROTOTYPE);
+  global_context()->set_function_map(*function_map);
 
   // The final map for functions. Writeable prototype.
   // This map is installed in MakeFunctionInstancePrototypeWritable.
@@ -474,8 +477,6 @@
   function_instance_map_writable_prototype_->set_prototype(*empty_function);
 
   // Allocate the function map first and then patch the prototype later
-  Handle<Map> function_without_prototype_map(
-      global_context()->function_without_prototype_map());
   Handle<Map> empty_fm = factory->CopyMapDropDescriptors(
       function_without_prototype_map);
   empty_fm->set_instance_descriptors(
@@ -578,21 +579,27 @@
   Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
 
   // Allocate map for the strict mode function instances.
+  Handle<Map> strict_mode_function_instance_map =
+      CreateStrictModeFunctionMap(
+          ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
   global_context()->set_strict_mode_function_instance_map(
-      *CreateStrictModeFunctionMap(
-          ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller));
+      *strict_mode_function_instance_map);
 
   // Allocate map for the prototype-less strict mode instances.
+  Handle<Map> strict_mode_function_without_prototype_map =
+      CreateStrictModeFunctionMap(
+          DONT_ADD_PROTOTYPE, empty, arguments, caller);
   global_context()->set_strict_mode_function_without_prototype_map(
-      *CreateStrictModeFunctionMap(
-          DONT_ADD_PROTOTYPE, empty, arguments, caller));
+      *strict_mode_function_without_prototype_map);
 
   // Allocate map for the strict mode functions. This map is temporary, used
   // only for processing of builtins.
   // Later the map is replaced with writable prototype map, allocated below.
+  Handle<Map> strict_mode_function_map =
+      CreateStrictModeFunctionMap(
+          ADD_READONLY_PROTOTYPE, empty, arguments, caller);
   global_context()->set_strict_mode_function_map(
-      *CreateStrictModeFunctionMap(
-          ADD_READONLY_PROTOTYPE, empty, arguments, caller));
+      *strict_mode_function_map);
 
   // The final map for the strict mode functions. Writeable prototype.
   // This map is installed in MakeFunctionInstancePrototypeWritable.
@@ -1239,10 +1246,11 @@
 }
 
 
-#define INSTALL_NATIVE(Type, name, var)                                     \
-  Handle<String> var##_name = factory->LookupAsciiSymbol(name);             \
-  global_context()->set_##var(Type::cast(                                   \
-      global_context()->builtins()->GetPropertyNoExceptionThrown(*var##_name)));
+#define INSTALL_NATIVE(Type, name, var)                                        \
+  Handle<String> var##_name = factory->LookupAsciiSymbol(name);                \
+  Object* var##_native =                                                       \
+      global_context()->builtins()->GetPropertyNoExceptionThrown(*var##_name); \
+  global_context()->set_##var(Type::cast(var##_native));
 
 
 void Genesis::InstallNativeFunctions() {
diff --git a/src/builtins.cc b/src/builtins.cc
index 72f9d57..ae3dab4 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1594,10 +1594,11 @@
 
 void Builtins::Setup(bool create_heap_objects) {
   ASSERT(!initialized_);
-  Heap* heap = Isolate::Current()->heap();
+  Isolate* isolate = Isolate::Current();
+  Heap* heap = isolate->heap();
 
   // Create a scope for the handles in the builtins.
-  HandleScope scope;
+  HandleScope scope(isolate);
 
   const BuiltinDesc* functions = BuiltinFunctionTable::functions();
 
@@ -1609,7 +1610,7 @@
   // separate code object for each one.
   for (int i = 0; i < builtin_count; i++) {
     if (create_heap_objects) {
-      MacroAssembler masm(buffer, sizeof buffer);
+      MacroAssembler masm(isolate, buffer, sizeof buffer);
       // Generate the code/adaptor.
       typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
       Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
@@ -1634,7 +1635,7 @@
         }
       }
       // Log the event and add the code to the builtins array.
-      PROFILE(ISOLATE,
+      PROFILE(isolate,
               CodeCreateEvent(Logger::BUILTIN_TAG,
                               Code::cast(code),
                               functions[i].s_name));
diff --git a/src/checks.h b/src/checks.h
index 2bb94bb..a560b2f 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -271,6 +271,8 @@
 #define ASSERT_EQ(v1, v2)    CHECK_EQ(v1, v2)
 #define ASSERT_NE(v1, v2)    CHECK_NE(v1, v2)
 #define ASSERT_GE(v1, v2)    CHECK_GE(v1, v2)
+#define ASSERT_LT(v1, v2)    CHECK_LT(v1, v2)
+#define ASSERT_LE(v1, v2)    CHECK_LE(v1, v2)
 #define SLOW_ASSERT(condition) if (EnableSlowAsserts()) CHECK(condition)
 #else
 #define ASSERT_RESULT(expr)     (expr)
@@ -278,6 +280,8 @@
 #define ASSERT_EQ(v1, v2)      ((void) 0)
 #define ASSERT_NE(v1, v2)      ((void) 0)
 #define ASSERT_GE(v1, v2)      ((void) 0)
+#define ASSERT_LT(v1, v2)      ((void) 0)
+#define ASSERT_LE(v1, v2)      ((void) 0)
 #define SLOW_ASSERT(condition) ((void) 0)
 #endif
 // Static asserts have no impact on runtime performance, so they can be
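
The new ASSERT_LT and ASSERT_LE macros follow the existing ASSERT_EQ/ASSERT_NE/ASSERT_GE pattern: checked in debug builds, compiled out otherwise. A hypothetical use:

    ASSERT_LT(index, length);  // fails in debug builds if index >= length
    ASSERT_LE(0, offset);      // fails in debug builds if offset is negative
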
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 2ecd336..f680c60 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -95,7 +95,7 @@
     HandleScope scope(isolate);
 
     // Generate the new code.
-    MacroAssembler masm(NULL, 256);
+    MacroAssembler masm(isolate, NULL, 256);
     GenerateCode(&masm);
 
     // Create the code object.
@@ -132,7 +132,7 @@
   Code* code;
   if (!FindCodeInCache(&code)) {
     // Generate the new code.
-    MacroAssembler masm(NULL, 256);
+    MacroAssembler masm(Isolate::Current(), NULL, 256);
     GenerateCode(&masm);
     Heap* heap = masm.isolate()->heap();
 
diff --git a/src/codegen.cc b/src/codegen.cc
index 03f64a1..d2e7f23 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -248,7 +248,7 @@
   MakeCodePrologue(info);
   // Generate code.
   const int kInitialBufferSize = 4 * KB;
-  MacroAssembler masm(NULL, kInitialBufferSize);
+  MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
 #ifdef ENABLE_GDB_JIT_INTERFACE
   masm.positions_recorder()->StartGDBJITLineInfoRecording();
 #endif
diff --git a/src/compiler.cc b/src/compiler.cc
index 1ec4414..dea94fa 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -326,30 +326,9 @@
 
   if (Rewriter::Rewrite(info) && Scope::Analyze(info)) {
     if (V8::UseCrankshaft()) return MakeCrankshaftCode(info);
-
-    // Generate code and return it.  Code generator selection is governed by
-    // which backends are enabled and whether the function is considered
-    // run-once code or not.
-    //
-    // --full-compiler enables the dedicated backend for code we expect to
-    // be run once
-    //
-    // The normal choice of backend can be overridden with the flags
-    // --always-full-compiler.
-    if (Rewriter::Analyze(info)) {
-      Handle<SharedFunctionInfo> shared = info->shared_info();
-      bool is_run_once = (shared.is_null())
-          ? info->scope()->is_global_scope()
-          : (shared->is_toplevel() || shared->try_full_codegen());
-      bool can_use_full =
-          FLAG_full_compiler && !info->function()->contains_loops();
-      if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
-        return FullCodeGenerator::MakeCode(info);
-      } else {
-        return AssignedVariablesAnalyzer::Analyze(info) &&
-            CodeGenerator::MakeCode(info);
-      }
-    }
+    // If crankshaft is not supported, fall back to the full code generator
+    // for all compilation.
+    return FullCodeGenerator::MakeCode(info);
   }
 
   return false;
@@ -721,35 +700,12 @@
   if (FLAG_lazy && allow_lazy) {
     Handle<Code> code = info.isolate()->builtins()->LazyCompile();
     info.SetCode(code);
-  } else {
-    if (V8::UseCrankshaft()) {
-      if (!MakeCrankshaftCode(&info)) {
-        return Handle<SharedFunctionInfo>::null();
-      }
-    } else {
-      // The bodies of function literals have not yet been visited by the
-      // AST optimizer/analyzer.
-      if (!Rewriter::Analyze(&info)) return Handle<SharedFunctionInfo>::null();
-
-      bool is_run_once = literal->try_full_codegen();
-      bool can_use_full = FLAG_full_compiler && !literal->contains_loops();
-
-      if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
-        if (!FullCodeGenerator::MakeCode(&info)) {
-          return Handle<SharedFunctionInfo>::null();
-        }
-      } else {
-        // We fall back to the classic V8 code generator.
-        if (!AssignedVariablesAnalyzer::Analyze(&info) ||
-            !CodeGenerator::MakeCode(&info)) {
-          return Handle<SharedFunctionInfo>::null();
-        }
-      }
-    }
+  } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
+             (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
     ASSERT(!info.code().is_null());
-
-    // Function compilation complete.
     scope_info = SerializedScopeInfo::Create(info.scope());
+  } else {
+    return Handle<SharedFunctionInfo>::null();
   }
 
   // Create a shared function info object.
@@ -791,7 +747,6 @@
   function_info->SetThisPropertyAssignmentsInfo(
       lit->has_only_simple_this_property_assignments(),
       *lit->this_property_assignments());
-  function_info->set_try_full_codegen(lit->try_full_codegen());
   function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
   function_info->set_strict_mode(lit->strict_mode());
 }
diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h
index a7fffe0..b704417 100644
--- a/src/cpu-profiler-inl.h
+++ b/src/cpu-profiler-inl.h
@@ -70,6 +70,7 @@
   // Init the required fields only.
   result->sample.pc = NULL;
   result->sample.frames_count = 0;
+  result->sample.has_external_callback = false;
   return result;
 }
 
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index ef51950..082e253 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -187,6 +187,7 @@
   sample->state = Isolate::Current()->current_vm_state();
   sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
   sample->tos = NULL;
+  sample->has_external_callback = false;
   sample->frames_count = 0;
   for (StackTraceFrameIterator it;
        !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
diff --git a/src/cpu.h b/src/cpu.h
index ddc402f..e307302 100644
--- a/src/cpu.h
+++ b/src/cpu.h
@@ -53,6 +53,8 @@
   // Initializes the cpu architecture support. Called once at VM startup.
   static void Setup();
 
+  static bool SupportsCrankshaft();
+
   // Flush instruction cache.
   static void FlushICache(void* start, size_t size);
 
diff --git a/src/debug.cc b/src/debug.cc
index bc532ef..8edd16e 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -917,24 +917,20 @@
 }
 
 
-// This remains a static method so that generated code can call it.
-Object* Debug::Break(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
-
-  Debug* debug = isolate->debug();
-  Heap* heap = isolate->heap();
-  HandleScope scope(isolate);
+Object* Debug::Break(Arguments args) {
+  Heap* heap = isolate_->heap();
+  HandleScope scope(isolate_);
   ASSERT(args.length() == 0);
 
-  debug->thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
+  thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
 
   // Get the top-most JavaScript frame.
   JavaScriptFrameIterator it;
   JavaScriptFrame* frame = it.frame();
 
   // Just continue if breaks are disabled or debugger cannot be loaded.
-  if (debug->disable_break() || !debug->Load()) {
-    debug->SetAfterBreakTarget(frame);
+  if (disable_break() || !Load()) {
+    SetAfterBreakTarget(frame);
     return heap->undefined_value();
   }
 
@@ -945,7 +941,7 @@
   }
 
   // Postpone interrupt during breakpoint processing.
-  PostponeInterruptsScope postpone(isolate);
+  PostponeInterruptsScope postpone(isolate_);
 
   // Get the debug info (create it if it does not exist).
   Handle<SharedFunctionInfo> shared =
@@ -958,10 +954,10 @@
   break_location_iterator.FindBreakLocationFromAddress(frame->pc());
 
   // Check whether step next reached a new statement.
-  if (!debug->StepNextContinue(&break_location_iterator, frame)) {
+  if (!StepNextContinue(&break_location_iterator, frame)) {
     // Decrease steps left if performing multiple steps.
-    if (debug->thread_local_.step_count_ > 0) {
-      debug->thread_local_.step_count_--;
+    if (thread_local_.step_count_ > 0) {
+      thread_local_.step_count_--;
     }
   }
 
@@ -971,56 +967,55 @@
   if (break_location_iterator.HasBreakPoint()) {
     Handle<Object> break_point_objects =
         Handle<Object>(break_location_iterator.BreakPointObjects());
-    break_points_hit = debug->CheckBreakPoints(break_point_objects);
+    break_points_hit = CheckBreakPoints(break_point_objects);
   }
 
   // If step out is active skip everything until the frame where we need to step
   // out to is reached, unless real breakpoint is hit.
-  if (debug->StepOutActive() && frame->fp() != debug->step_out_fp() &&
+  if (StepOutActive() && frame->fp() != step_out_fp() &&
       break_points_hit->IsUndefined() ) {
       // Step count should always be 0 for StepOut.
-      ASSERT(debug->thread_local_.step_count_ == 0);
+      ASSERT(thread_local_.step_count_ == 0);
   } else if (!break_points_hit->IsUndefined() ||
-             (debug->thread_local_.last_step_action_ != StepNone &&
-              debug->thread_local_.step_count_ == 0)) {
+             (thread_local_.last_step_action_ != StepNone &&
+              thread_local_.step_count_ == 0)) {
     // Notify debugger if a real break point is triggered or if performing
     // single stepping with no more steps to perform. Otherwise do another step.
 
     // Clear all current stepping setup.
-    debug->ClearStepping();
+    ClearStepping();
 
     // Notify the debug event listeners.
-    isolate->debugger()->OnDebugBreak(break_points_hit, false);
-  } else if (debug->thread_local_.last_step_action_ != StepNone) {
+    isolate_->debugger()->OnDebugBreak(break_points_hit, false);
+  } else if (thread_local_.last_step_action_ != StepNone) {
     // Hold on to last step action as it is cleared by the call to
     // ClearStepping.
-    StepAction step_action = debug->thread_local_.last_step_action_;
-    int step_count = debug->thread_local_.step_count_;
+    StepAction step_action = thread_local_.last_step_action_;
+    int step_count = thread_local_.step_count_;
 
     // Clear all current stepping setup.
-    debug->ClearStepping();
+    ClearStepping();
 
     // Set up for the remaining steps.
-    debug->PrepareStep(step_action, step_count);
+    PrepareStep(step_action, step_count);
   }
 
-  if (debug->thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
-    debug->SetAfterBreakTarget(frame);
-  } else if (debug->thread_local_.frame_drop_mode_ ==
+  if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
+    SetAfterBreakTarget(frame);
+  } else if (thread_local_.frame_drop_mode_ ==
       FRAME_DROPPED_IN_IC_CALL) {
     // We must have been calling IC stub. Do not go there anymore.
-    Code* plain_return =
-        Isolate::Current()->builtins()->builtin(
-            Builtins::kPlainReturn_LiveEdit);
-    debug->thread_local_.after_break_target_ = plain_return->entry();
-  } else if (debug->thread_local_.frame_drop_mode_ ==
+    Code* plain_return = isolate_->builtins()->builtin(
+        Builtins::kPlainReturn_LiveEdit);
+    thread_local_.after_break_target_ = plain_return->entry();
+  } else if (thread_local_.frame_drop_mode_ ==
       FRAME_DROPPED_IN_DEBUG_SLOT_CALL) {
     // Debug break slot stub does not return normally, instead it manually
     // cleans the stack and jumps. We should patch the jump address.
-    Code* plain_return = Isolate::Current()->builtins()->builtin(
+    Code* plain_return = isolate_->builtins()->builtin(
         Builtins::kFrameDropper_LiveEdit);
-    debug->thread_local_.after_break_target_ = plain_return->entry();
-  } else if (debug->thread_local_.frame_drop_mode_ ==
+    thread_local_.after_break_target_ = plain_return->entry();
+  } else if (thread_local_.frame_drop_mode_ ==
       FRAME_DROPPED_IN_DIRECT_CALL) {
     // Nothing to do, after_break_target is not used here.
   } else {
@@ -1031,6 +1026,11 @@
 }
 
 
+RUNTIME_FUNCTION(Object*, Debug_Break) {
+  return isolate->debug()->Break(args);
+}
+
+
 // Check the break point objects for whether one or more are actually
 // triggered. This function returns a JSArray with the break point objects
 // which is triggered.
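The RUNTIME_FUNCTION(Object*, Debug_Break) wrapper above replaces the old static Break(RUNTIME_CALLING_CONVENTION) with an ordinary member function on Debug, reached through a thin runtime stub that resolves the current isolate. A minimal sketch of what that wrapper is assumed to expand to (illustrative only; the real RUNTIME_FUNCTION/DECLARE_RUNTIME_FUNCTION macros live in the runtime support headers and are not part of this diff):

    // Assumed expansion, for illustration -- not the actual macro definition.
    Object* Debug_Break(Arguments args, Isolate* isolate) {
      // Forward to the per-isolate Debug object; Break() can now touch
      // thread_local_ and isolate_ directly instead of a static 'debug'.
      return isolate->debug()->Break(args);
    }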
diff --git a/src/debug.h b/src/debug.h
index d512595..fb60cc4 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -228,7 +228,7 @@
   void PreemptionWhileInDebugger();
   void Iterate(ObjectVisitor* v);
 
-  static Object* Break(RUNTIME_CALLING_CONVENTION);
+  Object* Break(Arguments args);
   void SetBreakPoint(Handle<SharedFunctionInfo> shared,
                      Handle<Object> break_point_object,
                      int* source_position);
@@ -548,6 +548,9 @@
 };
 
 
+DECLARE_RUNTIME_FUNCTION(Object*, Debug_Break);
+
+
 // Message delivered to the message handler callback. This is either a debugger
 // event or the response to a command.
 class MessageImpl: public v8::Debug::Message {
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 4372af0..0fed391 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -199,8 +199,7 @@
 }
 
 
-void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer,
-                                      Isolate* isolate) {
+void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
   deoptimizer->DoComputeOutputFrames();
 }
 
@@ -934,7 +933,7 @@
   // isn't meant to be serialized at all.
   ASSERT(!Serializer::enabled());
 
-  MacroAssembler masm(NULL, 16 * KB);
+  MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
   masm.set_emit_debug_code(false);
   GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
   CodeDesc desc;
@@ -1195,4 +1194,103 @@
 }
 
 
+// We can't intermix stack decoding and allocations because the
+// deoptimization infrastructure is not GC safe.
+// Thus we build a temporary structure in malloced space.
+SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
+                                            DeoptimizationInputData* data,
+                                            JavaScriptFrame* frame) {
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+
+  switch (opcode) {
+    case Translation::BEGIN:
+    case Translation::FRAME:
+      // Peeled off before getting here.
+      break;
+
+    case Translation::ARGUMENTS_OBJECT:
+      // This can only be emitted for local slots, not for argument slots.
+      break;
+
+    case Translation::REGISTER:
+    case Translation::INT32_REGISTER:
+    case Translation::DOUBLE_REGISTER:
+    case Translation::DUPLICATE:
+      // We are at a safepoint which corresponds to a call.  All registers
+      // are saved by the caller so there would be no live registers at this
+      // point. Thus these translation commands should not be used.
+      break;
+
+    case Translation::STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::TAGGED);
+    }
+
+    case Translation::INT32_STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::INT32);
+    }
+
+    case Translation::DOUBLE_STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::DOUBLE);
+    }
+
+    case Translation::LITERAL: {
+      int literal_index = iterator->Next();
+      return SlotRef(data->LiteralArray()->get(literal_index));
+    }
+  }
+
+  UNREACHABLE();
+  return SlotRef();
+}
+
+
+void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
+                                             int inlined_frame_index,
+                                             Vector<SlotRef>* args_slots) {
+  AssertNoAllocation no_gc;
+  int deopt_index = AstNode::kNoNumber;
+  DeoptimizationInputData* data =
+      static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
+  TranslationIterator it(data->TranslationByteArray(),
+                         data->TranslationIndex(deopt_index)->value());
+  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+  ASSERT(opcode == Translation::BEGIN);
+  int frame_count = it.Next();
+  USE(frame_count);
+  ASSERT(frame_count > inlined_frame_index);
+  int frames_to_skip = inlined_frame_index;
+  while (true) {
+    opcode = static_cast<Translation::Opcode>(it.Next());
+    // Skip over operands to advance to the next opcode.
+    it.Skip(Translation::NumberOfOperandsFor(opcode));
+    if (opcode == Translation::FRAME) {
+      if (frames_to_skip == 0) {
+        // We reached the frame corresponding to the inlined function
+        // in question.  Process the translation commands for the
+        // arguments.
+        //
+        // Skip the translation command for the receiver.
+        it.Skip(Translation::NumberOfOperandsFor(
+            static_cast<Translation::Opcode>(it.Next())));
+        // Compute slots for arguments.
+        for (int i = 0; i < args_slots->length(); ++i) {
+          (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
+        }
+        return;
+      }
+      frames_to_skip--;
+    }
+  }
+
+  UNREACHABLE();
+}
+
+
 } }  // namespace v8::internal
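SlotRef splits the work into a decode phase that must not allocate (walking the translation while the frame is live) and a materialization phase in which GetValue may allocate heap numbers. A minimal usage sketch under that contract, using V8-internal types; `frame`, `inlined_frame_index` and `argc` are hypothetical inputs:

    // Sketch only: collects the inlined callee's argument values as handles.
    void CollectInlinedArguments(JavaScriptFrame* frame,
                                 int inlined_frame_index,
                                 int argc,
                                 List<Handle<Object> >* out) {
      Vector<SlotRef> slots = Vector<SlotRef>::New(argc);
      // Decode phase: no allocation while reading the translation.
      SlotRef::ComputeSlotMappingForArguments(frame, inlined_frame_index, &slots);
      // Materialization phase: GetValue() may allocate (e.g. heap numbers),
      // which is why it runs only after the slot references are captured.
      for (int i = 0; i < argc; i++) {
        out->Add(slots[i].GetValue());
      }
      slots.Dispose();
    }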
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index a53de3d..514de05 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -192,7 +192,7 @@
 
   void InsertHeapNumberValues(int index, JavaScriptFrame* frame);
 
-  static void ComputeOutputFrames(Deoptimizer* deoptimizer, Isolate* isolate);
+  static void ComputeOutputFrames(Deoptimizer* deoptimizer);
 
   static Address GetDeoptimizationEntry(int id, BailoutType type);
   static int GetDeoptimizationId(Address addr, BailoutType type);
@@ -552,6 +552,78 @@
 };
 
 
+class SlotRef BASE_EMBEDDED {
+ public:
+  enum SlotRepresentation {
+    UNKNOWN,
+    TAGGED,
+    INT32,
+    DOUBLE,
+    LITERAL
+  };
+
+  SlotRef()
+      : addr_(NULL), representation_(UNKNOWN) { }
+
+  SlotRef(Address addr, SlotRepresentation representation)
+      : addr_(addr), representation_(representation) { }
+
+  explicit SlotRef(Object* literal)
+      : literal_(literal), representation_(LITERAL) { }
+
+  Handle<Object> GetValue() {
+    switch (representation_) {
+      case TAGGED:
+        return Handle<Object>(Memory::Object_at(addr_));
+
+      case INT32: {
+        int value = Memory::int32_at(addr_);
+        if (Smi::IsValid(value)) {
+          return Handle<Object>(Smi::FromInt(value));
+        } else {
+          return Isolate::Current()->factory()->NewNumberFromInt(value);
+        }
+      }
+
+      case DOUBLE: {
+        double value = Memory::double_at(addr_);
+        return Isolate::Current()->factory()->NewNumber(value);
+      }
+
+      case LITERAL:
+        return literal_;
+
+      default:
+        UNREACHABLE();
+        return Handle<Object>::null();
+    }
+  }
+
+  static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
+                                             int inlined_frame_index,
+                                             Vector<SlotRef>* args_slots);
+
+ private:
+  Address addr_;
+  Handle<Object> literal_;
+  SlotRepresentation representation_;
+
+  static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
+    if (slot_index >= 0) {
+      const int offset = JavaScriptFrameConstants::kLocal0Offset;
+      return frame->fp() + offset - (slot_index * kPointerSize);
+    } else {
+      const int offset = JavaScriptFrameConstants::kLastParameterOffset;
+      return frame->fp() + offset - ((slot_index + 1) * kPointerSize);
+    }
+  }
+
+  static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
+                                            DeoptimizationInputData* data,
+                                            JavaScriptFrame* frame);
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_DEOPTIMIZER_H_
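SlotAddress maps a translation slot index onto a frame address piecewise: non-negative indices are local slots growing downwards from kLocal0Offset, negative indices are parameters growing upwards from kLastParameterOffset. Worked out for illustration (kPointerSize = 4 is the ia32 case; the offset constants themselves are platform frame constants not shown here):

    //   slot_index =  0  ->  fp + kLocal0Offset                        (first local slot)
    //   slot_index =  2  ->  fp + kLocal0Offset - 2 * kPointerSize     (two slots below)
    //   slot_index = -1  ->  fp + kLastParameterOffset                 (last parameter)
    //   slot_index = -2  ->  fp + kLastParameterOffset + kPointerSize  (next parameter up)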
diff --git a/src/execution.cc b/src/execution.cc
index 98c8b68..ad4466e 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -199,6 +199,8 @@
 
 Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
   ASSERT(!object->IsJSFunction());
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
 
   // If you return a function from here, it will be called when an
   // attempt is made to call the given object as a function.
@@ -206,7 +208,7 @@
   // Regular expressions can be called as functions in both Firefox
   // and Safari so we allow it too.
   if (object->IsJSRegExp()) {
-    Handle<String> exec = FACTORY->exec_symbol();
+    Handle<String> exec = factory->exec_symbol();
     // TODO(lrn): Bug 617.  We should use the default function here, not the
     // one on the RegExp object.
     Object* exec_function;
@@ -214,7 +216,7 @@
       // This can lose an exception, but the alternative is to put a failure
       // object in a handle, which is not GC safe.
       if (!maybe_exec_function->ToObject(&exec_function)) {
-        return FACTORY->undefined_value();
+        return factory->undefined_value();
       }
     }
     return Handle<Object>(exec_function);
@@ -225,15 +227,16 @@
   if (object->IsHeapObject() &&
       HeapObject::cast(*object)->map()->has_instance_call_handler()) {
     return Handle<JSFunction>(
-        Isolate::Current()->global_context()->call_as_function_delegate());
+        isolate->global_context()->call_as_function_delegate());
   }
 
-  return FACTORY->undefined_value();
+  return factory->undefined_value();
 }
 
 
 Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
   ASSERT(!object->IsJSFunction());
+  Isolate* isolate = Isolate::Current();
 
   // If you return a function from here, it will be called when an
   // attempt is made to call the given object as a constructor.
@@ -243,10 +246,10 @@
   if (object->IsHeapObject() &&
       HeapObject::cast(*object)->map()->has_instance_call_handler()) {
     return Handle<JSFunction>(
-        Isolate::Current()->global_context()->call_as_constructor_delegate());
+        isolate->global_context()->call_as_constructor_delegate());
   }
 
-  return FACTORY->undefined_value();
+  return isolate->factory()->undefined_value();
 }
 
 
@@ -467,10 +470,11 @@
 
 #define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception)            \
   do {                                                                         \
+    Isolate* isolate = Isolate::Current();                                     \
     Object** args[argc] = argv;                                                \
     ASSERT(has_pending_exception != NULL);                                     \
-    return Call(Isolate::Current()->name##_fun(),                              \
-                Isolate::Current()->js_builtins_object(), argc, args,          \
+    return Call(isolate->name##_fun(),                                         \
+                isolate->js_builtins_object(), argc, args,                     \
                 has_pending_exception);                                        \
   } while (false)
 
@@ -549,20 +553,23 @@
 
 
 Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
+  Isolate* isolate = string->GetIsolate();
+  Factory* factory = isolate->factory();
+
   int int_index = static_cast<int>(index);
   if (int_index < 0 || int_index >= string->length()) {
-    return FACTORY->undefined_value();
+    return factory->undefined_value();
   }
 
   Handle<Object> char_at =
-      GetProperty(Isolate::Current()->js_builtins_object(),
-                  FACTORY->char_at_symbol());
+      GetProperty(isolate->js_builtins_object(),
+                  factory->char_at_symbol());
   if (!char_at->IsJSFunction()) {
-    return FACTORY->undefined_value();
+    return factory->undefined_value();
   }
 
   bool caught_exception;
-  Handle<Object> index_object = FACTORY->NewNumberFromInt(int_index);
+  Handle<Object> index_object = factory->NewNumberFromInt(int_index);
   Object** index_arg[] = { index_object.location() };
   Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
                                   string,
@@ -570,7 +577,7 @@
                                   index_arg,
                                   &caught_exception);
   if (caught_exception) {
-    return FACTORY->undefined_value();
+    return factory->undefined_value();
   }
   return result;
 }
@@ -578,17 +585,18 @@
 
 Handle<JSFunction> Execution::InstantiateFunction(
     Handle<FunctionTemplateInfo> data, bool* exc) {
+  Isolate* isolate = data->GetIsolate();
   // Fast case: see if the function has already been instantiated
   int serial_number = Smi::cast(data->serial_number())->value();
   Object* elm =
-      Isolate::Current()->global_context()->function_cache()->
+      isolate->global_context()->function_cache()->
           GetElementNoExceptionThrown(serial_number);
   if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
   // The function has not yet been instantiated in this context; do it.
   Object** args[1] = { Handle<Object>::cast(data).location() };
   Handle<Object> result =
-      Call(Isolate::Current()->instantiate_fun(),
-           Isolate::Current()->js_builtins_object(), 1, args, exc);
+      Call(isolate->instantiate_fun(),
+           isolate->js_builtins_object(), 1, args, exc);
   if (*exc) return Handle<JSFunction>::null();
   return Handle<JSFunction>::cast(result);
 }
@@ -596,12 +604,13 @@
 
 Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
                                               bool* exc) {
+  Isolate* isolate = data->GetIsolate();
   if (data->property_list()->IsUndefined() &&
       !data->constructor()->IsUndefined()) {
     // Initialization to make gcc happy.
     Object* result = NULL;
     {
-      HandleScope scope;
+      HandleScope scope(isolate);
       Handle<FunctionTemplateInfo> cons_template =
           Handle<FunctionTemplateInfo>(
               FunctionTemplateInfo::cast(data->constructor()));
@@ -616,8 +625,8 @@
   } else {
     Object** args[1] = { Handle<Object>::cast(data).location() };
     Handle<Object> result =
-        Call(Isolate::Current()->instantiate_fun(),
-             Isolate::Current()->js_builtins_object(), 1, args, exc);
+        Call(isolate->instantiate_fun(),
+             isolate->js_builtins_object(), 1, args, exc);
     if (*exc) return Handle<JSObject>::null();
     return Handle<JSObject>::cast(result);
   }
@@ -627,9 +636,10 @@
 void Execution::ConfigureInstance(Handle<Object> instance,
                                   Handle<Object> instance_template,
                                   bool* exc) {
+  Isolate* isolate = Isolate::Current();
   Object** args[2] = { instance.location(), instance_template.location() };
-  Execution::Call(Isolate::Current()->configure_instance_fun(),
-                  Isolate::Current()->js_builtins_object(), 2, args, exc);
+  Execution::Call(isolate->configure_instance_fun(),
+                  isolate->js_builtins_object(), 2, args, exc);
 }
 
 
@@ -637,6 +647,7 @@
                                             Handle<JSFunction> fun,
                                             Handle<Object> pos,
                                             Handle<Object> is_global) {
+  Isolate* isolate = fun->GetIsolate();
   const int argc = 4;
   Object** args[argc] = { recv.location(),
                           Handle<Object>::cast(fun).location(),
@@ -644,10 +655,13 @@
                           is_global.location() };
   bool caught_exception = false;
   Handle<Object> result =
-      TryCall(Isolate::Current()->get_stack_trace_line_fun(),
-              Isolate::Current()->js_builtins_object(), argc, args,
+      TryCall(isolate->get_stack_trace_line_fun(),
+              isolate->js_builtins_object(), argc, args,
               &caught_exception);
-  if (caught_exception || !result->IsString()) return FACTORY->empty_symbol();
+  if (caught_exception || !result->IsString()) {
+    return isolate->factory()->empty_symbol();
+  }
+
   return Handle<String>::cast(result);
 }
 
@@ -728,10 +742,11 @@
 }
 
 void Execution::ProcessDebugMesssages(bool debug_command_only) {
+  Isolate* isolate = Isolate::Current();
   // Clear the debug command request flag.
-  Isolate::Current()->stack_guard()->Continue(DEBUGCOMMAND);
+  isolate->stack_guard()->Continue(DEBUGCOMMAND);
 
-  HandleScope scope;
+  HandleScope scope(isolate);
   // Enter the debugger. Just continue if we fail to enter the debugger.
   EnterDebugger debugger;
   if (debugger.FailedToEnter()) {
@@ -740,8 +755,8 @@
 
   // Notify the debug event listeners. Indicate auto continue if the break was
   // a debug command break.
-  Isolate::Current()->debugger()->OnDebugBreak(FACTORY->undefined_value(),
-                                               debug_command_only);
+  isolate->debugger()->OnDebugBreak(isolate->factory()->undefined_value(),
+                                    debug_command_only);
 }
 
 
diff --git a/src/extensions/experimental/break-iterator.cc b/src/extensions/experimental/break-iterator.cc
index 6f574d4..e8baea7 100644
--- a/src/extensions/experimental/break-iterator.cc
+++ b/src/extensions/experimental/break-iterator.cc
@@ -46,16 +46,16 @@
   return NULL;
 }
 
-UnicodeString* BreakIterator::ResetAdoptedText(
+icu::UnicodeString* BreakIterator::ResetAdoptedText(
     v8::Handle<v8::Object> obj, v8::Handle<v8::Value> value) {
   // Get the previous value from the internal field.
-  UnicodeString* text = static_cast<UnicodeString*>(
+  icu::UnicodeString* text = static_cast<icu::UnicodeString*>(
       obj->GetPointerFromInternalField(1));
   delete text;
 
   // Assign new value to the internal pointer.
   v8::String::Value text_value(value);
-  text = new UnicodeString(
+  text = new icu::UnicodeString(
       reinterpret_cast<const UChar*>(*text_value), text_value.length());
   obj->SetPointerInInternalField(1, text);
 
@@ -74,7 +74,7 @@
   // pointing to a break iterator.
   delete UnpackBreakIterator(persistent_object);
 
-  delete static_cast<UnicodeString*>(
+  delete static_cast<icu::UnicodeString*>(
       persistent_object->GetPointerFromInternalField(1));
 
   // Then dispose of the persistent handle to JS object.
@@ -144,8 +144,9 @@
   }
 
   // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
-  int32_t status =
-      static_cast<RuleBasedBreakIterator*>(break_iterator)->getRuleStatus();
+  icu::RuleBasedBreakIterator* rule_based_iterator =
+      static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
+  int32_t status = rule_based_iterator->getRuleStatus();
   // Keep return values in sync with JavaScript BreakType enum.
   if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
     return v8::Int32::New(UBRK_WORD_NONE);
diff --git a/src/extensions/experimental/break-iterator.h b/src/extensions/experimental/break-iterator.h
index 473bc89..fac1ed8 100644
--- a/src/extensions/experimental/break-iterator.h
+++ b/src/extensions/experimental/break-iterator.h
@@ -51,8 +51,8 @@
 
   // Deletes the old value and sets the adopted text in
   // corresponding JavaScript object.
-  static UnicodeString* ResetAdoptedText(v8::Handle<v8::Object> obj,
-                                         v8::Handle<v8::Value> text_value);
+  static icu::UnicodeString* ResetAdoptedText(v8::Handle<v8::Object> obj,
+                                              v8::Handle<v8::Value> text_value);
 
   // Release memory we allocated for the BreakIterator once the JS object that
   // holds the pointer gets garbage collected.
diff --git a/src/extensions/experimental/i18n-extension.cc b/src/extensions/experimental/i18n-extension.cc
index e65fdcc..f14fd9e 100644
--- a/src/extensions/experimental/i18n-extension.cc
+++ b/src/extensions/experimental/i18n-extension.cc
@@ -167,7 +167,7 @@
   v8::Local<v8::Array> all_locales = v8::Array::New();
 
   int count = 0;
-  const Locale* icu_locales = icu::Locale::getAvailableLocales(count);
+  const icu::Locale* icu_locales = icu::Locale::getAvailableLocales(count);
   for (int i = 0; i < count; ++i) {
     all_locales->Set(i, v8::String::New(icu_locales[i].getName()));
   }
@@ -230,7 +230,7 @@
   icu::Locale icu_locale(base_locale.c_str());
   icu::Locale display_locale =
       icu::Locale(*v8::String::Utf8Value(args[1]->ToString()));
-  UnicodeString result;
+  icu::UnicodeString result;
   if (item == "language") {
     icu_locale.getDisplayLanguage(display_locale, result);
   } else if (item == "script") {
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 0bc6409..d6cb6e3 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
diff --git a/src/frames.cc b/src/frames.cc
index 79aa250..8acc1e6 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -617,7 +617,7 @@
 
 int JavaScriptFrame::ComputeParametersCount() const {
   Address base  = caller_sp() + JavaScriptFrameConstants::kReceiverOffset;
-  Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset;
+  Address limit = fp() + JavaScriptFrameConstants::kLastParameterOffset;
   return static_cast<int>((base - limit) / kPointerSize);
 }
 
@@ -1084,7 +1084,7 @@
 
 void JavaScriptFrame::IterateArguments(ObjectVisitor* v) const {
   // Traverse callee-saved registers, receiver, and parameters.
-  const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
+  const int kBaseOffset = JavaScriptFrameConstants::kLastParameterOffset;
   const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset;
   Object** base = &Memory::Object_at(fp() + kBaseOffset);
   Object** limit = &Memory::Object_at(caller_sp() + kLimitOffset) + 1;
diff --git a/src/frames.h b/src/frames.h
index bee95cc..3294eee 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -430,7 +430,7 @@
   Handle<Object> receiver() { return receiver_; }
   Handle<JSFunction> function() { return function_; }
   Handle<Code> code() { return code_; }
-  Address pc() { return reinterpret_cast<Address>(*code_) + offset_; }
+  Address pc() { return code_->address() + offset_; }
   int offset() { return offset_; }
   bool is_constructor() { return is_constructor_; }
 
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index d509cd5..b896fc8 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -286,7 +286,7 @@
   }
   CodeGenerator::MakeCodePrologue(info);
   const int kInitialBufferSize = 4 * KB;
-  MacroAssembler masm(NULL, kInitialBufferSize);
+  MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
 #ifdef ENABLE_GDB_JIT_INTERFACE
   masm.positions_recorder()->StartGDBJITLineInfoRecording();
 #endif
diff --git a/src/heap.cc b/src/heap.cc
index 5d1a66e..6250172 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -941,6 +941,8 @@
 
   gc_state_ = SCAVENGE;
 
+  SwitchScavengingVisitorsTableIfProfilingWasEnabled();
+
   Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
 #ifdef DEBUG
   VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
@@ -1232,6 +1234,32 @@
 }
 
 
+enum LoggingAndProfiling {
+  LOGGING_AND_PROFILING_ENABLED,
+  LOGGING_AND_PROFILING_DISABLED
+};
+
+
+typedef void (*ScavengingCallback)(Map* map,
+                                   HeapObject** slot,
+                                   HeapObject* object);
+
+
+static Atomic32 scavenging_visitors_table_mode_;
+static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+
+
+INLINE(static void DoScavengeObject(Map* map,
+                                    HeapObject** slot,
+                                    HeapObject* obj));
+
+
+void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+  scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
+}
+
+
+template<LoggingAndProfiling logging_and_profiling_mode>
 class ScavengingVisitor : public StaticVisitorBase {
  public:
   static void Initialize() {
@@ -1240,23 +1268,22 @@
     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
     table_.Register(kVisitByteArray, &EvacuateByteArray);
     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
+
     table_.Register(kVisitGlobalContext,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        VisitSpecialized<Context::kSize>);
-
-    typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;
+                        template VisitSpecialized<Context::kSize>);
 
     table_.Register(kVisitConsString,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        VisitSpecialized<ConsString::kSize>);
+                        template VisitSpecialized<ConsString::kSize>);
 
     table_.Register(kVisitSharedFunctionInfo,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        VisitSpecialized<SharedFunctionInfo::kSize>);
+                        template VisitSpecialized<SharedFunctionInfo::kSize>);
 
     table_.Register(kVisitJSFunction,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                    VisitSpecialized<JSFunction::kSize>);
+                        template VisitSpecialized<JSFunction::kSize>);
 
     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                    kVisitDataObject,
@@ -1271,12 +1298,10 @@
                                    kVisitStructGeneric>();
   }
 
-
-  static inline void Scavenge(Map* map, HeapObject** slot, HeapObject* obj) {
-    table_.GetVisitor(map)(map, slot, obj);
+  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
+    return &table_;
   }
 
-
  private:
   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
   enum SizeRestriction { SMALL, UNKNOWN_SIZE };
@@ -1313,21 +1338,24 @@
     // Set the forwarding address.
     source->set_map_word(MapWord::FromForwardingAddress(target));
 
+    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-    // Update NewSpace stats if necessary.
-    RecordCopiedObject(heap, target);
+      // Update NewSpace stats if necessary.
+      RecordCopiedObject(heap, target);
 #endif
-    HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
+      HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
 #if defined(ENABLE_LOGGING_AND_PROFILING)
-    Isolate* isolate = heap->isolate();
-    if (isolate->logger()->is_logging() ||
-        isolate->cpu_profiler()->is_profiling()) {
-      if (target->IsSharedFunctionInfo()) {
-        PROFILE(isolate, SharedFunctionInfoMoveEvent(
-            source->address(), target->address()));
+      Isolate* isolate = heap->isolate();
+      if (isolate->logger()->is_logging() ||
+          isolate->cpu_profiler()->is_profiling()) {
+        if (target->IsSharedFunctionInfo()) {
+          PROFILE(isolate, SharedFunctionInfoMoveEvent(
+              source->address(), target->address()));
+        }
       }
-    }
 #endif
+    }
+
     return target;
   }
 
@@ -1443,7 +1471,7 @@
         return;
       }
 
-      Scavenge(first->map(), slot, first);
+      DoScavengeObject(first->map(), slot, first);
       object->set_map_word(MapWord::FromForwardingAddress(*slot));
       return;
     }
@@ -1470,13 +1498,51 @@
     }
   };
 
-  typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object);
-
-  static VisitorDispatchTable<Callback> table_;
+  static VisitorDispatchTable<ScavengingCallback> table_;
 };
 
 
-VisitorDispatchTable<ScavengingVisitor::Callback> ScavengingVisitor::table_;
+template<LoggingAndProfiling logging_and_profiling_mode>
+VisitorDispatchTable<ScavengingCallback>
+    ScavengingVisitor<logging_and_profiling_mode>::table_;
+
+
+static void InitializeScavengingVisitorsTables() {
+  ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  scavenging_visitors_table_.CopyFrom(
+      ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
+  scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
+}
+
+
+void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
+  if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
+    // Table was already updated by some isolate.
+    return;
+  }
+
+  if (isolate()->logger()->is_logging() ||
+      isolate()->cpu_profiler()->is_profiling() ||
+      (isolate()->heap_profiler() != NULL &&
+       isolate()->heap_profiler()->is_profiling())) {
+    // If one of the isolates is doing a scavenge at this moment it might
+    // see this table in an inconsistent state when
+    // some of the callbacks point to
+    // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
+    // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
+    // However this does not lead to any bugs as such an isolate does not
+    // have profiling enabled, and any isolate with profiling enabled is
+    // guaranteed to see the table in a consistent state.
+    scavenging_visitors_table_.CopyFrom(
+        ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
+
+    // We use Release_Store to prevent reordering of this write before writes
+    // to the table.
+    Release_Store(&scavenging_visitors_table_mode_,
+                  LOGGING_AND_PROFILING_ENABLED);
+  }
+}
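The scavenging dispatch is deliberately a single process-wide table shared by all isolates: the callbacks are copied first, and only then is the mode published, so any isolate that observes LOGGING_AND_PROFILING_ENABLED also observes the fully switched table. A generic, self-contained sketch of that copy-then-publish pattern (std::atomic stands in here for V8's Atomic32/Release_Store; names are illustrative, not from heap.cc):

    #include <atomic>
    #include <cstring>

    typedef void (*Callback)(int id);
    struct DispatchTable { Callback entries[256]; };

    static DispatchTable current_table;        // read concurrently by scavengers
    static std::atomic<int> table_mode(0);     // 0 = plain, 1 = profiling

    void SwitchToProfilingTable(const DispatchTable& profiling_table) {
      if (table_mode.load(std::memory_order_acquire) == 1) return;  // already switched
      // Copy the callbacks first ...
      std::memcpy(&current_table, &profiling_table, sizeof(current_table));
      // ... then publish the mode; the release store keeps the copy ordered
      // before any observer that reads the mode with an acquire load.
      table_mode.store(1, std::memory_order_release);
    }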
 
 
 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
@@ -1484,7 +1550,7 @@
   MapWord first_word = object->map_word();
   ASSERT(!first_word.IsForwardingAddress());
   Map* map = first_word.ToMap();
-  ScavengingVisitor::Scavenge(map, p, object);
+  DoScavengeObject(map, p, object);
 }
 
 
@@ -4757,10 +4823,10 @@
   gc_initializer_mutex->Lock();
   static bool initialized_gc = false;
   if (!initialized_gc) {
-      initialized_gc = true;
-      ScavengingVisitor::Initialize();
-      NewSpaceScavenger::Initialize();
-      MarkCompactCollector::Initialize();
+    initialized_gc = true;
+    InitializeScavengingVisitorsTables();
+    NewSpaceScavenger::Initialize();
+    MarkCompactCollector::Initialize();
   }
   gc_initializer_mutex->Unlock();
 
diff --git a/src/heap.h b/src/heap.h
index 88074d7..7a1bed3 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -155,6 +155,7 @@
   V(name_symbol, "name")                                                 \
   V(number_symbol, "number")                                             \
   V(Number_symbol, "Number")                                             \
+  V(nan_symbol, "NaN")                                                   \
   V(RegExp_symbol, "RegExp")                                             \
   V(source_symbol, "source")                                             \
   V(global_symbol, "global")                                             \
@@ -1451,6 +1452,8 @@
   // Allocate empty fixed array.
   MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
 
+  void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
+
   // Performs a minor collection in new generation.
   void Scavenge();
 
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 9bbe164..6b0c8e4 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1342,12 +1342,17 @@
 }
 
 
-void HLoadGlobal::PrintDataTo(StringStream* stream) {
+void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
   stream->Add("[%p]", *cell());
   if (check_hole_value()) stream->Add(" (deleteable/read-only)");
 }
 
 
+void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
+  stream->Add("%o ", *name());
+}
+
+
 void HStoreGlobal::PrintDataTo(StringStream* stream) {
   stream->Add("[%p] = ", *cell());
   value()->PrintNameTo(stream);
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index fed4b8b..ddef8b7 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -124,7 +124,8 @@
   V(LoadElements)                              \
   V(LoadExternalArrayPointer)                  \
   V(LoadFunctionPrototype)                     \
-  V(LoadGlobal)                                \
+  V(LoadGlobalCell)                            \
+  V(LoadGlobalGeneric)                         \
   V(LoadKeyedFastElement)                      \
   V(LoadKeyedGeneric)                          \
   V(LoadKeyedSpecializedArrayElement)          \
@@ -2809,9 +2810,9 @@
 };
 
 
-class HLoadGlobal: public HTemplateInstruction<0> {
+class HLoadGlobalCell: public HTemplateInstruction<0> {
  public:
-  HLoadGlobal(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
+  HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
       : cell_(cell), check_hole_value_(check_hole_value) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
@@ -2832,11 +2833,11 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load_global")
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load_global_cell")
 
  protected:
   virtual bool DataEquals(HValue* other) {
-    HLoadGlobal* b = HLoadGlobal::cast(other);
+    HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
     return cell_.is_identical_to(b->cell());
   }
 
@@ -2846,6 +2847,38 @@
 };
 
 
+class HLoadGlobalGeneric: public HBinaryOperation {
+ public:
+  HLoadGlobalGeneric(HValue* context,
+                     HValue* global_object,
+                     Handle<Object> name,
+                     bool for_typeof)
+      : HBinaryOperation(context, global_object),
+        name_(name),
+        for_typeof_(for_typeof) {
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  HValue* context() { return OperandAt(0); }
+  HValue* global_object() { return OperandAt(1); }
+  Handle<Object> name() const { return name_; }
+  bool for_typeof() const { return for_typeof_; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load_global_generic")
+
+ private:
+  Handle<Object> name_;
+  bool for_typeof_;
+};
+
+
 class HStoreGlobal: public HUnaryOperation {
  public:
   HStoreGlobal(HValue* value,
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 2383192..433618f 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -606,7 +606,7 @@
 
   if (!FLAG_use_lithium) return Handle<Code>::null();
 
-  MacroAssembler assembler(NULL, 0);
+  MacroAssembler assembler(info->isolate(), NULL, 0);
   LCodeGen generator(chunk, &assembler, info);
 
   if (FLAG_eliminate_empty_blocks) {
@@ -1980,7 +1980,10 @@
 // Implementation of utility classes to represent an expression's context in
 // the AST.
 AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
-    : owner_(owner), kind_(kind), outer_(owner->ast_context()) {
+    : owner_(owner),
+      kind_(kind),
+      outer_(owner->ast_context()),
+      for_typeof_(false) {
   owner->set_ast_context(this);  // Push.
 #ifdef DEBUG
   original_length_ = owner->environment()->length();
@@ -2124,6 +2127,14 @@
 }
 
 
+void HGraphBuilder::VisitForTypeOf(Expression* expr) {
+  ValueContext for_value(this);
+  for_value.set_for_typeof(true);
+  Visit(expr);
+}
+
+
+
 void HGraphBuilder::VisitForControl(Expression* expr,
                                     HBasicBlock* true_block,
                                     HBasicBlock* false_block) {
@@ -2767,9 +2778,33 @@
 }
 
 
+static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
+    Code* unoptimized_code, FunctionLiteral* expr) {
+  int start_position = expr->start_position();
+  RelocIterator it(unoptimized_code);
+  for (;!it.done(); it.next()) {
+    RelocInfo* rinfo = it.rinfo();
+    if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
+    Object* obj = rinfo->target_object();
+    if (obj->IsSharedFunctionInfo()) {
+      SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+      if (shared->start_position() == start_position) {
+        return Handle<SharedFunctionInfo>(shared);
+      }
+    }
+  }
+
+  return Handle<SharedFunctionInfo>();
+}
+
+
 void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
   Handle<SharedFunctionInfo> shared_info =
-      Compiler::BuildFunctionInfo(expr, info()->script());
+      SearchSharedFunctionInfo(info()->shared_info()->code(),
+                               expr);
+  if (shared_info.is_null()) {
+    shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
+  }
   CHECK_BAILOUT;
   HFunctionLiteral* instr =
       new HFunctionLiteral(shared_info, expr->pretenure());
@@ -2809,29 +2844,21 @@
 }
 
 
-void HGraphBuilder::LookupGlobalPropertyCell(Variable* var,
-                                             LookupResult* lookup,
-                                             bool is_store) {
-  if (var->is_this()) {
-    BAILOUT("global this reference");
-  }
-  if (!info()->has_global_object()) {
-    BAILOUT("no global object to optimize VariableProxy");
+HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
+    Variable* var, LookupResult* lookup, bool is_store) {
+  if (var->is_this() || !info()->has_global_object()) {
+    return kUseGeneric;
   }
   Handle<GlobalObject> global(info()->global_object());
   global->Lookup(*var->name(), lookup);
-  if (!lookup->IsProperty()) {
-    BAILOUT("global variable cell not yet introduced");
+  if (!lookup->IsProperty() ||
+      lookup->type() != NORMAL ||
+      (is_store && lookup->IsReadOnly()) ||
+      lookup->holder() != *global) {
+    return kUseGeneric;
   }
-  if (lookup->type() != NORMAL) {
-    BAILOUT("global variable has accessors");
-  }
-  if (is_store && lookup->IsReadOnly()) {
-    BAILOUT("read-only global variable");
-  }
-  if (lookup->holder() != *global) {
-    BAILOUT("global property on prototype of global object");
-  }
+
+  return kUseCell;
 }
 
 
@@ -2867,19 +2894,33 @@
     ast_context()->ReturnInstruction(instr, expr->id());
   } else if (variable->is_global()) {
     LookupResult lookup;
-    LookupGlobalPropertyCell(variable, &lookup, false);
-    CHECK_BAILOUT;
+    GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, false);
 
-    Handle<GlobalObject> global(info()->global_object());
-    // TODO(3039103): Handle global property load through an IC call when access
-    // checks are enabled.
-    if (global->IsAccessCheckNeeded()) {
-      BAILOUT("global object requires access check");
+    if (type == kUseCell &&
+        info()->global_object()->IsAccessCheckNeeded()) {
+      type = kUseGeneric;
     }
-    Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-    bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
-    HLoadGlobal* instr = new HLoadGlobal(cell, check_hole);
-    ast_context()->ReturnInstruction(instr, expr->id());
+
+    if (type == kUseCell) {
+      Handle<GlobalObject> global(info()->global_object());
+      Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+      bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+      HLoadGlobalCell* instr = new HLoadGlobalCell(cell, check_hole);
+      ast_context()->ReturnInstruction(instr, expr->id());
+    } else {
+      HContext* context = new HContext;
+      AddInstruction(context);
+      HGlobalObject* global_object = new HGlobalObject(context);
+      AddInstruction(global_object);
+      HLoadGlobalGeneric* instr =
+          new HLoadGlobalGeneric(context,
+                                 global_object,
+                                 variable->name(),
+                                 ast_context()->is_for_typeof());
+      instr->set_position(expr->position());
+      ASSERT(instr->HasSideEffects());
+      ast_context()->ReturnInstruction(instr, expr->id());
+    }
   } else {
     BAILOUT("reference to a variable which requires dynamic lookup");
   }
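The new LookupGlobalProperty helper turns the old hard bailouts into a two-way decision: kUseCell keeps the fast, cell-embedding path, while everything else (this references, missing global object, accessors, prototype properties, access-checked globals) now degrades to a generic IC load instead of aborting optimization; stores still bail out when no cell is available, as the hunk below shows. A condensed restatement of the load-side decision (not additional source):

    LookupResult lookup;
    GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, false);
    if (type == kUseCell && !info()->global_object()->IsAccessCheckNeeded()) {
      // Known data property on the global object itself: embed the property
      // cell and load it directly (HLoadGlobalCell).
    } else {
      // Otherwise fall back to a generic load through the IC machinery
      // (HLoadGlobalGeneric), which keeps the function optimizable.
    }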
@@ -3250,16 +3291,18 @@
                                                    int position,
                                                    int ast_id) {
   LookupResult lookup;
-  LookupGlobalPropertyCell(var, &lookup, true);
-  CHECK_BAILOUT;
-
-  bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
-  Handle<GlobalObject> global(info()->global_object());
-  Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-  HInstruction* instr = new HStoreGlobal(value, cell, check_hole);
-  instr->set_position(position);
-  AddInstruction(instr);
-  if (instr->HasSideEffects()) AddSimulate(ast_id);
+  GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
+  if (type == kUseCell) {
+    bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+    Handle<GlobalObject> global(info()->global_object());
+    Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+    HInstruction* instr = new HStoreGlobal(value, cell, check_hole);
+    instr->set_position(position);
+    AddInstruction(instr);
+    if (instr->HasSideEffects()) AddSimulate(ast_id);
+  } else {
+    BAILOUT("global store only supported for cells");
+  }
 }
 
 
@@ -3828,12 +3871,18 @@
 
 void HGraphBuilder::TraceInline(Handle<JSFunction> target, const char* reason) {
   if (FLAG_trace_inlining) {
-    SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
-    SmartPointer<char> caller =
-        info()->function()->debug_name()->ToCString();
     if (reason == NULL) {
+      // We are currently in the context of an inlined function, thus we
+      // have to go to an outer FunctionState to get the caller.
+      SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
+      SmartPointer<char> caller =
+          function_state()->outer()->compilation_info()->function()->
+              debug_name()->ToCString();
       PrintF("Inlined %s called from %s.\n", *callee, *caller);
     } else {
+      SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
+      SmartPointer<char> caller =
+          info()->function()->debug_name()->ToCString();
       PrintF("Did not inline %s called from %s (%s).\n",
              *callee, *caller, reason);
     }
@@ -4287,10 +4336,12 @@
       // If there is a global property cell for the name at compile time and
       // access check is not enabled we assume that the function will not change
       // and generate optimized code for calling the function.
-      if (info()->has_global_object() &&
+      LookupResult lookup;
+      GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
+      if (type == kUseCell &&
           !info()->global_object()->IsAccessCheckNeeded()) {
         Handle<GlobalObject> global(info()->global_object());
-        known_global_function = expr->ComputeGlobalTarget(global, var->name());
+        known_global_function = expr->ComputeGlobalTarget(global, &lookup);
       }
       if (known_global_function) {
         // Push the global object instead of the global receiver because
@@ -4493,7 +4544,8 @@
     }
 
   } else if (op == Token::TYPEOF) {
-    VISIT_FOR_VALUE(expr->expression());
+    VisitForTypeOf(expr->expression());
+    if (HasStackOverflow()) return;
     HValue* value = Pop();
     ast_context()->ReturnInstruction(new HTypeof(value), expr->id());
 
@@ -4896,7 +4948,8 @@
   if ((expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT) &&
       left_unary != NULL && left_unary->op() == Token::TYPEOF &&
       right_literal != NULL && right_literal->handle()->IsString()) {
-    VISIT_FOR_VALUE(left_unary->expression());
+    VisitForTypeOf(left_unary->expression());
+    if (HasStackOverflow()) return;
     HValue* left = Pop();
     HInstruction* instr = new HTypeofIs(left,
         Handle<String>::cast(right_literal->handle()));
@@ -5093,7 +5146,14 @@
 // Support for construct call checks.
 void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 0);
-  ast_context()->ReturnInstruction(new HIsConstructCall, call->id());
+  if (function_state()->outer() != NULL) {
+    // We are generating the graph for an inlined function. Currently
+    // constructor inlining is not supported, so we can just return
+    // false from %_IsConstructCall().
+    ast_context()->ReturnValue(graph()->GetConstantFalse());
+  } else {
+    ast_context()->ReturnInstruction(new HIsConstructCall, call->id());
+  }
 }
 
 
diff --git a/src/hydrogen.h b/src/hydrogen.h
index e14799a..93664e9 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -453,6 +453,9 @@
   // the instruction as value.
   virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
 
+  void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
+  bool is_for_typeof() { return for_typeof_; }
+
  protected:
   AstContext(HGraphBuilder* owner, Expression::Context kind);
   virtual ~AstContext();
@@ -469,6 +472,7 @@
   HGraphBuilder* owner_;
   Expression::Context kind_;
   AstContext* outer_;
+  bool for_typeof_;
 };
 
 
@@ -544,6 +548,8 @@
     test_context_ = NULL;
   }
 
+  FunctionState* outer() { return outer_; }
+
  private:
   HGraphBuilder* owner_;
 
@@ -735,6 +741,7 @@
   void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
 
   void VisitForValue(Expression* expr);
+  void VisitForTypeOf(Expression* expr);
   void VisitForEffect(Expression* expr);
   void VisitForControl(Expression* expr,
                        HBasicBlock* true_block,
@@ -770,9 +777,13 @@
   HBasicBlock* CreateLoopHeaderBlock();
 
   // Helpers for flow graph construction.
-  void LookupGlobalPropertyCell(Variable* var,
-                                LookupResult* lookup,
-                                bool is_store);
+  enum GlobalPropertyAccess {
+    kUseCell,
+    kUseGeneric
+  };
+  GlobalPropertyAccess LookupGlobalProperty(Variable* var,
+                                            LookupResult* lookup,
+                                            bool is_store);
 
   bool TryArgumentsAccess(Property* expr);
   bool TryCallApply(Call* expr);
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 1da3f81..a9247f4 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -225,9 +225,9 @@
     StaticVisitor::VisitPointer(heap, target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
-    StaticVisitor::VisitCodeTarget(this);
+    StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
-    StaticVisitor::VisitGlobalPropertyCell(this);
+    StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
     CPU::FlushICache(pc_, sizeof(Address));
@@ -237,7 +237,7 @@
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
-    StaticVisitor::VisitDebugTarget(this);
+    StaticVisitor::VisitDebugTarget(heap, this);
 #endif
   } else if (mode == RelocInfo::RUNTIME_ENTRY) {
     StaticVisitor::VisitRuntimeEntry(this);
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index e6d245e..9273037 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -48,24 +48,37 @@
 // -----------------------------------------------------------------------------
 // Implementation of CpuFeatures
 
-CpuFeatures::CpuFeatures()
-    : supported_(0),
-      enabled_(0),
-      found_by_runtime_probing_(0) {
-}
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+uint64_t CpuFeatures::supported_ = 0;
+uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 
-// The Probe method needs executable memory, so it uses Heap::CreateCode.
-// Allocation failure is silent and leads to safe default.
-void CpuFeatures::Probe(bool portable) {
-  ASSERT(HEAP->HasBeenSetup());
+void CpuFeatures::Probe() {
+  ASSERT(!initialized_);
   ASSERT(supported_ == 0);
-  if (portable && Serializer::enabled()) {
+#ifdef DEBUG
+  initialized_ = true;
+#endif
+  if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
 
-  Assembler assm(NULL, 0);
+  const int kBufferSize = 4 * KB;
+  VirtualMemory* memory = new VirtualMemory(kBufferSize);
+  if (!memory->IsReserved()) {
+    delete memory;
+    return;
+  }
+  ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
+  if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
+    delete memory;
+    return;
+  }
+
+  Assembler assm(NULL, memory->address(), kBufferSize);
   Label cpuid, done;
 #define __ assm.
   // Save old esp, since we are going to modify the stack.
@@ -119,27 +132,15 @@
   __ ret(0);
 #undef __
 
-  CodeDesc desc;
-  assm.GetCode(&desc);
-  Object* code;
-  { MaybeObject* maybe_code =
-        assm.isolate()->heap()->CreateCode(desc,
-                                           Code::ComputeFlags(Code::STUB),
-                                           Handle<Code>::null());
-    if (!maybe_code->ToObject(&code)) return;
-  }
-  if (!code->IsCode()) return;
-
-  PROFILE(ISOLATE,
-          CodeCreateEvent(Logger::BUILTIN_TAG,
-                          Code::cast(code), "CpuFeatures::Probe"));
   typedef uint64_t (*F0)();
-  F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+  F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
   supported_ = probe();
   found_by_runtime_probing_ = supported_;
   uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
   supported_ |= os_guarantees;
-  found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
+  found_by_runtime_probing_ &= ~os_guarantees;
+
+  delete memory;
 }
 
 
@@ -297,8 +298,8 @@
 static void InitCoverageLog();
 #endif
 
-Assembler::Assembler(void* buffer, int buffer_size)
-    : AssemblerBase(Isolate::Current()),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+    : AssemblerBase(arg_isolate),
       positions_recorder_(this),
       emit_debug_code_(FLAG_debug_code) {
   if (buffer == NULL) {
@@ -386,7 +387,7 @@
 
 
 void Assembler::cpuid() {
-  ASSERT(isolate()->cpu_features()->IsEnabled(CPUID));
+  ASSERT(CpuFeatures::IsEnabled(CPUID));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x0F);
@@ -747,7 +748,7 @@
 
 
 void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(CMOV));
+  ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   UNIMPLEMENTED();
@@ -758,7 +759,7 @@
 
 
 void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(CMOV));
+  ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   UNIMPLEMENTED();
@@ -769,7 +770,7 @@
 
 
 void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(CMOV));
+  ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   // Opcode: 0f 40 + cc /r.
@@ -1450,7 +1451,7 @@
 
 
 void Assembler::rdtsc() {
-  ASSERT(isolate()->cpu_features()->IsEnabled(RDTSC));
+  ASSERT(CpuFeatures::IsEnabled(RDTSC));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x0F);
@@ -1856,7 +1857,7 @@
 
 
 void Assembler::fisttp_s(const Operand& adr) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
+  ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xDB);
@@ -1865,7 +1866,7 @@
 
 
 void Assembler::fisttp_d(const Operand& adr) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
+  ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xDD);
@@ -2134,7 +2135,7 @@
 
 
 void Assembler::cvttss2si(Register dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);
@@ -2145,7 +2146,7 @@
 
 
 void Assembler::cvttsd2si(Register dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2156,7 +2157,7 @@
 
 
 void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2167,7 +2168,7 @@
 
 
 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);
@@ -2178,7 +2179,7 @@
 
 
 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2189,7 +2190,7 @@
 
 
 void Assembler::addsd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2200,7 +2201,7 @@
 
 
 void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2211,7 +2212,7 @@
 
 
 void Assembler::subsd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2222,7 +2223,7 @@
 
 
 void Assembler::divsd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2233,7 +2234,7 @@
 
 
 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2264,7 +2265,7 @@
 
 
 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2275,7 +2276,7 @@
 
 
 void Assembler::movmskpd(Register dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2286,7 +2287,7 @@
 
 
 void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2298,7 +2299,7 @@
 
 
 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x0F);
@@ -2308,7 +2309,7 @@
 
 
 void Assembler::movdqa(const Operand& dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2319,7 +2320,7 @@
 
 
 void Assembler::movdqa(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2330,7 +2331,7 @@
 
 
 void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);
@@ -2341,7 +2342,7 @@
 
 
 void Assembler::movdqu(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);
@@ -2352,7 +2353,7 @@
 
 
 void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2364,7 +2365,7 @@
 
 
 void Assembler::movntdq(const Operand& dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2400,7 +2401,7 @@
 
 
 void Assembler::movsd(const Operand& dst, XMMRegister src ) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);  // double
@@ -2411,7 +2412,7 @@
 
 
 void Assembler::movsd(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);  // double
@@ -2422,7 +2423,7 @@
 
 
 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2433,7 +2434,7 @@
 
 
 void Assembler::movss(const Operand& dst, XMMRegister src ) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);  // float
@@ -2444,7 +2445,7 @@
 
 
 void Assembler::movss(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);  // float
@@ -2455,7 +2456,7 @@
 
 
 void Assembler::movss(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);
@@ -2466,7 +2467,7 @@
 
 
 void Assembler::movd(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2477,7 +2478,7 @@
 
 
 void Assembler::movd(const Operand& dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2488,7 +2489,7 @@
 
 
 void Assembler::pand(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2499,7 +2500,7 @@
 
 
 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2510,7 +2511,7 @@
 
 
 void Assembler::por(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2521,7 +2522,7 @@
 
 
 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2533,7 +2534,7 @@
 
 
 void Assembler::psllq(XMMRegister reg, int8_t shift) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2545,7 +2546,7 @@
 
 
 void Assembler::psllq(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2556,7 +2557,7 @@
 
 
 void Assembler::psrlq(XMMRegister reg, int8_t shift) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2568,7 +2569,7 @@
 
 
 void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2579,7 +2580,7 @@
 
 
 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2591,7 +2592,7 @@
 
 
 void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2604,7 +2605,7 @@
 
 
 void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 8e0c762..079dca7 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -446,16 +446,15 @@
 //   } else {
 //     // Generate standard x87 floating point code.
 //   }
-class CpuFeatures {
+class CpuFeatures : public AllStatic {
  public:
-  // Detect features of the target CPU. If the portable flag is set,
-  // the method sets safe defaults if the serializer is enabled
-  // (snapshots must be portable).
-  void Probe(bool portable);
-  void Clear() { supported_ = 0; }
+  // Detect features of the target CPU. Set safe defaults if the serializer
+  // is enabled (snapshots must be portable).
+  static void Probe();
 
   // Check whether a feature is supported by the target CPU.
-  bool IsSupported(CpuFeature f) const {
+  static bool IsSupported(CpuFeature f) {
+    ASSERT(initialized_);
     if (f == SSE2 && !FLAG_enable_sse2) return false;
     if (f == SSE3 && !FLAG_enable_sse3) return false;
     if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
@@ -463,46 +462,85 @@
     if (f == RDTSC && !FLAG_enable_rdtsc) return false;
     return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
   }
+
+#ifdef DEBUG
   // Check whether a feature is currently enabled.
-  bool IsEnabled(CpuFeature f) const {
-    return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
+  static bool IsEnabled(CpuFeature f) {
+    ASSERT(initialized_);
+    Isolate* isolate = Isolate::UncheckedCurrent();
+    if (isolate == NULL) {
+      // When no isolate is available, work as if we're running in
+      // release mode.
+      return IsSupported(f);
+    }
+    uint64_t enabled = isolate->enabled_cpu_features();
+    return (enabled & (static_cast<uint64_t>(1) << f)) != 0;
   }
+#endif
+
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
    public:
-    explicit Scope(CpuFeature f)
-        : cpu_features_(Isolate::Current()->cpu_features()),
-          isolate_(Isolate::Current()) {
+    explicit Scope(CpuFeature f) {
       uint64_t mask = static_cast<uint64_t>(1) << f;
-      ASSERT(cpu_features_->IsSupported(f));
+      ASSERT(CpuFeatures::IsSupported(f));
       ASSERT(!Serializer::enabled() ||
-          (cpu_features_->found_by_runtime_probing_ & mask) == 0);
-      old_enabled_ = cpu_features_->enabled_;
-      cpu_features_->enabled_ |= mask;
+             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+      isolate_ = Isolate::UncheckedCurrent();
+      old_enabled_ = 0;
+      if (isolate_ != NULL) {
+        old_enabled_ = isolate_->enabled_cpu_features();
+        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+      }
     }
     ~Scope() {
-      ASSERT_EQ(Isolate::Current(), isolate_);
-      cpu_features_->enabled_ = old_enabled_;
+      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+      if (isolate_ != NULL) {
+        isolate_->set_enabled_cpu_features(old_enabled_);
+      }
     }
    private:
-    uint64_t old_enabled_;
-    CpuFeatures* cpu_features_;
     Isolate* isolate_;
+    uint64_t old_enabled_;
 #else
    public:
     explicit Scope(CpuFeature f) {}
 #endif
   };
 
+  class TryForceFeatureScope BASE_EMBEDDED {
+   public:
+    explicit TryForceFeatureScope(CpuFeature f)
+        : old_supported_(CpuFeatures::supported_) {
+      if (CanForce()) {
+        CpuFeatures::supported_ |= (static_cast<uint64_t>(1) << f);
+      }
+    }
+
+    ~TryForceFeatureScope() {
+      if (CanForce()) {
+        CpuFeatures::supported_ = old_supported_;
+      }
+    }
+
+   private:
+    static bool CanForce() {
+      // It's only safe to temporarily force support of CPU features
+      // when there's only a single isolate, which is guaranteed when
+      // the serializer is enabled.
+      return Serializer::enabled();
+    }
+
+    const uint64_t old_supported_;
+  };
+
  private:
-  CpuFeatures();
-
-  uint64_t supported_;
-  uint64_t enabled_;
-  uint64_t found_by_runtime_probing_;
-
-  friend class Isolate;
+#ifdef DEBUG
+  static bool initialized_;
+#endif
+  static uint64_t supported_;
+  static uint64_t found_by_runtime_probing_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
 };
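
Usage sketch (illustrative only, not part of the diff; the emitter name below is hypothetical). With CpuFeatures now all-static, a code generator checks support once and opens a Scope while emitting the guarded instructions; it assumes CpuFeatures::Probe() has already run via CPU::Setup() and that an isolate has been entered, so the DEBUG-only IsEnabled() reads that isolate's enabled_cpu_features() mask:

  void EmitDoubleAdd(MacroAssembler* masm) {
    if (CpuFeatures::IsSupported(SSE2)) {
      // In DEBUG builds the Scope sets the SSE2 bit in the current isolate's
      // enabled_cpu_features(), which is what the assembler's
      // ASSERT(CpuFeatures::IsEnabled(SSE2)) checks read back.
      CpuFeatures::Scope use_sse2(SSE2);
      masm->addsd(xmm0, xmm1);
    } else {
      masm->faddp(1);  // x87 fallback.
    }
  }

TryForceFeatureScope, by contrast, temporarily flips the supported_ bit itself and is therefore only used when Serializer::enabled() guarantees a single isolate (see Generate_OnStackReplacement below).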
@@ -535,7 +573,8 @@
   // for code generation and assumes its size to be buffer_size. If the buffer
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
-  Assembler(void* buffer, int buffer_size);
+  // TODO(vitalyr): the assembler does not need an isolate.
+  Assembler(Isolate* isolate, void* buffer, int buffer_size);
   ~Assembler();
 
   // Overrides the default provided by FLAG_debug_code.
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 2970a0e..97d2b03 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1523,12 +1523,8 @@
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  // We shouldn't be performing on-stack replacement in the first
-  // place if the CPU features we need for the optimized Crankshaft
-  // code aren't supported.
-  CpuFeatures* cpu_features = masm->isolate()->cpu_features();
-  cpu_features->Probe(false);
-  if (!cpu_features->IsSupported(SSE2)) {
+  CpuFeatures::TryForceFeatureScope scope(SSE2);
+  if (!CpuFeatures::IsSupported(SSE2)) {
     __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
     return;
   }
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 96faae9..78daf7c 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -771,7 +771,7 @@
         // number in eax.
         __ AllocateHeapNumber(eax, ecx, ebx, slow);
         // Store the result in the HeapNumber and return.
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+        if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           __ cvtsi2sd(xmm0, Operand(left));
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -821,7 +821,7 @@
       }
       if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
         __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+        if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           FloatingPointHelper::LoadSSE2Smis(masm, ebx);
           switch (op_) {
@@ -926,7 +926,7 @@
         }
 
         Label not_floats;
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+        if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           if (static_operands_type_.IsNumber()) {
             if (FLAG_debug_code) {
@@ -1060,7 +1060,7 @@
             default: UNREACHABLE();
           }
           // Store the result in the HeapNumber and return.
-          if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+          if (CpuFeatures::IsSupported(SSE2)) {
             CpuFeatures::Scope use_sse2(SSE2);
             __ cvtsi2sd(xmm0, Operand(ebx));
             __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -1660,7 +1660,7 @@
         // number in eax.
         __ AllocateHeapNumber(eax, ecx, ebx, slow);
         // Store the result in the HeapNumber and return.
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+        if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           __ cvtsi2sd(xmm0, Operand(left));
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -1705,7 +1705,7 @@
             break;
         }
         __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+        if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           FloatingPointHelper::LoadSSE2Smis(masm, ebx);
           switch (op_) {
@@ -1837,7 +1837,7 @@
     case Token::DIV: {
       Label not_floats;
       Label not_int32;
-      if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+      if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
         FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
@@ -1958,7 +1958,7 @@
           default: UNREACHABLE();
         }
         // Store the result in the HeapNumber and return.
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+        if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           __ cvtsi2sd(xmm0, Operand(ebx));
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -2070,7 +2070,7 @@
     case Token::MUL:
     case Token::DIV: {
       Label not_floats;
-      if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+      if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
 
@@ -2173,7 +2173,7 @@
           default: UNREACHABLE();
         }
         // Store the result in the HeapNumber and return.
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+        if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           __ cvtsi2sd(xmm0, Operand(ebx));
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -2275,7 +2275,7 @@
     case Token::MUL:
     case Token::DIV: {
       Label not_floats;
-      if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+      if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
 
@@ -2373,7 +2373,7 @@
           default: UNREACHABLE();
         }
         // Store the result in the HeapNumber and return.
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+        if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           __ cvtsi2sd(xmm0, Operand(ebx));
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -2572,7 +2572,7 @@
 
     __ bind(&loaded);
   } else {  // UNTAGGED.
-    if (masm->isolate()->cpu_features()->IsSupported(SSE4_1)) {
+    if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope sse4_scope(SSE4_1);
       __ pextrd(Operand(edx), xmm1, 0x1);  // copy xmm1[63..32] to edx.
     } else {
@@ -2826,8 +2826,7 @@
   Label done, right_exponent, normal_exponent;
   Register scratch = ebx;
   Register scratch2 = edi;
-  if (type_info.IsInteger32() &&
-      masm->isolate()->cpu_features()->IsEnabled(SSE2)) {
+  if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope scope(SSE2);
     __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
     return;
@@ -3375,7 +3374,7 @@
     IntegerConvert(masm,
                    eax,
                    TypeInfo::Unknown(),
-                   masm->isolate()->cpu_features()->IsSupported(SSE3),
+                   CpuFeatures::IsSupported(SSE3),
                    &slow);
 
     // Do the bitwise operation and check if the result fits in a smi.
@@ -3398,7 +3397,7 @@
       __ AllocateHeapNumber(ebx, edx, edi, &slow);
       __ mov(eax, Operand(ebx));
     }
-    if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+    if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatures::Scope use_sse2(SSE2);
       __ cvtsi2sd(xmm0, Operand(ecx));
       __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -4270,7 +4269,7 @@
                         FixedArray::kHeaderSize));
     __ test(probe, Immediate(kSmiTagMask));
     __ j(zero, not_found);
-    if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+    if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatures::Scope fscope(SSE2);
       __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
       __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
@@ -4509,7 +4508,7 @@
   if (include_number_compare_) {
     Label non_number_comparison;
     Label unordered;
-    if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+    if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatures::Scope use_sse2(SSE2);
       CpuFeatures::Scope use_cmov(CMOV);
 
@@ -6455,8 +6454,7 @@
 
   // Inlining the double comparison and falling back to the general compare
  // stub if NaN is involved or SSE2 or CMOV is unsupported.
-  CpuFeatures* cpu_features = masm->isolate()->cpu_features();
-  if (cpu_features->IsSupported(SSE2) && cpu_features->IsSupported(CMOV)) {
+  if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
     CpuFeatures::Scope scope1(SSE2);
     CpuFeatures::Scope scope2(CMOV);
 
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 31fa645..d116bf7 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -96,7 +96,7 @@
     if (static_operands_type_.IsSmi()) {
       mode_ = NO_OVERWRITE;
     }
-    use_sse3_ = Isolate::Current()->cpu_features()->IsSupported(SSE3);
+    use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
@@ -235,7 +235,7 @@
         operands_type_(TRBinaryOpIC::UNINITIALIZED),
         result_type_(TRBinaryOpIC::UNINITIALIZED),
         name_(NULL) {
-    use_sse3_ = Isolate::Current()->cpu_features()->IsSupported(SSE3);
+    use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index cf990a0..8a47e72 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -556,7 +556,7 @@
     __ sar(val, 1);
     // If there was an overflow, bits 30 and 31 of the original number disagree.
     __ xor_(val, 0x80000000u);
-    if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
+    if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatures::Scope fscope(SSE2);
       __ cvtsi2sd(xmm0, Operand(val));
     } else {
@@ -574,7 +574,7 @@
                           no_reg, &allocation_failed);
     VirtualFrame* clone = new VirtualFrame(frame_);
     scratch.Unuse();
-    if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
+    if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatures::Scope fscope(SSE2);
       __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
     } else {
@@ -587,7 +587,7 @@
     RegisterFile empty_regs;
     SetFrame(clone, &empty_regs);
     __ bind(&allocation_failed);
-    if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
+    if (!CpuFeatures::IsSupported(SSE2)) {
       // Pop the value from the floating point stack.
       __ fstp(0);
     }
@@ -614,7 +614,7 @@
       safe_int32_mode_enabled() &&
       expr->side_effect_free() &&
       expr->num_bit_ops() > 2 &&
-      masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
+      CpuFeatures::IsSupported(SSE2)) {
     BreakTarget unsafe_bailout;
     JumpTarget done;
     unsafe_bailout.set_expected_height(frame_->height());
@@ -995,7 +995,7 @@
 
 Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
   if (Token::IsBitOp(op_) &&
-      masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
+      CpuFeatures::IsSupported(SSE2)) {
     return &non_smi_input_;
   } else {
     return entry_label();
@@ -1018,7 +1018,7 @@
 void DeferredInlineBinaryOperation::Generate() {
   // Registers are not saved implicitly for this stub, so we should not
   // tread on the registers that were not passed to us.
-  if (masm()->isolate()->cpu_features()->IsSupported(SSE2) &&
+  if (CpuFeatures::IsSupported(SSE2) &&
       ((op_ == Token::ADD) ||
        (op_ == Token::SUB) ||
        (op_ == Token::MUL) ||
@@ -1154,7 +1154,7 @@
     // The left_ and right_ registers have not been initialized yet.
     __ mov(right_, Immediate(smi_value_));
     __ mov(left_, Operand(dst_));
-    if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
+    if (!CpuFeatures::IsSupported(SSE2)) {
       __ jmp(entry_label());
       return;
     } else {
@@ -1267,7 +1267,7 @@
   // This trashes right_.
   __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
   __ bind(&allocation_ok);
-  if (masm()->isolate()->cpu_features()->IsSupported(SSE2) &&
+  if (CpuFeatures::IsSupported(SSE2) &&
       op_ != Token::SHR) {
     CpuFeatures::Scope use_sse2(SSE2);
     ASSERT(Token::IsBitOp(op_));
@@ -3032,7 +3032,7 @@
       // constant smi.  If the non-smi is a heap number and this is not
       // a loop condition, inline the floating point code.
       if (!is_loop_condition &&
-          masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
+          CpuFeatures::IsSupported(SSE2)) {
         // Right side is a constant smi and left side has been checked
         // not to be a smi.
         CpuFeatures::Scope use_sse2(SSE2);
@@ -3196,7 +3196,7 @@
   ASSERT(right_side->is_register());
 
   JumpTarget not_numbers;
-  if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
+  if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope use_sse2(SSE2);
 
     // Load left and right operand into registers xmm0 and xmm1 and compare.
@@ -7448,15 +7448,16 @@
 
   __ bind(&heapnumber_allocated);
 
-  __ PrepareCallCFunction(0, ebx);
+  __ PrepareCallCFunction(1, ebx);
+  __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
   __ CallCFunction(ExternalReference::random_uint32_function(masm()->isolate()),
-                   0);
+                   1);
 
   // Convert 32 random bits in eax to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
   // This is implemented on both SSE2 and FPU.
-  if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
+  if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope fscope(SSE2);
     __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
     __ movd(xmm1, Operand(ebx));
@@ -7862,7 +7863,7 @@
   ASSERT(args->length() == 2);
   Load(args->at(0));
   Load(args->at(1));
-  if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
+  if (!CpuFeatures::IsSupported(SSE2)) {
     Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
     frame_->Push(&res);
   } else {
@@ -8079,7 +8080,7 @@
   ASSERT_EQ(args->length(), 1);
   Load(args->at(0));
 
-  if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
+  if (!CpuFeatures::IsSupported(SSE2)) {
     Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
     frame()->Push(&result);
   } else {
@@ -10174,9 +10175,14 @@
 }
 
 
-MemCopyFunction CreateMemCopyFunction() {
-  HandleScope scope;
-  MacroAssembler masm(NULL, 1 * KB);
+OS::MemCopyFunction CreateMemCopyFunction() {
+  size_t actual_size;
+  // Allocate buffer in executable space.
+  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+                                                 &actual_size,
+                                                 true));
+  if (buffer == NULL) return &MemCopyWrapper;
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
 
  // Generated code is put into a fixed, unmovable buffer, and not into
   // the V8 heap. We can't, and don't, refer to any relocatable addresses
@@ -10198,13 +10204,13 @@
 
   if (FLAG_debug_code) {
     __ cmp(Operand(esp, kSizeOffset + stack_offset),
-           Immediate(kMinComplexMemCopy));
+           Immediate(OS::kMinComplexMemCopy));
     Label ok;
     __ j(greater_equal, &ok);
     __ int3();
     __ bind(&ok);
   }
-  if (masm.isolate()->cpu_features()->IsSupported(SSE2)) {
+  if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope enable(SSE2);
     __ push(edi);
     __ push(esi);
@@ -10232,7 +10238,6 @@
     __ test(Operand(src), Immediate(0x0F));
     __ j(not_zero, &unaligned_source);
     {
-      __ IncrementCounter(masm.isolate()->counters()->memcopy_aligned(), 1);
       // Copy loop for aligned source and destination.
       __ mov(edx, count);
       Register loop_count = ecx;
@@ -10280,7 +10285,6 @@
       // Copy loop for unaligned source and aligned destination.
       // If source is not aligned, we can't read it as efficiently.
       __ bind(&unaligned_source);
-      __ IncrementCounter(masm.isolate()->counters()->memcopy_unaligned(), 1);
       __ mov(edx, ecx);
       Register loop_count = ecx;
       Register count = edx;
@@ -10324,7 +10328,6 @@
     }
 
   } else {
-    __ IncrementCounter(masm.isolate()->counters()->memcopy_noxmm(), 1);
     // SSE2 not supported. Unlikely to happen in practice.
     __ push(edi);
     __ push(esi);
@@ -10371,13 +10374,8 @@
   masm.GetCode(&desc);
   ASSERT(desc.reloc_size == 0);
 
-  // Copy the generated code into an executable chunk and return a pointer
-  // to the first instruction in it as a C++ function pointer.
-  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
-  if (chunk == NULL) return &MemCopyWrapper;
-  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
-  return FUNCTION_CAST<MemCopyFunction>(chunk->GetStartAddress());
+  CPU::FlushICache(buffer, actual_size);
+  return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
 }
 
 #undef __
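
Design note, condensed from the hunks above (a sketch, not additional code in this patch): CreateMemCopyFunction now assembles straight into an executable buffer obtained from OS::Allocate instead of copying the finished code into a LargeObjectChunk, so the stub never touches the V8 heap; this is also why its MacroAssembler is constructed with a NULL isolate.

  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &MemCopyWrapper;  // fall back to the C++ wrapper
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // ... emit position-independent code; no relocatable references allowed ...
  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);
  CPU::FlushICache(buffer, actual_size);
  return FUNCTION_CAST<OS::MemCopyFunction>(buffer);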
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 286ed7b..615dbfe 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -42,12 +42,12 @@
 namespace internal {
 
 void CPU::Setup() {
-  CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
-  cpu_features->Clear();
-  cpu_features->Probe(true);
-  if (!cpu_features->IsSupported(SSE2) || Serializer::enabled()) {
-    V8::DisableCrankshaft();
-  }
+  CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+  return CpuFeatures::IsSupported(SSE2);
 }
 
 
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index c6342d7..72fdac8 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -641,14 +641,16 @@
   __ neg(edx);
 
   // Allocate a new deoptimizer object.
-  __ PrepareCallCFunction(5, eax);
+  __ PrepareCallCFunction(6, eax);
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ mov(Operand(esp, 0 * kPointerSize), eax);  // Function.
   __ mov(Operand(esp, 1 * kPointerSize), Immediate(type()));  // Bailout type.
   __ mov(Operand(esp, 2 * kPointerSize), ebx);  // Bailout id.
   __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Code address or 0.
   __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 5);
+  __ mov(Operand(esp, 5 * kPointerSize),
+         Immediate(ExternalReference::isolate_address()));
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
 
   // Preserve deoptimizer object in register eax and get the input
   // frame descriptor pointer.
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 8084694..0f95abd 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -108,7 +108,7 @@
  public:
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
-  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kLastParameterOffset = +2 * kPointerSize;
   static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
 
   // Caller SP-relative.
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 16c39c5..3f72def 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1563,27 +1563,26 @@
     }
   }
 
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
   if (expr->is_compound()) {
     { AccumulatorValueContext context(this);
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          PrepareForBailout(expr->target(), TOS_REG);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
       }
     }
 
-    // For property compound assignments we need another deoptimization
-    // point after the property load.
-    if (property != NULL) {
-      PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
-    }
-
     Token::Value op = expr->binary_op();
     __ push(eax);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
@@ -2268,15 +2267,6 @@
       }
     }
   } else {
-    // Call to some other expression.  If the expression is an anonymous
-    // function literal not called in a loop, mark it as one that should
-    // also use the full code generator.
-    FunctionLiteral* lit = fun->AsFunctionLiteral();
-    if (lit != NULL &&
-        lit->name()->Equals(isolate()->heap()->empty_string()) &&
-        loop_depth() == 0) {
-      lit->set_try_full_codegen(true);
-    }
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(fun);
     }
@@ -2458,10 +2448,73 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // TODO(3110205): Implement this.
-  // Currently unimplemented.  Emit false, a safe choice.
+  if (FLAG_debug_code) __ AbortIfSmi(eax);
+
+  // Check whether this map has already been checked to be safe for default
+  // valueOf.
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
+            1 << Map::kStringWrapperSafeForDefaultValueOf);
+  __ j(not_zero, if_true);
+
+  // Check for fast case object. Return false for slow case objects.
+  __ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
+  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ cmp(ecx, FACTORY->hash_table_map());
+  __ j(equal, if_false);
+
+  // Look for the valueOf symbol in the descriptor array, and indicate false if
+  // found. The type is not checked, so if it is a transition it is a false
+  // negative.
+  __ mov(ebx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
+  __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+  // ebx: descriptor array
+  // ecx: length of descriptor array
+  // Calculate the end of the descriptor array.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kPointerSize == 4);
+  __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+  // Calculate location of the first key name.
+  __ add(Operand(ebx),
+           Immediate(FixedArray::kHeaderSize +
+                     DescriptorArray::kFirstIndex * kPointerSize));
+  // Loop through all the keys in the descriptor array. If one of these is the
+  // symbol valueOf, the result is false.
+  Label entry, loop;
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ mov(edx, FieldOperand(ebx, 0));
+  __ cmp(edx, FACTORY->value_of_symbol());
+  __ j(equal, if_false);
+  __ add(Operand(ebx), Immediate(kPointerSize));
+  __ bind(&entry);
+  __ cmp(ebx, Operand(ecx));
+  __ j(not_equal, &loop);
+
+  // Reload map as register ebx was used as temporary above.
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If not, the result is false.
+  __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(zero, if_false);
+  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ mov(edx,
+         FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+  __ cmp(ecx,
+         ContextOperand(edx,
+                        Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+  __ j(not_equal, if_false);
+  // Set the bit in the map to indicate that it has been checked safe for
+  // default valueOf and set the result to true.
+  __ or_(FieldOperand(ebx, Map::kBitField2Offset),
+         Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ jmp(if_true);
+
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  __ jmp(if_false);
   context()->Plug(if_true, if_false);
 }
 
@@ -2717,15 +2770,16 @@
 
   __ bind(&heapnumber_allocated);
 
-  __ PrepareCallCFunction(0, ebx);
+  __ PrepareCallCFunction(1, ebx);
+  __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
   __ CallCFunction(ExternalReference::random_uint32_function(isolate()),
-                   0);
+                   1);
 
   // Convert 32 random bits in eax to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
   // This is implemented on both SSE2 and FPU.
-  if (isolate()->cpu_features()->IsSupported(SSE2)) {
+  if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope fscope(SSE2);
     __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
     __ movd(xmm1, Operand(ebx));
@@ -2800,7 +2854,7 @@
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
-  if (isolate()->cpu_features()->IsSupported(SSE2)) {
+  if (CpuFeatures::IsSupported(SSE2)) {
     MathPowStub stub;
     __ CallStub(&stub);
   } else {
@@ -3778,7 +3832,11 @@
 
   // We need a second deoptimization point after loading the value
  // in case evaluating the property load may have a side effect.
-  PrepareForBailout(expr->increment(), TOS_REG);
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailout(expr->increment(), TOS_REG);
+  }
 
   // Call ToNumber only if operand is not a smi.
   NearLabel no_conversion;
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 1691098..eb31d5a 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1062,7 +1062,7 @@
     uint64_t int_val = BitCast<uint64_t, double>(v);
     int32_t lower = static_cast<int32_t>(int_val);
     int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
-    if (isolate()->cpu_features()->IsSupported(SSE4_1)) {
+    if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope scope(SSE4_1);
       if (lower != 0) {
         __ Set(temp, Immediate(lower));
@@ -2032,7 +2032,7 @@
 }
 
 
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
   if (instr->hydrogen()->check_hole_value()) {
@@ -2042,6 +2042,19 @@
 }
 
 
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(esi));
+  ASSERT(ToRegister(instr->global_object()).is(eax));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  __ mov(ecx, instr->name());
+  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
+                                               RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  CallCode(ic, mode, instr);
+}
+
+
 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
   Register value = ToRegister(instr->InputAt(0));
   Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
@@ -2304,11 +2317,11 @@
         break;
       case kExternalUnsignedIntArray:
         __ mov(result, Operand(external_pointer, key, times_4, 0));
-        __ test(Operand(result), Immediate(0x80000000));
+        __ test(result, Operand(result));
         // TODO(danno): we could be more clever here, perhaps having a special
         // version of the stub that detects if the overflow case actually
         // happens, and generate code that returns a double rather than int.
-        DeoptimizeIf(not_zero, instr->environment());
+        DeoptimizeIf(negative, instr->environment());
         break;
       case kExternalFloatArray:
         UNREACHABLE();
@@ -3427,7 +3440,7 @@
     __ jmp(&done);
 
     __ bind(&heap_number);
-    if (isolate()->cpu_features()->IsSupported(SSE3)) {
+    if (CpuFeatures::IsSupported(SSE3)) {
       CpuFeatures::Scope scope(SSE3);
       NearLabel convert;
       // Use more powerful conversion when sse3 is available.
@@ -3537,7 +3550,7 @@
     // the JS bitwise operations.
     __ cvttsd2si(result_reg, Operand(input_reg));
     __ cmp(result_reg, 0x80000000u);
-    if (isolate()->cpu_features()->IsSupported(SSE3)) {
+    if (CpuFeatures::IsSupported(SSE3)) {
       // This will deoptimize if the exponent of the input in out of range.
       CpuFeatures::Scope scope(SSE3);
       NearLabel convert, done;
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index eabfecc..3d1da40 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "ia32/lithium-gap-resolver-ia32.h"
 #include "ia32/lithium-codegen-ia32.h"
 
@@ -460,3 +462,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 199a80a..d9192d4 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1187,7 +1187,7 @@
 
 LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
   ++argument_count_;
-  LOperand* argument = UseOrConstant(instr->argument());
+  LOperand* argument = UseAny(instr->argument());
   return new LPushArgument(argument);
 }
 
@@ -1633,9 +1633,8 @@
       LOperand* value = UseRegister(instr->value());
       bool needs_check = !instr->value()->type().IsSmi();
       if (needs_check) {
-        CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
         LOperand* xmm_temp =
-            (instr->CanTruncateToInt32() && cpu_features->IsSupported(SSE3))
+            (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
             ? NULL
             : FixedTemp(xmm1);
         LTaggedToI* res = new LTaggedToI(value, xmm_temp);
@@ -1656,7 +1655,7 @@
     } else {
       ASSERT(to.IsInteger32());
       bool needs_temp = instr->CanTruncateToInt32() &&
-          !Isolate::Current()->cpu_features()->IsSupported(SSE3);
+          !CpuFeatures::IsSupported(SSE3);
       LOperand* value = needs_temp ?
           UseTempRegister(instr->value()) : UseRegister(instr->value());
       LOperand* temp = needs_temp ? TempRegister() : NULL;
@@ -1746,14 +1745,22 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  LLoadGlobal* result = new LLoadGlobal;
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new LLoadGlobalCell;
   return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
 
 
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* global_object = UseFixed(instr->global_object(), eax);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(context, global_object);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
   LStoreGlobal* result = new LStoreGlobal(UseRegisterAtStart(instr->value()));
   return instr->check_hole_value() ? AssignEnvironment(result) : result;
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index a9d769b..e5792e8 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -121,7 +121,8 @@
   V(LoadElements)                               \
   V(LoadExternalArrayPointer)                   \
   V(LoadFunctionPrototype)                      \
-  V(LoadGlobal)                                 \
+  V(LoadGlobalCell)                             \
+  V(LoadGlobalGeneric)                          \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadKeyedSpecializedArrayElement)           \
@@ -1292,10 +1293,27 @@
 };
 
 
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
 };
 
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index ba30c49..4055498 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -41,11 +41,14 @@
 // -------------------------------------------------------------------------
 // MacroAssembler implementation.
 
-MacroAssembler::MacroAssembler(void* buffer, int size)
-    : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+    : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true),
-      code_object_(isolate()->heap()->undefined_value()) {
+      allow_stub_calls_(true) {
+  if (isolate() != NULL) {
+    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+                                  isolate());
+  }
 }
 
 
@@ -231,7 +234,7 @@
 
 
 void MacroAssembler::FCmp() {
-  if (Isolate::Current()->cpu_features()->IsSupported(CMOV)) {
+  if (CpuFeatures::IsSupported(CMOV)) {
     fucomip();
     ffree(0);
     fincstp();
@@ -1988,17 +1991,14 @@
 
 
 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
-  // Reserve space for Isolate address which is always passed as last parameter
-  num_arguments += 1;
-
-  int frameAlignment = OS::ActivationFrameAlignment();
-  if (frameAlignment != 0) {
+  int frame_alignment = OS::ActivationFrameAlignment();
+  if (frame_alignment != 0) {
     // Make stack end at alignment and make room for num_arguments words
     // and the original value of esp.
     mov(scratch, esp);
     sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
-    ASSERT(IsPowerOf2(frameAlignment));
-    and_(esp, -frameAlignment);
+    ASSERT(IsPowerOf2(frame_alignment));
+    and_(esp, -frame_alignment);
     mov(Operand(esp, num_arguments * kPointerSize), scratch);
   } else {
     sub(Operand(esp), Immediate(num_arguments * kPointerSize));
@@ -2016,11 +2016,6 @@
 
 void MacroAssembler::CallCFunction(Register function,
                                    int num_arguments) {
-  // Pass current isolate address as additional parameter.
-  mov(Operand(esp, num_arguments * kPointerSize),
-      Immediate(ExternalReference::isolate_address()));
-  num_arguments += 1;
-
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
@@ -2030,13 +2025,15 @@
   if (OS::ActivationFrameAlignment() != 0) {
     mov(esp, Operand(esp, num_arguments * kPointerSize));
   } else {
-    add(Operand(esp), Immediate(num_arguments * sizeof(int32_t)));
+    add(Operand(esp), Immediate(num_arguments * kPointerSize));
   }
 }
 
 
 CodePatcher::CodePatcher(byte* address, int size)
-    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+    : address_(address),
+      size_(size),
+      masm_(Isolate::Current(), address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
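
Calling-convention note: CallCFunction no longer appends the isolate as a hidden trailing argument, so every runtime call that needs it must count the extra slot in PrepareCallCFunction and store ExternalReference::isolate_address() there itself. An illustrative fragment mirroring the random_uint32_function call sites above:

  __ PrepareCallCFunction(1, ebx);  // one explicit argument: the isolate
  __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);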
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index bafb175..946022a 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -56,7 +56,11 @@
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
-  MacroAssembler(void* buffer, int size);
+  // The isolate parameter can be NULL if the macro assembler should
+  // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller never to invoke such functions on the
+  // macro assembler.
+  MacroAssembler(Isolate* isolate, void* buffer, int size);
 
   // ---------------------------------------------------------------------------
   // GC Support
@@ -580,7 +584,10 @@
 
   void Move(Register target, Handle<Object> value);
 
-  Handle<Object> CodeObject() { return code_object_; }
+  Handle<Object> CodeObject() {
+    ASSERT(!code_object_.is_null());
+    return code_object_;
+  }
 
 
   // ---------------------------------------------------------------------------
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index f1c773b..067f8c8 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -99,7 +99,7 @@
 RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
     Mode mode,
     int registers_to_save)
-    : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+    : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
       mode_(mode),
       num_registers_(registers_to_save),
       num_saved_registers_(registers_to_save),
@@ -372,14 +372,18 @@
     __ push(backtrack_stackpointer());
     __ push(ebx);
 
-    static const int argument_count = 3;
+    static const int argument_count = 4;
     __ PrepareCallCFunction(argument_count, ecx);
     // Put arguments into allocated stack area, last argument highest on stack.
     // Parameters are
     //   Address byte_offset1 - Address captured substring's start.
     //   Address byte_offset2 - Address of current character position.
     //   size_t byte_length - length of capture in bytes(!)
+    //   Isolate* isolate
 
+    // Set isolate.
+    __ mov(Operand(esp, 3 * kPointerSize),
+           Immediate(ExternalReference::isolate_address()));
     // Set byte_length.
     __ mov(Operand(esp, 2 * kPointerSize), ebx);
     // Set byte_offset2.
@@ -838,8 +842,10 @@
     __ push(edi);
 
     // Call GrowStack(backtrack_stackpointer())
-    static const int num_arguments = 2;
+    static const int num_arguments = 3;
     __ PrepareCallCFunction(num_arguments, ebx);
+    __ mov(Operand(esp, 2 * kPointerSize),
+           Immediate(ExternalReference::isolate_address()));
     __ lea(eax, Operand(ebp, kStackHighEnd));
     __ mov(Operand(esp, 1 * kPointerSize), eax);
     __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 7730ee3..380d38f 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -1921,7 +1921,7 @@
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
-  if (!isolate()->cpu_features()->IsSupported(SSE2)) {
+  if (!CpuFeatures::IsSupported(SSE2)) {
     return isolate()->heap()->undefined_value();
   }
 
@@ -3292,7 +3292,7 @@
       int arg_number = shared->GetThisPropertyAssignmentArgument(i);
       __ mov(ebx, edi);
       __ cmp(eax, arg_number);
-      if (isolate()->cpu_features()->IsSupported(CMOV)) {
+      if (CpuFeatures::IsSupported(CMOV)) {
         CpuFeatures::Scope use_cmov(CMOV);
         __ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize));
       } else {
@@ -3611,10 +3611,10 @@
       // processors that don't support SSE2. The code in IntegerConvert
       // (code-stubs-ia32.cc) is roughly what is needed here though the
       // conversion failure case does not need to be handled.
-      if (isolate()->cpu_features()->IsSupported(SSE2)) {
+      if (CpuFeatures::IsSupported(SSE2)) {
         if (array_type != kExternalIntArray &&
             array_type != kExternalUnsignedIntArray) {
-          ASSERT(isolate()->cpu_features()->IsSupported(SSE2));
+          ASSERT(CpuFeatures::IsSupported(SSE2));
           CpuFeatures::Scope scope(SSE2);
           __ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
           // ecx: untagged integer value
@@ -3629,6 +3629,7 @@
                 __ bind(&done);
               }
               __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+              break;
             case kExternalByteArray:
             case kExternalUnsignedByteArray:
               __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
@@ -3642,7 +3643,7 @@
               break;
           }
         } else {
-          if (isolate()->cpu_features()->IsSupported(SSE3)) {
+          if (CpuFeatures::IsSupported(SSE3)) {
             CpuFeatures::Scope scope(SSE3);
             // fisttp stores values as signed integers. To represent the
             // entire range of int and unsigned int arrays, store as a
@@ -3655,7 +3656,7 @@
             __ pop(ecx);
             __ add(Operand(esp), Immediate(kPointerSize));
           } else {
-            ASSERT(isolate()->cpu_features()->IsSupported(SSE2));
+            ASSERT(CpuFeatures::IsSupported(SSE2));
             CpuFeatures::Scope scope(SSE2);
             // We can easily implement the correct rounding behavior for the
             // range [0, 2^31-1]. For the time being, to keep this code simple,
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 2613caf..0304c32 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -828,7 +828,7 @@
           cgen()->unsafe_bailout_->Branch(not_equal);
         }
 
-        if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
+        if (!CpuFeatures::IsSupported(SSE2)) {
           UNREACHABLE();
         } else {
           CpuFeatures::Scope use_sse2(SSE2);
diff --git a/src/ic.cc b/src/ic.cc
index 382b438..dd4d25b 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1143,6 +1143,16 @@
 MaybeObject* KeyedLoadIC::Load(State state,
                                Handle<Object> object,
                                Handle<Object> key) {
+  // Check for values that can be converted into a symbol.
+  // TODO(1295): Remove this code.
+  HandleScope scope(isolate());
+  if (key->IsHeapNumber() &&
+      isnan(HeapNumber::cast(*key)->value())) {
+    key = isolate()->factory()->nan_symbol();
+  } else if (key->IsUndefined()) {
+    key = isolate()->factory()->undefined_symbol();
+  }
+
   if (key->IsSymbol()) {
     Handle<String> name = Handle<String>::cast(key);
 
@@ -1815,8 +1825,7 @@
 
 
 // Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* CallIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
   CallIC ic(isolate);
@@ -1846,8 +1855,7 @@
 
 
 // Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* KeyedCallIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
   KeyedCallIC ic(isolate);
@@ -1868,8 +1876,7 @@
 
 
 // Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* LoadIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
   LoadIC ic(isolate);
@@ -1879,8 +1886,7 @@
 
 
 // Used from ic-<arch>.cc
-MUST_USE_RESULT MaybeObject* KeyedLoadIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
   KeyedLoadIC ic(isolate);
@@ -1890,8 +1896,7 @@
 
 
 // Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* StoreIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 3);
   StoreIC ic(isolate);
@@ -1905,8 +1910,7 @@
 }
 
 
-MUST_USE_RESULT MaybeObject* StoreIC_ArrayLength(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
   NoHandleAllocation nha;
 
   ASSERT(args.length() == 2);
@@ -1927,9 +1931,7 @@
 // Extend storage is called in a store inline cache when
 // it is necessary to extend the properties array of a
 // JSObject.
-MUST_USE_RESULT MaybeObject* SharedStoreIC_ExtendStorage(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
   NoHandleAllocation na;
   ASSERT(args.length() == 3);
 
@@ -1963,8 +1965,7 @@
 
 
 // Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* KeyedStoreIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 3);
   KeyedStoreIC ic(isolate);
@@ -2037,8 +2038,7 @@
 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
 
 
-MUST_USE_RESULT MaybeObject* BinaryOp_Patch(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
   ASSERT(args.length() == 5);
 
   HandleScope scope(isolate);
@@ -2209,8 +2209,7 @@
                                           TRBinaryOpIC::TypeInfo result_type);
 
 
-MaybeObject* TypeRecordingBinaryOp_Patch(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, TypeRecordingBinaryOp_Patch) {
   ASSERT(args.length() == 5);
 
   HandleScope scope(isolate);
@@ -2365,8 +2364,7 @@
 
 
 // Used from ic_<arch>.cc.
-Code* CompareIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 3);
   CompareIC ic(isolate, static_cast<Token::Value>(Smi::cast(args[2])->value()));
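The ic.cc hunks above fold the separate RUNTIME_CALLING_CONVENTION parameter list and the RUNTIME_GET_ISOLATE statement into a single RUNTIME_FUNCTION(Type, Name) declaration macro. The macro's actual definition is not part of this patch, so the following is only a minimal, self-contained sketch of the shape such a macro can take; the Isolate, Arguments, MaybeObjectSketch and Demo_Miss names below are stand-ins invented for illustration.

#include <cstdio>

// Stand-ins invented for this sketch; they are not V8's declarations.
struct Isolate {};
struct Arguments {
  int length() const { return 2; }
};
typedef const char* MaybeObjectSketch;

// A RUNTIME_FUNCTION-style macro folds the calling convention and the
// isolate parameter into a single declaration site.
#define RUNTIME_FUNCTION_SKETCH(Type, Name) \
  Type Name(Arguments args, Isolate* isolate)

RUNTIME_FUNCTION_SKETCH(MaybeObjectSketch, Demo_Miss) {
  (void)isolate;   // available without any RUNTIME_GET_ISOLATE boilerplate
  return args.length() == 2 ? "ok" : "wrong argument count";
}

int main() {
  Isolate isolate;
  Arguments args;
  std::printf("%s\n", Demo_Miss(args, &isolate));
  return 0;
}

Either way, every runtime entry point receives args and isolate through its signature instead of recovering the isolate in its body.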
diff --git a/src/isolate.cc b/src/isolate.cc
index a163532..cc9bc37 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -414,7 +414,6 @@
       runtime_profiler_(NULL),
       compilation_cache_(NULL),
       counters_(new Counters()),
-      cpu_features_(NULL),
       code_range_(NULL),
       break_access_(OS::CreateMutex()),
       logger_(new Logger()),
@@ -593,8 +592,6 @@
 
   delete counters_;
   counters_ = NULL;
-  delete cpu_features_;
-  cpu_features_ = NULL;
 
   delete handle_scope_implementer_;
   handle_scope_implementer_ = NULL;
@@ -680,7 +677,6 @@
   write_input_buffer_ = new StringInputBuffer();
   global_handles_ = new GlobalHandles(this);
   bootstrapper_ = new Bootstrapper();
-  cpu_features_ = new CpuFeatures();
   handle_scope_implementer_ = new HandleScopeImplementer();
   stub_cache_ = new StubCache(this);
   ast_sentinels_ = new AstSentinels();
@@ -725,9 +721,6 @@
   CpuProfiler::Setup();
   HeapProfiler::Setup();
 
-  // Setup the platform OS support.
-  OS::Setup();
-
   // Initialize other runtime facilities
 #if defined(USE_SIMULATOR)
 #if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
@@ -786,11 +779,6 @@
   // stack guard.
   heap_.SetStackLimits();
 
-  // Setup the CPU support. Must be done after heap setup and after
-  // any deserialization because we have to have the initial heap
-  // objects in place for creating the code object used for probing.
-  CPU::Setup();
-
   deoptimizer_data_ = new DeoptimizerData;
   runtime_profiler_ = new RuntimeProfiler(this);
   runtime_profiler_->Setup();
diff --git a/src/isolate.h b/src/isolate.h
index 03a4866..a9962c3 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -242,6 +242,7 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 #define ISOLATE_DEBUGGER_INIT_LIST(V)                                          \
+  V(uint64_t, enabled_cpu_features, 0)                                         \
   V(v8::Debug::EventCallback, debug_event_callback, NULL)                      \
   V(DebuggerAgent*, debugger_agent_instance, NULL)
 #else
@@ -708,10 +709,6 @@
 
   Bootstrapper* bootstrapper() { return bootstrapper_; }
   Counters* counters() { return counters_; }
-  // TODO(isolates): Having CPU features per isolate is probably too
-  // flexible. We only really need to have the set of currently
-  // enabled features for asserts in DEBUG builds.
-  CpuFeatures* cpu_features() { return cpu_features_; }
   CodeRange* code_range() { return code_range_; }
   RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
   CompilationCache* compilation_cache() { return compilation_cache_; }
@@ -1029,7 +1026,6 @@
   RuntimeProfiler* runtime_profiler_;
   CompilationCache* compilation_cache_;
   Counters* counters_;
-  CpuFeatures* cpu_features_;
   CodeRange* code_range_;
   Mutex* break_access_;
   Heap heap_;
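Together with the isolate.cc hunk, this removes the heap-allocated per-isolate CpuFeatures object; the isolate keeps only the enabled_cpu_features bitmask added above, while "is feature X supported on this CPU" becomes a static, process-wide query (CpuFeatures::IsSupported in the ia32 hunks). A self-contained sketch of that split follows, with a probe filling a bitmask once and a static accessor reading it; CpuFeaturesSketch, Probe and the feature ids are invented names, not V8's.

#include <cstdint>
#include <cstdio>

// Invented names (CpuFeaturesSketch, Probe, the feature ids): the sketch only
// models "probe once per process, query through a static call".
enum CpuFeatureSketch { kSSE2 = 0, kSSE3 = 1 };

class CpuFeaturesSketch {
 public:
  // Process-wide probe result, filled in once at startup.
  static void Probe(uint64_t supported) { supported_ = supported; }
  static bool IsSupported(CpuFeatureSketch f) {
    return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
  }

 private:
  static uint64_t supported_;
};

uint64_t CpuFeaturesSketch::supported_ = 0;

int main() {
  CpuFeaturesSketch::Probe(static_cast<uint64_t>(1) << kSSE2);  // SSE2 only
  std::printf("SSE2 supported: %d\n", CpuFeaturesSketch::IsSupported(kSSE2));
  std::printf("SSE3 supported: %d\n", CpuFeaturesSketch::IsSupported(kSSE3));
  return 0;
}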
diff --git a/src/liveedit.cc b/src/liveedit.cc
index dbcf5ef..dc50357 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -1013,8 +1013,8 @@
   Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
 
   if (IsJSFunctionCode(shared_info->code())) {
-    ReplaceCodeObject(shared_info->code(),
-                      *(compile_info_wrapper.GetFunctionCode()));
+    Handle<Code> code = compile_info_wrapper.GetFunctionCode();
+    ReplaceCodeObject(shared_info->code(), *code);
     Handle<Object> code_scope_info =  compile_info_wrapper.GetCodeScopeInfo();
     if (code_scope_info->IsFixedArray()) {
       shared_info->set_scope_info(SerializedScopeInfo::cast(*code_scope_info));
@@ -1028,8 +1028,10 @@
     debug_info->set_original_code(*new_original_code);
   }
 
-  shared_info->set_start_position(compile_info_wrapper.GetStartPosition());
-  shared_info->set_end_position(compile_info_wrapper.GetEndPosition());
+  int start_position = compile_info_wrapper.GetStartPosition();
+  int end_position = compile_info_wrapper.GetEndPosition();
+  shared_info->set_start_position(start_position);
+  shared_info->set_end_position(end_position);
 
   shared_info->set_construct_stub(
       Isolate::Current()->builtins()->builtin(
@@ -1233,13 +1235,14 @@
   int old_function_start = info->start_position();
   int new_function_start = TranslatePosition(old_function_start,
                                              position_change_array);
-  info->set_start_position(new_function_start);
-  info->set_end_position(TranslatePosition(info->end_position(),
-                                           position_change_array));
+  int new_function_end = TranslatePosition(info->end_position(),
+                                           position_change_array);
+  int new_function_token_pos =
+      TranslatePosition(info->function_token_position(), position_change_array);
 
-  info->set_function_token_position(
-      TranslatePosition(info->function_token_position(),
-      position_change_array));
+  info->set_start_position(new_function_start);
+  info->set_end_position(new_function_end);
+  info->set_function_token_position(new_function_token_pos);
 
   if (IsJSFunctionCode(info->code())) {
     // Patch relocation info section of the code.
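Each liveedit.cc hunk rewrites calls of the form object->setter(PossiblyAllocatingCall()) so that the possibly-allocating call is evaluated into a named local first. The usual motivation for this shape is that C++ leaves the evaluation order of a call's receiver and its arguments unspecified: a raw pointer obtained for the receiver could be computed before an argument expression triggers a GC that moves the object. A toy, self-contained model of that hazard and of the rewritten shape follows; Raw, AllocatingCall, storage_a and storage_b are invented for the sketch and only simulate an object being moved.

#include <cstdio>

// Toy model only: Raw stands in for a raw heap pointer and AllocatingCall()
// for a helper that may move objects (i.e. trigger a GC) while it runs.
struct Raw {
  int start;
  void set_start(int v) { start = v; }
};

static Raw storage_a = {0};
static Raw storage_b = {0};
static Raw* current = &storage_a;    // the object's current location

int AllocatingCall() {
  current = &storage_b;               // simulate the object being moved
  return 42;
}

int main() {
  // Hazardous shape: with unspecified evaluation order, the receiver
  // (current) may be read before AllocatingCall() moves the object:
  //   current->set_start(AllocatingCall());

  // Shape used by the patch: evaluate the possibly-moving call first,
  // then touch the object through its up-to-date location.
  int start = AllocatingCall();
  current->set_start(start);

  std::printf("start=%d written to %s\n", start,
              current == &storage_b ? "storage_b (new location)"
                                    : "storage_a (stale location)");
  return 0;
}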
diff --git a/src/log.cc b/src/log.cc
index 6991f3d..3ca00a6 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -150,6 +150,7 @@
 
   sample->tos = NULL;
   sample->frames_count = 0;
+  sample->has_external_callback = false;
 
   // Avoid collecting traces while doing GC.
   if (sample->state == GC) return;
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 1f73388..3770bc3 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -86,15 +86,15 @@
     GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
     EncodeForwardingAddresses();
 
-    heap_->MarkMapPointersAsEncoded(true);
+    heap()->MarkMapPointersAsEncoded(true);
     UpdatePointers();
-    heap_->MarkMapPointersAsEncoded(false);
-    heap_->isolate()->pc_to_code_cache()->Flush();
+    heap()->MarkMapPointersAsEncoded(false);
+    heap()->isolate()->pc_to_code_cache()->Flush();
 
     RelocateObjects();
   } else {
     SweepSpaces();
-    heap_->isolate()->pc_to_code_cache()->Flush();
+    heap()->isolate()->pc_to_code_cache()->Flush();
   }
 
   Finish();
@@ -123,7 +123,7 @@
   compact_on_next_gc_ = false;
 
   if (FLAG_never_compact) compacting_collection_ = false;
-  if (!HEAP->map_space()->MapPointersEncodable())
+  if (!heap()->map_space()->MapPointersEncodable())
       compacting_collection_ = false;
   if (FLAG_collect_maps) CreateBackPointers();
 #ifdef ENABLE_GDB_JIT_INTERFACE
@@ -161,9 +161,9 @@
   // force lazy re-initialization of it. This must be done after the
   // GC, because it relies on the new address of certain old space
   // objects (empty string, illegal builtin).
-  Isolate::Current()->stub_cache()->Clear();
+  heap()->isolate()->stub_cache()->Clear();
 
-  heap_->external_string_table_.CleanUp();
+  heap()->external_string_table_.CleanUp();
 
   // If we've just compacted old space there's no reason to check the
   // fragmentation limit. Just return.
@@ -456,7 +456,7 @@
     for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
   }
 
-  static inline void VisitCodeTarget(RelocInfo* rinfo) {
+  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
     Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
     if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
@@ -464,48 +464,50 @@
       // Please note targets for cleared inline caches do not have to be
       // marked since they are contained in HEAP->non_monomorphic_cache().
     } else {
-      HEAP->mark_compact_collector()->MarkObject(code);
+      heap->mark_compact_collector()->MarkObject(code);
     }
   }
 
-  static void VisitGlobalPropertyCell(RelocInfo* rinfo) {
+  static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
     ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
     Object* cell = rinfo->target_cell();
     Object* old_cell = cell;
-    VisitPointer(HEAP, &cell);
+    VisitPointer(heap, &cell);
     if (cell != old_cell) {
       rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
     }
   }
 
-  static inline void VisitDebugTarget(RelocInfo* rinfo) {
+  static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
     ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
             rinfo->IsPatchedReturnSequence()) ||
            (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
             rinfo->IsPatchedDebugBreakSlotSequence()));
     HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
-    HEAP->mark_compact_collector()->MarkObject(code);
+    heap->mark_compact_collector()->MarkObject(code);
   }
 
   // Mark object pointed to by p.
   INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
     if (!(*p)->IsHeapObject()) return;
     HeapObject* object = ShortCircuitConsString(p);
-    heap->mark_compact_collector()->MarkObject(object);
+    if (!object->IsMarked()) {
+      heap->mark_compact_collector()->MarkUnmarkedObject(object);
+    }
   }
 
 
   // Visit an unmarked object.
-  static inline void VisitUnmarkedObject(HeapObject* obj) {
+  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
+                                         HeapObject* obj)) {
 #ifdef DEBUG
-    ASSERT(HEAP->Contains(obj));
+    ASSERT(Isolate::Current()->heap()->Contains(obj));
     ASSERT(!obj->IsMarked());
 #endif
     Map* map = obj->map();
-    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
     collector->SetMark(obj);
     // Mark the map pointer and the body.
-    collector->MarkObject(map);
+    if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
     IterateBody(map, obj);
   }
 
@@ -518,12 +520,13 @@
     StackLimitCheck check(heap->isolate());
     if (check.HasOverflowed()) return false;
 
+    MarkCompactCollector* collector = heap->mark_compact_collector();
     // Visit the unmarked objects.
     for (Object** p = start; p < end; p++) {
       if (!(*p)->IsHeapObject()) continue;
       HeapObject* obj = HeapObject::cast(*p);
       if (obj->IsMarked()) continue;
-      VisitUnmarkedObject(obj);
+      VisitUnmarkedObject(collector, obj);
     }
     return true;
   }
@@ -561,8 +564,8 @@
   // flushed.
   static const int kCodeAgeThreshold = 5;
 
-  inline static bool HasSourceCode(SharedFunctionInfo* info) {
-    Object* undefined = HEAP->raw_unchecked_undefined_value();
+  inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
+    Object* undefined = heap->raw_unchecked_undefined_value();
     return (info->script() != undefined) &&
         (reinterpret_cast<Script*>(info->script())->source() != undefined);
   }
@@ -570,15 +573,15 @@
 
   inline static bool IsCompiled(JSFunction* function) {
     return function->unchecked_code() !=
-        Isolate::Current()->builtins()->builtin(Builtins::kLazyCompile);
+        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
   }
 
   inline static bool IsCompiled(SharedFunctionInfo* function) {
     return function->unchecked_code() !=
-        Isolate::Current()->builtins()->builtin(Builtins::kLazyCompile);
+        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
   }
 
-  inline static bool IsFlushable(JSFunction* function) {
+  inline static bool IsFlushable(Heap* heap, JSFunction* function) {
     SharedFunctionInfo* shared_info = function->unchecked_shared();
 
     // Code is either on stack, in compilation cache or referenced
@@ -593,10 +596,10 @@
       return false;
     }
 
-    return IsFlushable(shared_info);
+    return IsFlushable(heap, shared_info);
   }
 
-  inline static bool IsFlushable(SharedFunctionInfo* shared_info) {
+  inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
     // Code is either on stack, in compilation cache or referenced
     // by optimized version of function.
     if (shared_info->unchecked_code()->IsMarked()) {
@@ -606,7 +609,7 @@
 
     // The function must be compiled and have the source code available,
     // to be able to recompile it in case we need the function again.
-    if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) {
+    if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
       return false;
     }
 
@@ -638,7 +641,7 @@
 
 
   static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
-    if (!IsFlushable(function)) return false;
+    if (!IsFlushable(heap, function)) return false;
 
     // This function's code looks flushable. But we have to postpone the
     // decision until we see all functions that point to the same
@@ -715,7 +718,7 @@
     if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
 
     if (!known_flush_code_candidate) {
-      known_flush_code_candidate = IsFlushable(shared);
+      known_flush_code_candidate = IsFlushable(heap, shared);
       if (known_flush_code_candidate) {
         heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
       }
@@ -865,16 +868,16 @@
     StaticMarkingVisitor::VisitPointers(heap_, start, end);
   }
 
-  void VisitCodeTarget(RelocInfo* rinfo) {
-    StaticMarkingVisitor::VisitCodeTarget(rinfo);
+  void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
+    StaticMarkingVisitor::VisitCodeTarget(heap, rinfo);
   }
 
-  void VisitGlobalPropertyCell(RelocInfo* rinfo) {
-    StaticMarkingVisitor::VisitGlobalPropertyCell(rinfo);
+  void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
+    StaticMarkingVisitor::VisitGlobalPropertyCell(heap, rinfo);
   }
 
-  void VisitDebugTarget(RelocInfo* rinfo) {
-    StaticMarkingVisitor::VisitDebugTarget(rinfo);
+  void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
+    StaticMarkingVisitor::VisitDebugTarget(heap, rinfo);
   }
 
  private:
@@ -922,7 +925,7 @@
 
 
 void MarkCompactCollector::PrepareForCodeFlushing() {
-  ASSERT(heap_ == Isolate::Current()->heap());
+  ASSERT(heap() == Isolate::Current()->heap());
 
   if (!FLAG_flush_code) {
     EnableCodeFlushing(false);
@@ -930,8 +933,8 @@
   }
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  if (heap_->isolate()->debug()->IsLoaded() ||
-      heap_->isolate()->debug()->has_break_points()) {
+  if (heap()->isolate()->debug()->IsLoaded() ||
+      heap()->isolate()->debug()->has_break_points()) {
     EnableCodeFlushing(false);
     return;
   }
@@ -940,10 +943,10 @@
 
   // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
   // relies on it being marked before any other descriptor array.
-  MarkObject(heap_->raw_unchecked_empty_descriptor_array());
+  MarkObject(heap()->raw_unchecked_empty_descriptor_array());
 
   // Make sure we are not referencing the code from the stack.
-  ASSERT(this == heap_->mark_compact_collector());
+  ASSERT(this == heap()->mark_compact_collector());
   for (StackFrameIterator it; !it.done(); it.Advance()) {
     MarkObject(it.frame()->unchecked_code());
   }
@@ -951,12 +954,12 @@
   // Iterate the archived stacks in all threads to check if
   // the code is referenced.
   CodeMarkingVisitor code_marking_visitor(this);
-  heap_->isolate()->thread_manager()->IterateArchivedThreads(
+  heap()->isolate()->thread_manager()->IterateArchivedThreads(
       &code_marking_visitor);
 
   SharedFunctionInfoMarkingVisitor visitor(this);
-  heap_->isolate()->compilation_cache()->IterateFunctions(&visitor);
-  heap_->isolate()->handle_scope_implementer()->Iterate(&visitor);
+  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
+  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
 
   ProcessMarkingStack();
 }
@@ -1004,7 +1007,8 @@
 // Helper class for pruning the symbol table.
 class SymbolTableCleaner : public ObjectVisitor {
  public:
-  SymbolTableCleaner() : pointers_removed_(0) { }
+  explicit SymbolTableCleaner(Heap* heap)
+    : heap_(heap), pointers_removed_(0) { }
 
   virtual void VisitPointers(Object** start, Object** end) {
     // Visit all HeapObject pointers in [start, end).
@@ -1016,10 +1020,10 @@
         // Since no objects have yet been moved we can safely access the map of
         // the object.
         if ((*p)->IsExternalString()) {
-          HEAP->FinalizeExternalString(String::cast(*p));
+          heap_->FinalizeExternalString(String::cast(*p));
         }
         // Set the entry to null_value (as deleted).
-        *p = HEAP->raw_unchecked_null_value();
+        *p = heap_->raw_unchecked_null_value();
         pointers_removed_++;
       }
     }
@@ -1029,6 +1033,7 @@
     return pointers_removed_;
   }
  private:
+  Heap* heap_;
   int pointers_removed_;
 };
 
@@ -1054,7 +1059,7 @@
   if (object->IsMap()) {
     Map* map = Map::cast(object);
     if (FLAG_cleanup_caches_in_maps_at_gc) {
-      map->ClearCodeCache(heap_);
+      map->ClearCodeCache(heap());
     }
     SetMark(map);
     if (FLAG_collect_maps &&
@@ -1125,7 +1130,7 @@
 
 
 void MarkCompactCollector::CreateBackPointers() {
-  HeapObjectIterator iterator(HEAP->map_space());
+  HeapObjectIterator iterator(heap()->map_space());
   for (HeapObject* next_object = iterator.next();
        next_object != NULL; next_object = iterator.next()) {
     if (next_object->IsMap()) {  // Could also be ByteArray on free list.
@@ -1134,7 +1139,7 @@
           map->instance_type() <= JS_FUNCTION_TYPE) {
         map->CreateBackPointers();
       } else {
-        ASSERT(map->instance_descriptors() == HEAP->empty_descriptor_array());
+        ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
       }
     }
   }
@@ -1182,11 +1187,11 @@
 
 
 void MarkCompactCollector::MarkSymbolTable() {
-  SymbolTable* symbol_table = heap_->raw_unchecked_symbol_table();
+  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
   // Mark the symbol table itself.
   SetMark(symbol_table);
   // Explicitly mark the prefix.
-  MarkingVisitor marker(heap_);
+  MarkingVisitor marker(heap());
   symbol_table->IteratePrefix(&marker);
   ProcessMarkingStack();
 }
@@ -1195,7 +1200,7 @@
 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
   // Mark the heap roots including global variables, stack variables,
   // etc., and all objects reachable from them.
-  HEAP->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
+  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
 
   // Handle the symbol table specially.
   MarkSymbolTable();
@@ -1210,7 +1215,7 @@
 
 void MarkCompactCollector::MarkObjectGroups() {
   List<ObjectGroup*>* object_groups =
-      heap_->isolate()->global_handles()->object_groups();
+      heap()->isolate()->global_handles()->object_groups();
 
   for (int i = 0; i < object_groups->length(); i++) {
     ObjectGroup* entry = object_groups->at(i);
@@ -1246,7 +1251,7 @@
 
 void MarkCompactCollector::MarkImplicitRefGroups() {
   List<ImplicitRefGroup*>* ref_groups =
-      heap_->isolate()->global_handles()->implicit_ref_groups();
+      heap()->isolate()->global_handles()->implicit_ref_groups();
 
   for (int i = 0; i < ref_groups->length(); i++) {
     ImplicitRefGroup* entry = ref_groups->at(i);
@@ -1279,7 +1284,7 @@
   while (!marking_stack_.is_empty()) {
     HeapObject* object = marking_stack_.Pop();
     ASSERT(object->IsHeapObject());
-    ASSERT(heap_->Contains(object));
+    ASSERT(heap()->Contains(object));
     ASSERT(object->IsMarked());
     ASSERT(!object->IsOverflowed());
 
@@ -1303,32 +1308,32 @@
 void MarkCompactCollector::RefillMarkingStack() {
   ASSERT(marking_stack_.overflowed());
 
-  SemiSpaceIterator new_it(HEAP->new_space(), &OverflowObjectSize);
+  SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
   if (marking_stack_.is_full()) return;
 
-  HeapObjectIterator old_pointer_it(HEAP->old_pointer_space(),
+  HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
                                     &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
   if (marking_stack_.is_full()) return;
 
-  HeapObjectIterator old_data_it(HEAP->old_data_space(), &OverflowObjectSize);
+  HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
   if (marking_stack_.is_full()) return;
 
-  HeapObjectIterator code_it(HEAP->code_space(), &OverflowObjectSize);
+  HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
   if (marking_stack_.is_full()) return;
 
-  HeapObjectIterator map_it(HEAP->map_space(), &OverflowObjectSize);
+  HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
   if (marking_stack_.is_full()) return;
 
-  HeapObjectIterator cell_it(HEAP->cell_space(), &OverflowObjectSize);
+  HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
   if (marking_stack_.is_full()) return;
 
-  LargeObjectIterator lo_it(HEAP->lo_space(), &OverflowObjectSize);
+  LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
   if (marking_stack_.is_full()) return;
 
@@ -1366,7 +1371,7 @@
   // The recursive GC marker detects when it is nearing stack overflow,
   // and switches to a different marking system.  JS interrupts interfere
   // with the C stack limit check.
-  PostponeInterruptsScope postpone(heap_->isolate());
+  PostponeInterruptsScope postpone(heap()->isolate());
 
 #ifdef DEBUG
   ASSERT(state_ == PREPARE_GC);
@@ -1374,14 +1379,14 @@
 #endif
   // The to space contains live objects, the from space is used as a marking
   // stack.
-  marking_stack_.Initialize(heap_->new_space()->FromSpaceLow(),
-                            heap_->new_space()->FromSpaceHigh());
+  marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
+                            heap()->new_space()->FromSpaceHigh());
 
   ASSERT(!marking_stack_.overflowed());
 
   PrepareForCodeFlushing();
 
-  RootMarkingVisitor root_visitor(heap_);
+  RootMarkingVisitor root_visitor(heap());
   MarkRoots(&root_visitor);
 
   // The objects reachable from the roots are marked, yet unreachable
@@ -1395,10 +1400,10 @@
   //
   // First we identify nonlive weak handles and mark them as pending
   // destruction.
-  heap_->isolate()->global_handles()->IdentifyWeakHandles(
+  heap()->isolate()->global_handles()->IdentifyWeakHandles(
       &IsUnmarkedHeapObject);
   // Then we mark the objects and process the transitive closure.
-  heap_->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
+  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
   while (marking_stack_.overflowed()) {
     RefillMarkingStack();
     EmptyMarkingStack();
@@ -1411,20 +1416,20 @@
   // Prune the symbol table removing all symbols only pointed to by the
   // symbol table.  Cannot use symbol_table() here because the symbol
   // table is marked.
-  SymbolTable* symbol_table = heap_->raw_unchecked_symbol_table();
-  SymbolTableCleaner v;
+  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
+  SymbolTableCleaner v(heap());
   symbol_table->IterateElements(&v);
   symbol_table->ElementsRemoved(v.PointersRemoved());
-  heap_->external_string_table_.Iterate(&v);
-  heap_->external_string_table_.CleanUp();
+  heap()->external_string_table_.Iterate(&v);
+  heap()->external_string_table_.CleanUp();
 
   // Process the weak references.
   MarkCompactWeakObjectRetainer mark_compact_object_retainer;
-  heap_->ProcessWeakReferences(&mark_compact_object_retainer);
+  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
 
   // Remove object groups after marking phase.
-  heap_->isolate()->global_handles()->RemoveObjectGroups();
-  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
+  heap()->isolate()->global_handles()->RemoveObjectGroups();
+  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
 
   // Flush code from collected candidates.
   if (is_code_flushing_enabled()) {
@@ -1432,28 +1437,28 @@
   }
 
   // Clean up dead objects from the runtime profiler.
-  heap_->isolate()->runtime_profiler()->RemoveDeadSamples();
+  heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
 }
 
 
 #ifdef DEBUG
 void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
   live_bytes_ += obj->Size();
-  if (HEAP->new_space()->Contains(obj)) {
+  if (heap()->new_space()->Contains(obj)) {
     live_young_objects_size_ += obj->Size();
-  } else if (HEAP->map_space()->Contains(obj)) {
+  } else if (heap()->map_space()->Contains(obj)) {
     ASSERT(obj->IsMap());
     live_map_objects_size_ += obj->Size();
-  } else if (HEAP->cell_space()->Contains(obj)) {
+  } else if (heap()->cell_space()->Contains(obj)) {
     ASSERT(obj->IsJSGlobalPropertyCell());
     live_cell_objects_size_ += obj->Size();
-  } else if (HEAP->old_pointer_space()->Contains(obj)) {
+  } else if (heap()->old_pointer_space()->Contains(obj)) {
     live_old_pointer_objects_size_ += obj->Size();
-  } else if (HEAP->old_data_space()->Contains(obj)) {
+  } else if (heap()->old_data_space()->Contains(obj)) {
     live_old_data_objects_size_ += obj->Size();
-  } else if (HEAP->code_space()->Contains(obj)) {
+  } else if (heap()->code_space()->Contains(obj)) {
     live_code_objects_size_ += obj->Size();
-  } else if (HEAP->lo_space()->Contains(obj)) {
+  } else if (heap()->lo_space()->Contains(obj)) {
     live_lo_objects_size_ += obj->Size();
   } else {
     UNREACHABLE();
@@ -1469,7 +1474,7 @@
       compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
 #endif
   // Deallocate unmarked objects and clear marked bits for marked objects.
-  HEAP->lo_space()->FreeUnmarkedObjects();
+  heap()->lo_space()->FreeUnmarkedObjects();
 }
 
 
@@ -1482,7 +1487,7 @@
 
 
 void MarkCompactCollector::ClearNonLiveTransitions() {
-  HeapObjectIterator map_iterator(HEAP->map_space(), &SizeOfMarkedObject);
+  HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
   // Iterate over the map space, setting map transitions that go from
   // a marked map to an unmarked map to null transitions.  At the same time,
   // set all the prototype fields of maps back to their original value,
@@ -1532,7 +1537,7 @@
       // This test will always be false on the first iteration.
       if (on_dead_path && current->IsMarked()) {
         on_dead_path = false;
-        current->ClearNonLiveTransitions(heap_, real_prototype);
+        current->ClearNonLiveTransitions(heap(), real_prototype);
       }
       *HeapObject::RawField(current, Map::kPrototypeOffset) =
           real_prototype;
@@ -1690,7 +1695,7 @@
 
 
 // Most non-live objects are ignored.
-inline void IgnoreNonLiveObject(HeapObject* object) {}
+inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
 
 
 // Function template that, given a range of addresses (eg, a semispace or a
@@ -1744,7 +1749,7 @@
       }
     } else {  // Non-live object.
       object_size = object->Size();
-      ProcessNonLive(object);
+      ProcessNonLive(object, collector->heap()->isolate());
       if (is_prev_alive) {  // Transition from live to non-live.
         free_start = current;
         is_prev_alive = false;
@@ -1767,8 +1772,8 @@
                                    EncodeForwardingAddressInNewSpace,
                                    IgnoreNonLiveObject>(
       this,
-      heap_->new_space()->bottom(),
-      heap_->new_space()->top(),
+      heap()->new_space()->bottom(),
+      heap()->new_space()->top(),
       &ignored);
 }
 
@@ -2089,7 +2094,8 @@
           is_previous_alive = true;
         }
       } else {
-        heap->mark_compact_collector()->ReportDeleteIfNeeded(object);
+        heap->mark_compact_collector()->ReportDeleteIfNeeded(
+            object, heap->isolate());
         if (is_previous_alive) {  // Transition from live to free.
           free_start = current;
           is_previous_alive = false;
@@ -2189,24 +2195,24 @@
   // Objects in the active semispace of the young generation may be
   // relocated to the inactive semispace (if not promoted).  Set the
   // relocation info to the beginning of the inactive semispace.
-  heap_->new_space()->MCResetRelocationInfo();
+  heap()->new_space()->MCResetRelocationInfo();
 
   // Compute the forwarding pointers in each space.
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
                                         ReportDeleteIfNeeded>(
-      heap_->old_pointer_space());
+      heap()->old_pointer_space());
 
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
                                         IgnoreNonLiveObject>(
-      heap_->old_data_space());
+      heap()->old_data_space());
 
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
                                         ReportDeleteIfNeeded>(
-      heap_->code_space());
+      heap()->code_space());
 
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
                                         IgnoreNonLiveObject>(
-      heap_->cell_space());
+      heap()->cell_space());
 
 
   // Compute new space next to last after the old and code spaces have been
@@ -2218,25 +2224,26 @@
   // non-live map pointers to get the sizes of non-live objects.
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
                                         IgnoreNonLiveObject>(
-      heap_->map_space());
+      heap()->map_space());
 
   // Write relocation info to the top page, so we can use it later.  This is
   // done after promoting objects from the new space so we get the correct
   // allocation top.
-  heap_->old_pointer_space()->MCWriteRelocationInfoToPage();
-  heap_->old_data_space()->MCWriteRelocationInfoToPage();
-  heap_->code_space()->MCWriteRelocationInfoToPage();
-  heap_->map_space()->MCWriteRelocationInfoToPage();
-  heap_->cell_space()->MCWriteRelocationInfoToPage();
+  heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
+  heap()->old_data_space()->MCWriteRelocationInfoToPage();
+  heap()->code_space()->MCWriteRelocationInfoToPage();
+  heap()->map_space()->MCWriteRelocationInfoToPage();
+  heap()->cell_space()->MCWriteRelocationInfoToPage();
 }
 
 
 class MapIterator : public HeapObjectIterator {
  public:
-  MapIterator() : HeapObjectIterator(HEAP->map_space(), &SizeCallback) { }
+  explicit MapIterator(Heap* heap)
+    : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
 
-  explicit MapIterator(Address start)
-      : HeapObjectIterator(HEAP->map_space(), start, &SizeCallback) { }
+  MapIterator(Heap* heap, Address start)
+      : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
 
  private:
   static int SizeCallback(HeapObject* unused) {
@@ -2252,7 +2259,8 @@
     : heap_(heap),
       live_maps_(live_maps),
       to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
-      map_to_evacuate_it_(to_evacuate_start_),
+      vacant_map_it_(heap),
+      map_to_evacuate_it_(heap, to_evacuate_start_),
       first_map_to_evacuate_(
           reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
   }
@@ -2273,36 +2281,41 @@
 
   void UpdateMapPointersInRoots() {
     MapUpdatingVisitor map_updating_visitor;
-    heap_->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
-    heap_->isolate()->global_handles()->IterateWeakRoots(&map_updating_visitor);
+    heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
+    heap()->isolate()->global_handles()->IterateWeakRoots(
+        &map_updating_visitor);
     LiveObjectList::IterateElements(&map_updating_visitor);
   }
 
   void UpdateMapPointersInPagedSpace(PagedSpace* space) {
-    ASSERT(space != heap_->map_space());
+    ASSERT(space != heap()->map_space());
 
     PageIterator it(space, PageIterator::PAGES_IN_USE);
     while (it.has_next()) {
       Page* p = it.next();
-      UpdateMapPointersInRange(heap_, p->ObjectAreaStart(), p->AllocationTop());
+      UpdateMapPointersInRange(heap(),
+                               p->ObjectAreaStart(),
+                               p->AllocationTop());
     }
   }
 
   void UpdateMapPointersInNewSpace() {
-    NewSpace* space = heap_->new_space();
-    UpdateMapPointersInRange(heap_, space->bottom(), space->top());
+    NewSpace* space = heap()->new_space();
+    UpdateMapPointersInRange(heap(), space->bottom(), space->top());
   }
 
   void UpdateMapPointersInLargeObjectSpace() {
-    LargeObjectIterator it(heap_->lo_space());
+    LargeObjectIterator it(heap()->lo_space());
     for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
-      UpdateMapPointersInObject(heap_, obj);
+      UpdateMapPointersInObject(heap(), obj);
   }
 
   void Finish() {
-    heap_->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
+    heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
   }
 
+  inline Heap* heap() const { return heap_; }
+
  private:
   Heap* heap_;
   int live_maps_;
@@ -2452,26 +2465,26 @@
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
-  SweepSpace(heap_, heap_->old_pointer_space());
-  SweepSpace(heap_, heap_->old_data_space());
-  SweepSpace(heap_, heap_->code_space());
-  SweepSpace(heap_, heap_->cell_space());
+  SweepSpace(heap(), heap()->old_pointer_space());
+  SweepSpace(heap(), heap()->old_data_space());
+  SweepSpace(heap(), heap()->code_space());
+  SweepSpace(heap(), heap()->cell_space());
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
-    SweepNewSpace(heap_, heap_->new_space());
+    SweepNewSpace(heap(), heap()->new_space());
   }
-  SweepSpace(heap_, heap_->map_space());
+  SweepSpace(heap(), heap()->map_space());
 
-  heap_->IterateDirtyRegions(heap_->map_space(),
-                             &heap_->IteratePointersInDirtyMapsRegion,
+  heap()->IterateDirtyRegions(heap()->map_space(),
+                             &heap()->IteratePointersInDirtyMapsRegion,
                              &UpdatePointerToNewGen,
-                             heap_->WATERMARK_SHOULD_BE_VALID);
+                             heap()->WATERMARK_SHOULD_BE_VALID);
 
-  intptr_t live_maps_size = heap_->map_space()->Size();
+  intptr_t live_maps_size = heap()->map_space()->Size();
   int live_maps = static_cast<int>(live_maps_size / Map::kSize);
   ASSERT(live_map_objects_size_ == live_maps_size);
 
-  if (heap_->map_space()->NeedsCompaction(live_maps)) {
-    MapCompact map_compact(heap_, live_maps);
+  if (heap()->map_space()->NeedsCompaction(live_maps)) {
+    MapCompact map_compact(heap(), live_maps);
 
     map_compact.CompactMaps();
     map_compact.UpdateMapPointersInRoots();
@@ -2479,7 +2492,7 @@
     PagedSpaces spaces;
     for (PagedSpace* space = spaces.next();
          space != NULL; space = spaces.next()) {
-      if (space == heap_->map_space()) continue;
+      if (space == heap()->map_space()) continue;
       map_compact.UpdateMapPointersInPagedSpace(space);
     }
     map_compact.UpdateMapPointersInNewSpace();
@@ -2575,6 +2588,8 @@
         reinterpret_cast<Code*>(target)->instruction_start());
   }
 
+  inline Heap* heap() const { return heap_; }
+
  private:
   void UpdatePointer(Object** p) {
     if (!(*p)->IsHeapObject()) return;
@@ -2582,27 +2597,27 @@
     HeapObject* obj = HeapObject::cast(*p);
     Address old_addr = obj->address();
     Address new_addr;
-    ASSERT(!heap_->InFromSpace(obj));
+    ASSERT(!heap()->InFromSpace(obj));
 
-    if (heap_->new_space()->Contains(obj)) {
+    if (heap()->new_space()->Contains(obj)) {
       Address forwarding_pointer_addr =
-          heap_->new_space()->FromSpaceLow() +
-          heap_->new_space()->ToSpaceOffsetForAddress(old_addr);
+          heap()->new_space()->FromSpaceLow() +
+          heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
       new_addr = Memory::Address_at(forwarding_pointer_addr);
 
 #ifdef DEBUG
-      ASSERT(heap_->old_pointer_space()->Contains(new_addr) ||
-             heap_->old_data_space()->Contains(new_addr) ||
-             heap_->new_space()->FromSpaceContains(new_addr) ||
-             heap_->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
+      ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
+             heap()->old_data_space()->Contains(new_addr) ||
+             heap()->new_space()->FromSpaceContains(new_addr) ||
+             heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
 
-      if (heap_->new_space()->FromSpaceContains(new_addr)) {
-        ASSERT(heap_->new_space()->FromSpaceOffsetForAddress(new_addr) <=
-               heap_->new_space()->ToSpaceOffsetForAddress(old_addr));
+      if (heap()->new_space()->FromSpaceContains(new_addr)) {
+        ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+               heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
       }
 #endif
 
-    } else if (heap_->lo_space()->Contains(obj)) {
+    } else if (heap()->lo_space()->Contains(obj)) {
       // Don't move objects in the large object space.
       return;
 
@@ -2641,34 +2656,34 @@
   ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
   state_ = UPDATE_POINTERS;
 #endif
-  UpdatingVisitor updating_visitor(heap_);
-  heap_->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
+  UpdatingVisitor updating_visitor(heap());
+  heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
       &updating_visitor);
-  heap_->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
-  heap_->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
+  heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
+  heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
 
   // Update the pointer to the head of the weak list of global contexts.
-  updating_visitor.VisitPointer(&heap_->global_contexts_list_);
+  updating_visitor.VisitPointer(&heap()->global_contexts_list_);
 
   LiveObjectList::IterateElements(&updating_visitor);
 
   int live_maps_size = IterateLiveObjects(
-      heap_->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+      heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
   int live_pointer_olds_size = IterateLiveObjects(
-      heap_->old_pointer_space(),
+      heap()->old_pointer_space(),
       &MarkCompactCollector::UpdatePointersInOldObject);
   int live_data_olds_size = IterateLiveObjects(
-      heap_->old_data_space(),
+      heap()->old_data_space(),
       &MarkCompactCollector::UpdatePointersInOldObject);
   int live_codes_size = IterateLiveObjects(
-      heap_->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+      heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
   int live_cells_size = IterateLiveObjects(
-      heap_->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+      heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
   int live_news_size = IterateLiveObjects(
-      heap_->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
+      heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
 
   // Large objects do not move, the map word can be updated directly.
-  LargeObjectIterator it(heap_->lo_space());
+  LargeObjectIterator it(heap()->lo_space());
   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
     UpdatePointersInNewObject(obj);
   }
@@ -2695,8 +2710,8 @@
 
   Address forwarded = GetForwardingAddressInOldSpace(old_map);
 
-  ASSERT(heap_->map_space()->Contains(old_map));
-  ASSERT(heap_->map_space()->Contains(forwarded));
+  ASSERT(heap()->map_space()->Contains(old_map));
+  ASSERT(heap()->map_space()->Contains(forwarded));
 #ifdef DEBUG
   if (FLAG_gc_verbose) {
     PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
@@ -2711,7 +2726,7 @@
   int obj_size = obj->SizeFromMap(old_map);
 
   // Update pointers in the object body.
-  UpdatingVisitor updating_visitor(heap_);
+  UpdatingVisitor updating_visitor(heap());
   obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
   return obj_size;
 }
@@ -2720,8 +2735,8 @@
 int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
   // Decode the map pointer.
   MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
-  ASSERT(heap_->map_space()->Contains(HeapObject::FromAddress(map_addr)));
+  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
 
   // At this point, the first word of map_addr is also encoded, cannot
   // cast it to Map* using Map::cast.
@@ -2742,7 +2757,7 @@
 #endif
 
   // Update pointers in the object body.
-  UpdatingVisitor updating_visitor(heap_);
+  UpdatingVisitor updating_visitor(heap());
   obj->IterateBody(type, obj_size, &updating_visitor);
   return obj_size;
 }
@@ -2799,18 +2814,18 @@
   // Relocates objects, always relocate map objects first. Relocating
   // objects in other space relies on map objects to get object size.
   int live_maps_size = IterateLiveObjects(
-      heap_->map_space(), &MarkCompactCollector::RelocateMapObject);
+      heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
   int live_pointer_olds_size = IterateLiveObjects(
-      heap_->old_pointer_space(),
+      heap()->old_pointer_space(),
       &MarkCompactCollector::RelocateOldPointerObject);
   int live_data_olds_size = IterateLiveObjects(
-      heap_->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
+      heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
   int live_codes_size = IterateLiveObjects(
-      heap_->code_space(), &MarkCompactCollector::RelocateCodeObject);
+      heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
   int live_cells_size = IterateLiveObjects(
-      heap_->cell_space(), &MarkCompactCollector::RelocateCellObject);
+      heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
   int live_news_size = IterateLiveObjects(
-      heap_->new_space(), &MarkCompactCollector::RelocateNewObject);
+      heap()->new_space(), &MarkCompactCollector::RelocateNewObject);
 
   USE(live_maps_size);
   USE(live_pointer_olds_size);
@@ -2826,28 +2841,28 @@
   ASSERT(live_news_size == live_young_objects_size_);
 
   // Flip from and to spaces
-  heap_->new_space()->Flip();
+  heap()->new_space()->Flip();
 
-  heap_->new_space()->MCCommitRelocationInfo();
+  heap()->new_space()->MCCommitRelocationInfo();
 
   // Set age_mark to bottom in to space
-  Address mark = heap_->new_space()->bottom();
-  heap_->new_space()->set_age_mark(mark);
+  Address mark = heap()->new_space()->bottom();
+  heap()->new_space()->set_age_mark(mark);
 
   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
     space->MCCommitRelocationInfo();
 
-  heap_->CheckNewSpaceExpansionCriteria();
-  heap_->IncrementYoungSurvivorsCounter(live_news_size);
+  heap()->CheckNewSpaceExpansionCriteria();
+  heap()->IncrementYoungSurvivorsCounter(live_news_size);
 }
 
 
 int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
   // Recover map pointer.
   MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
-  ASSERT(heap_->map_space()->Contains(HeapObject::FromAddress(map_addr)));
+  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
 
   // Get forwarding address before resetting map pointer
   Address new_addr = GetForwardingAddressInOldSpace(obj);
@@ -2860,7 +2875,7 @@
 
   if (new_addr != old_addr) {
     // Move contents.
-    heap_->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+    heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    Map::kSize);
   }
@@ -2906,8 +2921,8 @@
                                                    PagedSpace* space) {
   // Recover map pointer.
   MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
-  ASSERT(heap_->map_space()->Contains(map_addr));
+  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+  ASSERT(heap()->map_space()->Contains(map_addr));
 
   // Get forwarding address before resetting map pointer.
   Address new_addr = GetForwardingAddressInOldSpace(obj);
@@ -2919,10 +2934,10 @@
 
   if (new_addr != old_addr) {
     // Move contents.
-    if (space == heap_->old_data_space()) {
-      heap_->MoveBlock(new_addr, old_addr, obj_size);
+    if (space == heap()->old_data_space()) {
+      heap()->MoveBlock(new_addr, old_addr, obj_size);
     } else {
-      heap_->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+      heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                      old_addr,
                                                      obj_size);
     }
@@ -2932,47 +2947,47 @@
 
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsSharedFunctionInfo()) {
-    PROFILE(heap_->isolate(),
+    PROFILE(heap()->isolate(),
             SharedFunctionInfoMoveEvent(old_addr, new_addr));
   }
-  HEAP_PROFILE(heap_, ObjectMoveEvent(old_addr, new_addr));
+  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
 
   return obj_size;
 }
 
 
 int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap_->old_pointer_space());
+  return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
 }
 
 
 int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap_->old_data_space());
+  return RelocateOldNonCodeObject(obj, heap()->old_data_space());
 }
 
 
 int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap_->cell_space());
+  return RelocateOldNonCodeObject(obj, heap()->cell_space());
 }
 
 
 int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
   // Recover map pointer.
   MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
-  ASSERT(heap_->map_space()->Contains(HeapObject::FromAddress(map_addr)));
+  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
 
   // Get forwarding address before resetting map pointer
   Address new_addr = GetForwardingAddressInOldSpace(obj);
 
   // Reset the map pointer.
-  int obj_size = RestoreMap(obj, heap_->code_space(), new_addr, map_addr);
+  int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);
 
   Address old_addr = obj->address();
 
   if (new_addr != old_addr) {
     // Move contents.
-    heap_->MoveBlock(new_addr, old_addr, obj_size);
+    heap()->MoveBlock(new_addr, old_addr, obj_size);
   }
 
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
@@ -2980,9 +2995,9 @@
     // May also update inline cache target.
     Code::cast(copied_to)->Relocate(new_addr - old_addr);
     // Notify the logger that compiled code has moved.
-    PROFILE(heap_->isolate(), CodeMoveEvent(old_addr, new_addr));
+    PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
   }
-  HEAP_PROFILE(heap_, ObjectMoveEvent(old_addr, new_addr));
+  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
 
   return obj_size;
 }
@@ -2993,26 +3008,26 @@
 
   // Get forwarding address
   Address old_addr = obj->address();
-  int offset = heap_->new_space()->ToSpaceOffsetForAddress(old_addr);
+  int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
 
   Address new_addr =
-    Memory::Address_at(heap_->new_space()->FromSpaceLow() + offset);
+    Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);
 
 #ifdef DEBUG
-  if (heap_->new_space()->FromSpaceContains(new_addr)) {
-    ASSERT(heap_->new_space()->FromSpaceOffsetForAddress(new_addr) <=
-           heap_->new_space()->ToSpaceOffsetForAddress(old_addr));
+  if (heap()->new_space()->FromSpaceContains(new_addr)) {
+    ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+           heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
   } else {
-    ASSERT(heap_->TargetSpace(obj) == heap_->old_pointer_space() ||
-           heap_->TargetSpace(obj) == heap_->old_data_space());
+    ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
+           heap()->TargetSpace(obj) == heap()->old_data_space());
   }
 #endif
 
   // New and old addresses cannot overlap.
-  if (heap_->InNewSpace(HeapObject::FromAddress(new_addr))) {
-    heap_->CopyBlock(new_addr, old_addr, obj_size);
+  if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
+    heap()->CopyBlock(new_addr, old_addr, obj_size);
   } else {
-    heap_->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+    heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    obj_size);
   }
@@ -3025,10 +3040,10 @@
 
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsSharedFunctionInfo()) {
-    PROFILE(heap_->isolate(),
+    PROFILE(heap()->isolate(),
             SharedFunctionInfoMoveEvent(old_addr, new_addr));
   }
-  HEAP_PROFILE(heap_, ObjectMoveEvent(old_addr, new_addr));
+  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
 
   return obj_size;
 }
@@ -3037,7 +3052,7 @@
 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
   if (enable) {
     if (code_flusher_ != NULL) return;
-    code_flusher_ = new CodeFlusher(heap_->isolate());
+    code_flusher_ = new CodeFlusher(heap()->isolate());
   } else {
     if (code_flusher_ == NULL) return;
     delete code_flusher_;
@@ -3046,7 +3061,8 @@
 }
 
 
-void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
+void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
+                                                Isolate* isolate) {
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (obj->IsCode()) {
     GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
@@ -3054,7 +3070,7 @@
 #endif
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (obj->IsCode()) {
-    PROFILE(ISOLATE, CodeDeleteEvent(obj->address()));
+    PROFILE(isolate, CodeDeleteEvent(obj->address()));
   }
 #endif
 }
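Most of the mark-compact.cc churn above is one mechanical change: the global HEAP macro and Isolate::Current() are replaced by the collector's own heap() accessor, and the static visitors gain explicit Heap* / MarkCompactCollector* parameters, so the marking code operates on the heap it was handed rather than on thread-local state. A minimal sketch of that plumbing style follows; HeapSketch and StaticMarkingVisitorSketch are invented stand-ins, not V8 classes.

#include <cstdio>

// Invented stand-ins (HeapSketch, StaticMarkingVisitorSketch): only the
// parameter plumbing is the point, not real GC marking.
struct HeapSketch {
  int marked = 0;
  void Mark(int object) {
    marked++;
    std::printf("marked object %d (total %d)\n", object, marked);
  }
};

struct StaticMarkingVisitorSketch {
  // Instead of reaching for a global HEAP, every visit receives the heap
  // it should operate on as an explicit argument.
  static void VisitPointers(HeapSketch* heap, const int* start,
                            const int* end) {
    for (const int* p = start; p < end; p++) heap->Mark(*p);
  }
};

int main() {
  HeapSketch heap;
  int roots[] = {1, 2, 3};
  StaticMarkingVisitorSketch::VisitPointers(&heap, roots, roots + 3);
  return 0;
}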
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 3c9d28b..04d0ff6 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -98,8 +98,6 @@
 
 // -------------------------------------------------------------------------
 // Mark-Compact collector
-//
-// All methods are static.
 
 class OverflowedObjectsScanner;
 
@@ -129,7 +127,7 @@
                                    int* offset);
 
   // Type of functions to process non-live objects.
-  typedef void (*ProcessNonLiveFunction)(HeapObject* object);
+  typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);
 
   // Pointer to member function, used in IterateLiveObjects.
   typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
@@ -179,7 +177,7 @@
 #endif
 
   // Determine type of object and emit deletion log event.
-  static void ReportDeleteIfNeeded(HeapObject* obj);
+  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
 
   // Returns size of a possibly marked object.
   static int SizeOfMarkedObject(HeapObject* obj);
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index 6441470..f507590 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -147,7 +147,7 @@
  public:
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
-  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kLastParameterOffset = +2 * kPointerSize;
   static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
 
   // Caller SP-relative.
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 5395bbb..37c51d7 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -2862,6 +2862,34 @@
 }
 
 
+Heap* Code::heap() {
+  // NOTE: The address() helper is not used here, to save one instruction.
+  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+  ASSERT(heap != NULL);
+  ASSERT(heap->isolate() == Isolate::Current());
+  return heap;
+}
+
+
+Isolate* Code::isolate() {
+  return heap()->isolate();
+}
+
+
+Heap* JSGlobalPropertyCell::heap() {
+  // NOTE: The address() helper is not used here, to save one instruction.
+  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+  ASSERT(heap != NULL);
+  ASSERT(heap->isolate() == Isolate::Current());
+  return heap;
+}
+
+
+Isolate* JSGlobalPropertyCell::isolate() {
+  return heap()->isolate();
+}
+
+
 Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
   return HeapObject::
       FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
@@ -3028,10 +3056,6 @@
             kHasOnlySimpleThisPropertyAssignments)
 BOOL_ACCESSORS(SharedFunctionInfo,
                compiler_hints,
-               try_full_codegen,
-               kTryFullCodegen)
-BOOL_ACCESSORS(SharedFunctionInfo,
-               compiler_hints,
                allows_lazy_compilation,
                kAllowLazyCompilation)
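Code::heap() and JSGlobalPropertyCell::heap() above recover the owning heap from the object's own address through Page::FromAddress, which can work because pages are aligned memory blocks whose header carries a heap_ back pointer. Below is a self-contained toy of that address-masking trick, assuming a 4 KB page purely for illustration; the real page size and header layout are defined elsewhere and are not shown in this patch.

#include <cstdint>
#include <cstdio>

// Toy model of "find the owning structure from an interior pointer": a
// 4 KB-aligned page header stores a back pointer, so any address inside
// the page maps back to it by masking the low bits.  Page size and layout
// here are invented for the sketch.
struct HeapSketch {
  const char* name;
};

struct alignas(4096) PageSketch {
  HeapSketch* heap_;                        // back pointer from page to heap
  char payload[4096 - sizeof(HeapSketch*)];

  static PageSketch* FromAddress(const void* addr) {
    return reinterpret_cast<PageSketch*>(
        reinterpret_cast<uintptr_t>(addr) & ~static_cast<uintptr_t>(4095));
  }
};

int main() {
  HeapSketch heap = {"main heap"};
  PageSketch page;
  page.heap_ = &heap;

  void* interior = &page.payload[100];      // some address inside the page
  std::printf("owner: %s\n", PageSketch::FromAddress(interior)->heap_->name);
  return 0;
}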
 
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 42f9060..da955da 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -141,13 +141,22 @@
 template<typename Callback>
 class VisitorDispatchTable {
  public:
+  void CopyFrom(VisitorDispatchTable* other) {
+    // memcpy is deliberately avoided so that, while the update is in
+    // progress, every element of the callbacks_ array remains a valid
+    // pointer (memcpy might be implemented as a byte copying loop).
+    for (int i = 0; i < StaticVisitorBase::kVisitorIdCount; i++) {
+      NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
+    }
+  }
+
   inline Callback GetVisitor(Map* map) {
-    return callbacks_[map->visitor_id()];
+    return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
   }
 
   void Register(StaticVisitorBase::VisitorId id, Callback callback) {
     ASSERT(id < StaticVisitorBase::kVisitorIdCount);  // id is unsigned.
-    callbacks_[id] = callback;
+    callbacks_[id] = reinterpret_cast<AtomicWord>(callback);
   }
 
   template<typename Visitor,
@@ -179,7 +188,7 @@
   }
 
  private:
-  Callback callbacks_[StaticVisitorBase::kVisitorIdCount];
+  AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
 };
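Storing the table as AtomicWord and copying it element by element in CopyFrom means a thread reading the table while another thread updates it always observes a whole pointer for each slot, either the old or the new one, which a byte-wise memcpy would not guarantee. The sketch below models the same idea with std::atomic and relaxed ordering standing in for V8's NoBarrier_Store/NoBarrier_Load; DispatchTableSketch and the callbacks are invented for illustration.

#include <atomic>
#include <cstdio>

// Invented sketch (DispatchTableSketch): each slot is copied with an atomic
// store so a reader racing with CopyFrom always sees a whole pointer, old or
// new, never a half-written one.  std::atomic with relaxed ordering stands in
// for V8's NoBarrier_Store/NoBarrier_Load on AtomicWord.
typedef void (*Callback)();

void VisitA() { std::puts("visit A"); }
void VisitB() { std::puts("visit B"); }

const int kVisitorIdCount = 2;

struct DispatchTableSketch {
  std::atomic<Callback> callbacks_[kVisitorIdCount];

  void Register(int id, Callback callback) {
    callbacks_[id].store(callback, std::memory_order_relaxed);
  }
  Callback GetVisitor(int id) const {
    return callbacks_[id].load(std::memory_order_relaxed);
  }
  void CopyFrom(const DispatchTableSketch* other) {
    // Element-wise atomic stores instead of memcpy.
    for (int i = 0; i < kVisitorIdCount; i++) {
      callbacks_[i].store(other->GetVisitor(i), std::memory_order_relaxed);
    }
  }
};

int main() {
  DispatchTableSketch base, copy;
  base.Register(0, VisitA);
  base.Register(1, VisitB);
  copy.CopyFrom(&base);
  copy.GetVisitor(1)();   // prints "visit B"
  return 0;
}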
 
 
diff --git a/src/objects.cc b/src/objects.cc
index 8cb36e9..9a5357a 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -113,39 +113,47 @@
   if (IsSmi()) {
     return Isolate::Current()->heap()->ToBoolean(Smi::cast(this)->value() != 0);
   }
-  if (IsUndefined() || IsNull()) {
-    return HeapObject::cast(this)->GetHeap()->false_value();
+  HeapObject* heap_object = HeapObject::cast(this);
+  if (heap_object->IsUndefined() || heap_object->IsNull()) {
+    return heap_object->GetHeap()->false_value();
   }
   // Undetectable object is false
-  if (IsUndetectableObject()) {
-    return HeapObject::cast(this)->GetHeap()->false_value();
+  if (heap_object->IsUndetectableObject()) {
+    return heap_object->GetHeap()->false_value();
   }
-  if (IsString()) {
-    return HeapObject::cast(this)->GetHeap()->ToBoolean(
+  if (heap_object->IsString()) {
+    return heap_object->GetHeap()->ToBoolean(
         String::cast(this)->length() != 0);
   }
-  if (IsHeapNumber()) {
+  if (heap_object->IsHeapNumber()) {
     return HeapNumber::cast(this)->HeapNumberToBoolean();
   }
-  return Isolate::Current()->heap()->true_value();
+  return heap_object->GetHeap()->true_value();
 }
 
 
 void Object::Lookup(String* name, LookupResult* result) {
-  if (IsJSObject()) return JSObject::cast(this)->Lookup(name, result);
   Object* holder = NULL;
-  if (IsString()) {
-    Heap* heap = HeapObject::cast(this)->GetHeap();
-    Context* global_context = heap->isolate()->context()->global_context();
-    holder = global_context->string_function()->instance_prototype();
-  } else if (IsNumber()) {
+  if (IsSmi()) {
     Heap* heap = Isolate::Current()->heap();
     Context* global_context = heap->isolate()->context()->global_context();
     holder = global_context->number_function()->instance_prototype();
-  } else if (IsBoolean()) {
-    Heap* heap = HeapObject::cast(this)->GetHeap();
-    Context* global_context = heap->isolate()->context()->global_context();
-    holder = global_context->boolean_function()->instance_prototype();
+  } else {
+    HeapObject* heap_object = HeapObject::cast(this);
+    if (heap_object->IsJSObject()) {
+      return JSObject::cast(this)->Lookup(name, result);
+    }
+    Heap* heap = heap_object->GetHeap();
+    if (heap_object->IsString()) {
+      Context* global_context = heap->isolate()->context()->global_context();
+      holder = global_context->string_function()->instance_prototype();
+    } else if (heap_object->IsHeapNumber()) {
+      Context* global_context = heap->isolate()->context()->global_context();
+      holder = global_context->number_function()->instance_prototype();
+    } else if (heap_object->IsBoolean()) {
+      Context* global_context = heap->isolate()->context()->global_context();
+      holder = global_context->boolean_function()->instance_prototype();
+    }
   }
   ASSERT(holder != NULL);  // Cannot handle null or undefined.
   JSObject::cast(holder)->Lookup(name, result);
@@ -247,7 +255,6 @@
     LookupResult* result,
     String* name,
     PropertyAttributes* attributes) {
-  Heap* heap = name->GetHeap();
   if (result->IsProperty()) {
     switch (result->type()) {
       case CALLBACKS: {
@@ -299,6 +306,7 @@
 
   // No accessible property found.
   *attributes = ABSENT;
+  Heap* heap = name->GetHeap();
   heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
   return heap->undefined_value();
 }
@@ -309,7 +317,6 @@
     LookupResult* result,
     String* name,
     bool continue_search) {
-  Heap* heap = name->GetHeap();
   if (result->IsProperty()) {
     switch (result->type()) {
       case CALLBACKS: {
@@ -363,7 +370,7 @@
     }
   }
 
-  heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+  GetHeap()->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
   return ABSENT;
 }
 
@@ -397,11 +404,11 @@
                                              Object* value,
                                              PropertyDetails details) {
   ASSERT(!HasFastProperties());
-  Heap* heap = name->GetHeap();
   int entry = property_dictionary()->FindEntry(name);
   if (entry == StringDictionary::kNotFound) {
     Object* store_value = value;
     if (IsGlobalObject()) {
+      Heap* heap = name->GetHeap();
       MaybeObject* maybe_store_value =
           heap->AllocateJSGlobalPropertyCell(value);
       if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
@@ -433,7 +440,6 @@
 
 MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
   ASSERT(!HasFastProperties());
-  Heap* heap = GetHeap();
   StringDictionary* dictionary = property_dictionary();
   int entry = dictionary->FindEntry(name);
   if (entry != StringDictionary::kNotFound) {
@@ -441,7 +447,7 @@
     if (IsGlobalObject()) {
       PropertyDetails details = dictionary->DetailsAt(entry);
       if (details.IsDontDelete()) {
-        if (mode != FORCE_DELETION) return heap->false_value();
+        if (mode != FORCE_DELETION) return GetHeap()->false_value();
         // When forced to delete global properties, we have to make a
         // map change to invalidate any ICs that think they can load
         // from the DontDelete cell without checking if it contains
@@ -454,13 +460,13 @@
       }
       JSGlobalPropertyCell* cell =
           JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
-      cell->set_value(heap->the_hole_value());
+      cell->set_value(cell->heap()->the_hole_value());
       dictionary->DetailsAtPut(entry, details.AsDeleted());
     } else {
       return dictionary->DeleteProperty(entry, mode);
     }
   }
-  return heap->true_value();
+  return GetHeap()->true_value();
 }
 
 
@@ -550,22 +556,31 @@
 
 
 MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
-  if (IsJSObject()) {
-    return JSObject::cast(this)->GetElementWithReceiver(receiver, index);
-  }
-
   Object* holder = NULL;
-  Context* global_context = Isolate::Current()->context()->global_context();
-  if (IsString()) {
-    holder = global_context->string_function()->instance_prototype();
-  } else if (IsNumber()) {
+  if (IsSmi()) {
+    Context* global_context = Isolate::Current()->context()->global_context();
     holder = global_context->number_function()->instance_prototype();
-  } else if (IsBoolean()) {
-    holder = global_context->boolean_function()->instance_prototype();
   } else {
-    // Undefined and null have no indexed properties.
-    ASSERT(IsUndefined() || IsNull());
-    return HEAP->undefined_value();
+    HeapObject* heap_object = HeapObject::cast(this);
+
+    if (heap_object->IsJSObject()) {
+      return JSObject::cast(this)->GetElementWithReceiver(receiver, index);
+    }
+    Heap* heap = heap_object->GetHeap();
+    Isolate* isolate = heap->isolate();
+
+    Context* global_context = isolate->context()->global_context();
+    if (heap_object->IsString()) {
+      holder = global_context->string_function()->instance_prototype();
+    } else if (heap_object->IsHeapNumber()) {
+      holder = global_context->number_function()->instance_prototype();
+    } else if (heap_object->IsBoolean()) {
+      holder = global_context->boolean_function()->instance_prototype();
+    } else {
+      // Undefined and null have no indexed properties.
+      ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
+      return heap->undefined_value();
+    }
   }
 
   return JSObject::cast(holder)->GetElementWithReceiver(receiver, index);
@@ -573,14 +588,28 @@
 
 
 Object* Object::GetPrototype() {
+  if (IsSmi()) {
+    Heap* heap = Isolate::Current()->heap();
+    Context* context = heap->isolate()->context()->global_context();
+    return context->number_function()->instance_prototype();
+  }
+
+  HeapObject* heap_object = HeapObject::cast(this);
+
   // The object is either a number, a string, a boolean, or a real JS object.
-  if (IsJSObject()) return JSObject::cast(this)->map()->prototype();
-  Heap* heap = Isolate::Current()->heap();
+  if (heap_object->IsJSObject()) {
+    return JSObject::cast(this)->map()->prototype();
+  }
+  Heap* heap = heap_object->GetHeap();
   Context* context = heap->isolate()->context()->global_context();
 
-  if (IsNumber()) return context->number_function()->instance_prototype();
-  if (IsString()) return context->string_function()->instance_prototype();
-  if (IsBoolean()) {
+  if (heap_object->IsHeapNumber()) {
+    return context->number_function()->instance_prototype();
+  }
+  if (heap_object->IsString()) {
+    return context->string_function()->instance_prototype();
+  }
+  if (heap_object->IsBoolean()) {
     return context->boolean_function()->instance_prototype();
   } else {
     return heap->null_value();
@@ -908,8 +937,9 @@
     // All other JSObjects are rather similar to each other (JSObject,
     // JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
     default: {
-      Heap* heap = GetHeap();
-      Object* constructor = map()->constructor();
+      Map* map_of_this = map();
+      Heap* heap = map_of_this->heap();
+      Object* constructor = map_of_this->constructor();
       bool printed = false;
       if (constructor->IsHeapObject() &&
           !heap->Contains(HeapObject::cast(constructor))) {
@@ -1350,8 +1380,7 @@
     String* name,
     JSFunction* function,
     PropertyAttributes attributes) {
-  Heap* heap = GetHeap();
-  ASSERT(!heap->InNewSpace(function));
+  ASSERT(!GetHeap()->InNewSpace(function));
 
   // Allocate new instance descriptors with (name, function) added
   ConstantFunctionDescriptor d(name, function, attributes);
@@ -1376,6 +1405,7 @@
 
   // If the old map is the global object map (from new Object()),
   // then transitions are not added to it, so we are done.
+  Heap* heap = old_map->heap();
   if (old_map == heap->isolate()->context()->global_context()->
       object_function()->map()) {
     return function;
@@ -1412,7 +1442,6 @@
                                        Object* value,
                                        PropertyAttributes attributes) {
   ASSERT(!HasFastProperties());
-  Heap* heap = GetHeap();
   StringDictionary* dict = property_dictionary();
   Object* store_value = value;
   if (IsGlobalObject()) {
@@ -1429,6 +1458,7 @@
       dict->SetEntry(entry, name, store_value, details);
       return value;
     }
+    Heap* heap = GetHeap();
     { MaybeObject* maybe_store_value =
           heap->AllocateJSGlobalPropertyCell(value);
       if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
@@ -1450,8 +1480,9 @@
                                    PropertyAttributes attributes,
                                    StrictModeFlag strict_mode) {
   ASSERT(!IsJSGlobalProxy());
-  Heap* heap = GetHeap();
-  if (!map()->is_extensible()) {
+  Map* map_of_this = map();
+  Heap* heap = map_of_this->heap();
+  if (!map_of_this->is_extensible()) {
     if (strict_mode == kNonStrictMode) {
       return heap->undefined_value();
     } else {
@@ -1463,7 +1494,7 @@
   }
   if (HasFastProperties()) {
     // Ensure the descriptor array does not get too big.
-    if (map()->instance_descriptors()->number_of_descriptors() <
+    if (map_of_this->instance_descriptors()->number_of_descriptors() <
         DescriptorArray::kMaxNumberOfDescriptors) {
       if (value->IsJSFunction() && !heap->InNewSpace(value)) {
         return AddConstantFunctionProperty(name,
@@ -1537,7 +1568,7 @@
     return result;
   }
   // Do not add transitions to the map of "new Object()".
-  if (map() == GetHeap()->isolate()->context()->global_context()->
+  if (map() == old_map->heap()->isolate()->context()->global_context()->
       object_function()->map()) {
     return result;
   }
@@ -1836,8 +1867,9 @@
 
 MaybeObject* Map::GetExternalArrayElementsMap(ExternalArrayType array_type,
                                               bool safe_to_add_transition) {
+  Heap* current_heap = heap();
   DescriptorArray* descriptors = instance_descriptors();
-  String* external_array_sentinel_name = GetIsolate()->heap()->empty_symbol();
+  String* external_array_sentinel_name = current_heap->empty_symbol();
 
   if (safe_to_add_transition) {
     // It's only safe to manipulate the descriptor array if it would be
@@ -1845,7 +1877,8 @@
 
     ASSERT(!is_shared());  // no transitions can be added to shared maps.
     // Check if the external array transition already exists.
-    DescriptorLookupCache* cache = heap()->isolate()->descriptor_lookup_cache();
+    DescriptorLookupCache* cache =
+        current_heap->isolate()->descriptor_lookup_cache();
     int index = cache->Lookup(descriptors, external_array_sentinel_name);
     if (index == DescriptorLookupCache::kAbsent) {
       index = descriptors->Search(external_array_sentinel_name);
@@ -1979,7 +2012,6 @@
                                                         String* name,
                                                         Object* value,
                                                         bool check_prototype) {
-  Heap* heap = GetHeap();
   if (check_prototype && !result->IsProperty()) {
     LookupCallbackSetterInPrototypes(name, result);
   }
@@ -2020,6 +2052,7 @@
 
   HandleScope scope;
   Handle<Object> value_handle(value);
+  Heap* heap = GetHeap();
   heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
   return *value_handle;
 }
@@ -2157,7 +2190,6 @@
     String* name,
     Object* value,
     PropertyAttributes attributes) {
-  Heap* heap = GetHeap();
 
   // Make sure that the top context does not change when doing callbacks or
   // interceptor calls.
@@ -2165,9 +2197,11 @@
   LookupResult result;
   LocalLookup(name, &result);
   // Check access rights if needed.
-  if (IsAccessCheckNeeded()
-      && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
-    return SetPropertyWithFailedAccessCheck(&result, name, value, false);
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+      return SetPropertyWithFailedAccessCheck(&result, name, value, false);
+    }
   }
 
   if (IsJSGlobalProxy()) {
@@ -2318,14 +2352,15 @@
                                                   LookupResult* result,
                                                   String* name,
                                                   bool continue_search) {
-  Heap* heap = GetHeap();
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
-    return GetPropertyAttributeWithFailedAccessCheck(receiver,
-                                                     result,
-                                                     name,
-                                                     continue_search);
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
+      return GetPropertyAttributeWithFailedAccessCheck(receiver,
+                                                       result,
+                                                       name,
+                                                       continue_search);
+    }
   }
   if (result->IsProperty()) {
     switch (result->type()) {
@@ -2465,10 +2500,10 @@
   // JSGlobalProxy must never be normalized
   ASSERT(!IsJSGlobalProxy());
 
-  Heap* heap = GetHeap();
+  Map* map_of_this = map();
 
   // Allocate new content.
-  int property_count = map()->NumberOfDescribedProperties();
+  int property_count = map_of_this->NumberOfDescribedProperties();
   if (expected_additional_properties > 0) {
     property_count += expected_additional_properties;
   } else {
@@ -2481,7 +2516,7 @@
   }
   StringDictionary* dictionary = StringDictionary::cast(obj);
 
-  DescriptorArray* descs = map()->instance_descriptors();
+  DescriptorArray* descs = map_of_this->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
     PropertyDetails details = descs->GetDetails(i);
     switch (details.type()) {
@@ -2531,11 +2566,14 @@
     }
   }
 
+  Heap* current_heap = map_of_this->heap();
+
   // Copy the next enumeration index from instance descriptor.
-  int index = map()->instance_descriptors()->NextEnumerationIndex();
+  int index = map_of_this->instance_descriptors()->NextEnumerationIndex();
   dictionary->SetNextEnumerationIndex(index);
 
-  { MaybeObject* maybe_obj = heap->isolate()->context()->global_context()->
+  { MaybeObject* maybe_obj =
+        current_heap->isolate()->context()->global_context()->
         normalized_map_cache()->Get(this, mode);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
@@ -2546,17 +2584,17 @@
 
   // Resize the object in the heap if necessary.
   int new_instance_size = new_map->instance_size();
-  int instance_size_delta = map()->instance_size() - new_instance_size;
+  int instance_size_delta = map_of_this->instance_size() - new_instance_size;
   ASSERT(instance_size_delta >= 0);
-  heap->CreateFillerObjectAt(this->address() + new_instance_size,
-                             instance_size_delta);
+  current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
+                                     instance_size_delta);
 
   set_map(new_map);
-  map()->set_instance_descriptors(heap->empty_descriptor_array());
+  new_map->set_instance_descriptors(current_heap->empty_descriptor_array());
 
   set_properties(dictionary);
 
-  heap->isolate()->counters()->props_to_dictionary()->Increment();
+  current_heap->isolate()->counters()->props_to_dictionary()->Increment();
 
 #ifdef DEBUG
   if (FLAG_trace_normalization) {
@@ -2579,10 +2617,11 @@
 MaybeObject* JSObject::NormalizeElements() {
   ASSERT(!HasExternalArrayElements());
   if (HasDictionaryElements()) return this;
-  ASSERT(map()->has_fast_elements());
+  Map* old_map = map();
+  ASSERT(old_map->has_fast_elements());
 
   Object* obj;
-  { MaybeObject* maybe_obj = map()->GetSlowElementsMap();
+  { MaybeObject* maybe_obj = old_map->GetSlowElementsMap();
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   Map* new_map = Map::cast(obj);
@@ -2617,7 +2656,7 @@
   set_map(new_map);
   set_elements(dictionary);
 
-  new_map->GetHeap()->isolate()->counters()->elements_to_dictionary()->
+  new_map->heap()->isolate()->counters()->elements_to_dictionary()->
       Increment();
 
 #ifdef DEBUG
@@ -2634,10 +2673,9 @@
 MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
                                                      DeleteMode mode) {
   // Check local property, ignore interceptor.
-  Heap* heap = GetHeap();
   LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
-  if (!result.IsProperty()) return heap->true_value();
+  if (!result.IsProperty()) return GetHeap()->true_value();
 
   // Normalize object if needed.
   Object* obj;
@@ -2683,7 +2721,6 @@
 
 MaybeObject* JSObject::DeleteElementPostInterceptor(uint32_t index,
                                                     DeleteMode mode) {
-  Heap* heap = GetHeap();
   ASSERT(!HasExternalArrayElements());
   switch (GetElementsKind()) {
     case FAST_ELEMENTS: {
@@ -2711,7 +2748,7 @@
       UNREACHABLE();
       break;
   }
-  return heap->true_value();
+  return GetHeap()->true_value();
 }
 
 
@@ -2884,16 +2921,17 @@
 
 // Check whether this object references another object.
 bool JSObject::ReferencesObject(Object* obj) {
-  Heap* heap = GetHeap();
+  Map* map_of_this = map();
+  Heap* heap = map_of_this->heap();
   AssertNoAllocation no_alloc;
 
   // Is the object the constructor for this object?
-  if (map()->constructor() == obj) {
+  if (map_of_this->constructor() == obj) {
     return true;
   }
 
   // Is the object the prototype for this object?
-  if (map()->prototype() == obj) {
+  if (map_of_this->prototype() == obj) {
     return true;
   }
 
@@ -3503,7 +3541,6 @@
 
 
 Object* JSObject::SlowReverseLookup(Object* value) {
-  Heap* heap = GetHeap();
   if (HasFastProperties()) {
     DescriptorArray* descs = map()->instance_descriptors();
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
@@ -3517,7 +3554,7 @@
         }
       }
     }
-    return heap->undefined_value();
+    return GetHeap()->undefined_value();
   } else {
     return property_dictionary()->SlowReverseLookup(value);
   }
@@ -3621,7 +3658,7 @@
   // Allocate the code cache if not present.
   if (code_cache()->IsFixedArray()) {
     Object* result;
-    { MaybeObject* maybe_result = GetHeap()->AllocateCodeCache();
+    { MaybeObject* maybe_result = code->heap()->AllocateCodeCache();
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     set_code_cache(result);
@@ -3807,7 +3844,6 @@
 
 
 Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) {
-  Heap* heap = GetHeap();
   FixedArray* cache = default_cache();
   int length = cache->length();
   for (int i = 0; i < length; i += kCodeCacheEntrySize) {
@@ -3822,7 +3858,7 @@
       }
     }
   }
-  return heap->undefined_value();
+  return GetHeap()->undefined_value();
 }
 
 
@@ -3913,7 +3949,7 @@
   MUST_USE_RESULT MaybeObject* AsObject() {
     ASSERT(code_ != NULL);
     Object* obj;
-    { MaybeObject* maybe_obj = code_->GetHeap()->AllocateFixedArray(2);
+    { MaybeObject* maybe_obj = code_->heap()->AllocateFixedArray(2);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     FixedArray* pair = FixedArray::cast(obj);
@@ -3991,7 +4027,6 @@
 
 
 MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
-  Heap* heap = GetHeap();
   ASSERT(!array->HasExternalArrayElements());
   switch (array->GetElementsKind()) {
     case JSObject::FAST_ELEMENTS:
@@ -4002,7 +4037,7 @@
 
       // Allocate a temporary fixed array.
       Object* object;
-      { MaybeObject* maybe_object = heap->AllocateFixedArray(size);
+      { MaybeObject* maybe_object = GetHeap()->AllocateFixedArray(size);
         if (!maybe_object->ToObject(&object)) return maybe_object;
       }
       FixedArray* key_array = FixedArray::cast(object);
@@ -4022,12 +4057,11 @@
       UNREACHABLE();
   }
   UNREACHABLE();
-  return heap->null_value();  // Failure case needs to "return" a value.
+  return GetHeap()->null_value();  // Failure case needs to "return" a value.
 }
 
 
 MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
-  Heap* heap = GetHeap();
   int len0 = length();
 #ifdef DEBUG
   if (FLAG_enable_slow_asserts) {
@@ -4053,7 +4087,7 @@
 
   // Allocate the result
   Object* obj;
-  { MaybeObject* maybe_obj = heap->AllocateFixedArray(len0 + extra);
+  { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(len0 + extra);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   // Fill in the content
@@ -5373,7 +5407,7 @@
   if (StringShape(this).IsSymbol()) return false;
 
   Map* map = this->map();
-  Heap* heap = map->GetHeap();
+  Heap* heap = map->heap();
   if (map == heap->string_map()) {
     this->set_map(heap->undetectable_string_map());
     return true;
@@ -5702,17 +5736,18 @@
   // used for constructing objects to the original object prototype.
   // See ECMA-262 13.2.2.
   if (!value->IsJSObject()) {
-    Heap* heap = GetHeap();
     // Copy the map so this does not affect unrelated functions.
     // Remove map transitions because they point to maps with a
     // different prototype.
-    Object* new_map;
+    Object* new_object;
     { MaybeObject* maybe_new_map = map()->CopyDropTransitions();
-      if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
+      if (!maybe_new_map->ToObject(&new_object)) return maybe_new_map;
     }
-    set_map(Map::cast(new_map));
-    map()->set_constructor(value);
-    map()->set_non_instance_prototype(true);
+    Map* new_map = Map::cast(new_object);
+    Heap* heap = new_map->heap();
+    set_map(new_map);
+    new_map->set_constructor(value);
+    new_map->set_non_instance_prototype(true);
     construct_prototype =
         heap->isolate()->context()->global_context()->
             initial_object_prototype();
@@ -5740,7 +5775,7 @@
   ASSERT(shared()->strict_mode() || map() == global_context->function_map());
 
   set_map(no_prototype_map);
-  set_prototype_or_initial_map(GetHeap()->the_hole_value());
+  set_prototype_or_initial_map(no_prototype_map->heap()->the_hole_value());
   return this;
 }
 
@@ -5822,8 +5857,6 @@
 
 
 bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
-  Heap* heap = GetHeap();
-
   // Check the basic conditions for generating inline constructor code.
   if (!FLAG_inline_new
       || !has_only_simple_this_property_assignments()
@@ -5837,6 +5870,8 @@
     return true;
   }
 
+  Heap* heap = GetHeap();
+
   // Traverse the proposed prototype chain looking for setters for properties of
   // the same names as are set by the inline constructor.
   for (Object* obj = prototype;
@@ -6156,7 +6191,7 @@
 
 
 void Code::InvalidateRelocation() {
-  set_relocation_info(GetHeap()->empty_byte_array());
+  set_relocation_info(heap()->empty_byte_array());
 }
 
 
@@ -6734,7 +6769,6 @@
 
 
 MaybeObject* JSObject::SetElementsLength(Object* len) {
-  Heap* heap = GetHeap();
   // We should never end in here with a pixel or external array.
   ASSERT(AllowsSetElementsLength());
 
@@ -6742,7 +6776,7 @@
   Object* smi_length = Smi::FromInt(0);
   if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
     const int value = Smi::cast(smi_length)->value();
-    if (value < 0) return ArrayLengthRangeError(heap);
+    if (value < 0) return ArrayLengthRangeError(GetHeap());
     switch (GetElementsKind()) {
       case FAST_ELEMENTS: {
         int old_capacity = FixedArray::cast(elements())->length();
@@ -6808,14 +6842,14 @@
     if (len->ToArrayIndex(&length)) {
       return SetSlowElements(len);
     } else {
-      return ArrayLengthRangeError(heap);
+      return ArrayLengthRangeError(GetHeap());
     }
   }
 
   // len is not a number so make the array size one and
   // set only element to len.
   Object* obj;
-  { MaybeObject* maybe_obj = heap->AllocateFixedArray(1);
+  { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   FixedArray::cast(obj)->set(0, len);
@@ -6970,13 +7004,13 @@
 
 
 JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
-  Heap* heap = GetHeap();
-
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
-    return UNDEFINED_ELEMENT;
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+      return UNDEFINED_ELEMENT;
+    }
   }
 
   if (IsJSGlobalProxy()) {
@@ -7042,13 +7076,13 @@
 
 
 bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
-  Heap* heap = GetHeap();
-
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
-    return false;
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+      return false;
+    }
   }
 
   // Check for lookup interceptor
@@ -7320,14 +7354,15 @@
                                   Object* value,
                                   StrictModeFlag strict_mode,
                                   bool check_prototype) {
-  Heap* heap = GetHeap();
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
-    HandleScope scope;
-    Handle<Object> value_handle(value);
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
-    return *value_handle;
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
+      HandleScope scope;
+      Handle<Object> value_handle(value);
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+      return *value_handle;
+    }
   }
 
   if (IsJSGlobalProxy()) {
@@ -7522,7 +7557,6 @@
 
 MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
                                                  uint32_t index) {
-  Heap* heap = GetHeap();
   // Get element works for both JSObject and JSArray since
   // JSArray::length cannot change.
   switch (GetElementsKind()) {
@@ -7571,7 +7605,7 @@
 
   // Continue searching via the prototype chain.
   Object* pt = GetPrototype();
-  if (pt->IsNull()) return heap->undefined_value();
+  if (pt->IsNull()) return GetHeap()->undefined_value();
   return pt->GetElementWithReceiver(receiver, index);
 }
 
@@ -7613,12 +7647,13 @@
 
 MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
                                               uint32_t index) {
-  Heap* heap = GetHeap();
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_GET)) {
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
-    return heap->undefined_value();
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_GET)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
+      return heap->undefined_value();
+    }
   }
 
   if (HasIndexedInterceptor()) {
@@ -7669,6 +7704,7 @@
   }
 
   Object* pt = GetPrototype();
+  Heap* heap = GetHeap();
   if (pt == heap->null_value()) return heap->undefined_value();
   return pt->GetElementWithReceiver(receiver, index);
 }
@@ -7895,7 +7931,6 @@
     JSObject* receiver,
     String* name,
     PropertyAttributes* attributes) {
-  Heap* heap = GetHeap();
   // Check local property in holder, ignore interceptor.
   LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
@@ -7905,7 +7940,7 @@
   // Continue searching via the prototype chain.
   Object* pt = GetPrototype();
   *attributes = ABSENT;
-  if (pt->IsNull()) return heap->undefined_value();
+  if (pt->IsNull()) return GetHeap()->undefined_value();
   return pt->GetPropertyWithReceiver(receiver, name, attributes);
 }
 
@@ -7914,14 +7949,13 @@
     JSObject* receiver,
     String* name,
     PropertyAttributes* attributes) {
-  Heap* heap = GetHeap();
   // Check local property in holder, ignore interceptor.
   LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsProperty()) {
     return GetProperty(receiver, &result, name, attributes);
   }
-  return heap->undefined_value();
+  return GetHeap()->undefined_value();
 }
 
 
@@ -7966,12 +8000,13 @@
 
 
 bool JSObject::HasRealNamedProperty(String* key) {
-  Heap* heap = GetHeap();
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
-    return false;
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+      return false;
+    }
   }
 
   LookupResult result;
@@ -7981,12 +8016,13 @@
 
 
 bool JSObject::HasRealElementProperty(uint32_t index) {
-  Heap* heap = GetHeap();
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
-    return false;
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+      return false;
+    }
   }
 
   // Handle [] on String objects.
@@ -8025,17 +8061,18 @@
   }
   // All possibilities have been handled above already.
   UNREACHABLE();
-  return heap->null_value();
+  return GetHeap()->null_value();
 }
 
 
 bool JSObject::HasRealNamedCallbackProperty(String* key) {
-  Heap* heap = GetHeap();
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
-    return false;
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+      return false;
+    }
   }
 
   LookupResult result;
@@ -8674,7 +8711,6 @@
 
 template<typename Shape, typename Key>
 MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
-  Heap* heap = GetHeap();
   int capacity = Capacity();
   int nof = NumberOfElements() + n;
   int nod = NumberOfDeletedElements();
@@ -8688,7 +8724,7 @@
 
   const int kMinCapacityForPretenure = 256;
   bool pretenure =
-      (capacity > kMinCapacityForPretenure) && !heap->InNewSpace(this);
+      (capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this);
   Object* obj;
   { MaybeObject* maybe_obj =
         Allocate(nof * 2, pretenure ? TENURED : NOT_TENURED);
@@ -8820,7 +8856,6 @@
 // Collates undefined and nonexistent elements below limit from position
 // zero of the elements. The object stays in Dictionary mode.
 MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
-  Heap* heap = GetHeap();
   ASSERT(HasDictionaryElements());
   // Must stay in dictionary mode, either because of requires_slow_elements,
   // or because we are not going to sort (and therefore compact) all of the
@@ -8830,7 +8865,7 @@
   if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
     // Allocate space for result before we start mutating the object.
     Object* new_double;
-    { MaybeObject* maybe_new_double = heap->AllocateHeapNumber(0.0);
+    { MaybeObject* maybe_new_double = GetHeap()->AllocateHeapNumber(0.0);
       if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
     }
     result_double = HeapNumber::cast(new_double);
@@ -8890,6 +8925,7 @@
 
   uint32_t result = pos;
   PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
+  Heap* heap = GetHeap();
   while (undefs > 0) {
     if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
       // Adding an entry with the key beyond smi-range requires
@@ -8919,9 +8955,10 @@
 // If the object is in dictionary mode, it is converted to fast elements
 // mode.
 MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
-  Heap* heap = GetHeap();
   ASSERT(!HasExternalArrayElements());
 
+  Heap* heap = GetHeap();
+
   if (HasDictionaryElements()) {
     // Convert to fast elements containing only the existing properties.
     // Ordering is irrelevant, since we are going to sort anyway.
@@ -9175,9 +9212,9 @@
 
 MaybeObject* GlobalObject::EnsurePropertyCell(String* name) {
   ASSERT(!HasFastProperties());
-  Heap* heap = GetHeap();
   int entry = property_dictionary()->FindEntry(name);
   if (entry == StringDictionary::kNotFound) {
+    Heap* heap = GetHeap();
     Object* cell;
     { MaybeObject* maybe_cell =
           heap->AllocateJSGlobalPropertyCell(heap->the_hole_value());
@@ -9352,10 +9389,9 @@
 
 
 Object* CompilationCacheTable::Lookup(String* src) {
-  Heap* heap = GetHeap();
   StringKey key(src);
   int entry = FindEntry(&key);
-  if (entry == kNotFound) return heap->undefined_value();
+  if (entry == kNotFound) return GetHeap()->undefined_value();
   return get(EntryToIndex(entry) + 1);
 }
 
@@ -9372,10 +9408,9 @@
 
 Object* CompilationCacheTable::LookupRegExp(String* src,
                                             JSRegExp::Flags flags) {
-  Heap* heap = GetHeap();
   RegExpKey key(src, flags);
   int entry = FindEntry(&key);
-  if (entry == kNotFound) return heap->undefined_value();
+  if (entry == kNotFound) return GetHeap()->undefined_value();
   return get(EntryToIndex(entry) + 1);
 }
 
@@ -9495,10 +9530,9 @@
 
 
 Object* MapCache::Lookup(FixedArray* array) {
-  Heap* heap = GetHeap();
   SymbolsKey key(array);
   int entry = FindEntry(&key);
-  if (entry == kNotFound) return heap->undefined_value();
+  if (entry == kNotFound) return GetHeap()->undefined_value();
   return get(EntryToIndex(entry) + 1);
 }
 
@@ -9854,7 +9888,6 @@
 // Backwards lookup (slow).
 template<typename Shape, typename Key>
 Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) {
-  Heap* heap = Dictionary<Shape, Key>::GetHeap();
   int capacity = HashTable<Shape, Key>::Capacity();
   for (int i = 0; i < capacity; i++) {
     Object* k =  HashTable<Shape, Key>::KeyAt(i);
@@ -9866,13 +9899,13 @@
       if (e == value) return k;
     }
   }
+  Heap* heap = Dictionary<Shape, Key>::GetHeap();
   return heap->undefined_value();
 }
 
 
 MaybeObject* StringDictionary::TransformPropertiesToFastFor(
     JSObject* obj, int unused_property_fields) {
-  Heap* heap = GetHeap();
   // Make sure we preserve dictionary representation if there are too many
   // descriptors.
   if (NumberOfElements() > DescriptorArray::kMaxNumberOfDescriptors) return obj;
@@ -9892,6 +9925,8 @@
   int instance_descriptor_length = 0;
   int number_of_fields = 0;
 
+  Heap* heap = GetHeap();
+
   // Compute the length of the instance descriptor.
   int capacity = Capacity();
   for (int i = 0; i < capacity; i++) {
@@ -10020,12 +10055,11 @@
 
 // Get the break point info object for this code position.
 Object* DebugInfo::GetBreakPointInfo(int code_position) {
-  Heap* heap = GetHeap();
   // Find the index of the break point info object for this code position.
   int index = GetBreakPointInfoIndex(code_position);
 
   // Return the break point info object if any.
-  if (index == kNoBreakPointInfo) return heap->undefined_value();
+  if (index == kNoBreakPointInfo) return GetHeap()->undefined_value();
   return BreakPointInfo::cast(break_points()->get(index));
 }
 
@@ -10098,10 +10132,9 @@
 
 // Get the break point objects for a code position.
 Object* DebugInfo::GetBreakPointObjects(int code_position) {
-  Heap* heap = GetHeap();
   Object* break_point_info = GetBreakPointInfo(code_position);
   if (break_point_info->IsUndefined()) {
-    return heap->undefined_value();
+    return GetHeap()->undefined_value();
   }
   return BreakPointInfo::cast(break_point_info)->break_point_objects();
 }
@@ -10124,7 +10157,7 @@
 
 Object* DebugInfo::FindBreakPointInfo(Handle<DebugInfo> debug_info,
                                       Handle<Object> break_point_object) {
-  Heap* heap = Isolate::Current()->heap();
+  Heap* heap = debug_info->GetHeap();
   if (debug_info->break_points()->IsUndefined()) return heap->undefined_value();
   for (int i = 0; i < debug_info->break_points()->length(); i++) {
     if (!debug_info->break_points()->get(i)->IsUndefined()) {
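
Most of the objects.cc changes above share one shape: the Heap* (or heap->isolate()) lookup is moved off the common path and performed only inside the branch that actually needs it, for example a failed access check or a slow-path allocation. A schematic sketch of that before/after shape, with toy placeholder types standing in for the V8 classes:

    struct Heap {
      const char* undefined_value() { return "undefined"; }
    };

    struct Holder {
      Heap heap_;
      Heap* GetHeap() { return &heap_; }        // stands in for the real lookup
      bool LookupProperty(int key) { return key == 42; }

      // After the patch: the Heap* is fetched lazily, only on the branch
      // that needs it, instead of unconditionally at the top of the function.
      const char* GetOrUndefined(int key) {
        if (LookupProperty(key)) {
          return "found";                       // common case, no Heap* needed
        }
        return GetHeap()->undefined_value();    // slow path only
      }
    };
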
diff --git a/src/objects.h b/src/objects.h
index 96e5cb6..874dcbc 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -3487,6 +3487,10 @@
   void CodeVerify();
 #endif
 
+  // Returns the isolate/heap this code object belongs to.
+  inline Isolate* isolate();
+  inline Heap* heap();
+
+  // Max loop nesting marker used to postpone OSR. We don't take loop
   // nesting that is deeper than 5 levels into account.
   static const int kMaxLoopNestingMarker = 6;
@@ -4255,9 +4259,6 @@
   // this.x = y; where y is either a constant or refers to an argument.
   inline bool has_only_simple_this_property_assignments();
 
-  inline bool try_full_codegen();
-  inline void set_try_full_codegen(bool flag);
-
   // Indicates if this function can be lazily compiled.
   // This is used to determine if we can safely flush code from a function
   // when doing GC if we expect that the function will no longer be used.
@@ -4457,13 +4458,12 @@
 
   // Bit positions in compiler_hints.
   static const int kHasOnlySimpleThisPropertyAssignments = 0;
-  static const int kTryFullCodegen = 1;
-  static const int kAllowLazyCompilation = 2;
-  static const int kLiveObjectsMayExist = 3;
-  static const int kCodeAgeShift = 4;
+  static const int kAllowLazyCompilation = 1;
+  static const int kLiveObjectsMayExist = 2;
+  static const int kCodeAgeShift = 3;
   static const int kCodeAgeMask = 0x7;
-  static const int kOptimizationDisabled = 7;
-  static const int kStrictModeFunction = 8;
+  static const int kOptimizationDisabled = 6;
+  static const int kStrictModeFunction = 7;
 
  private:
 #if V8_HOST_ARCH_32_BIT
@@ -6009,6 +6009,10 @@
                               kValueOffset + kPointerSize,
                               kSize> BodyDescriptor;
 
+  // Returns the isolate/heap this cell object belongs to.
+  inline Isolate* isolate();
+  inline Heap* heap();
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
 };
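
The parser.cc changes below replace plain new with new(zone()) so that AST and RegExp nodes are bump-allocated in the parser's zone and released all at once when the zone goes away. A small self-contained sketch of that placement-new-into-an-arena pattern; the Zone here is a toy bump allocator with an invented interface, not V8's Zone:

    #include <cstddef>

    class Zone {  // toy bump allocator: no per-object free
     public:
      explicit Zone(size_t capacity) : buffer_(new char[capacity]), used_(0) {}
      ~Zone() { delete[] buffer_; }  // every object in the zone dies here

      void* New(size_t size) {
        void* result = buffer_ + used_;
        used_ += (size + 7) & ~static_cast<size_t>(7);  // keep 8-byte alignment
        return result;  // (no capacity check in this sketch)
      }

     private:
      char* buffer_;
      size_t used_;
    };

    // Lets "new(zone) T(args)" construct T inside the zone's buffer.
    inline void* operator new(size_t size, Zone* zone) { return zone->New(size); }
    inline void operator delete(void*, Zone*) {}  // matching form, never called

    struct AstNode {
      explicit AstNode(int value) : value_(value) {}
      int value_;
    };

    // Usage: nodes are allocated into the zone and never deleted one by one.
    //   Zone zone(1 << 16);
    //   AstNode* node = new(&zone) AstNode(7);

Individual nodes are never deleted, which is why the diff below only touches the allocation sites.
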
diff --git a/src/parser.cc b/src/parser.cc
index 13e0c33..22d4d3f 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -88,12 +88,13 @@
 
 
 RegExpBuilder::RegExpBuilder()
-  : pending_empty_(false),
-    characters_(NULL),
-    terms_(),
-    alternatives_()
+    : zone_(Isolate::Current()->zone()),
+      pending_empty_(false),
+      characters_(NULL),
+      terms_(),
+      alternatives_()
 #ifdef DEBUG
-  , last_added_(ADD_NONE)
+    , last_added_(ADD_NONE)
 #endif
   {}
 
@@ -101,7 +102,7 @@
 void RegExpBuilder::FlushCharacters() {
   pending_empty_ = false;
   if (characters_ != NULL) {
-    RegExpTree* atom = new RegExpAtom(characters_->ToConstVector());
+    RegExpTree* atom = new(zone()) RegExpAtom(characters_->ToConstVector());
     characters_ = NULL;
     text_.Add(atom);
     LAST(ADD_ATOM);
@@ -117,7 +118,7 @@
   } else if (num_text == 1) {
     terms_.Add(text_.last());
   } else {
-    RegExpText* text = new RegExpText();
+    RegExpText* text = new(zone()) RegExpText();
     for (int i = 0; i < num_text; i++)
       text_.Get(i)->AppendToText(text);
     terms_.Add(text);
@@ -178,7 +179,7 @@
   } else if (num_terms == 1) {
     alternative = terms_.last();
   } else {
-    alternative = new RegExpAlternative(terms_.GetList());
+    alternative = new(zone()) RegExpAlternative(terms_.GetList());
   }
   alternatives_.Add(alternative);
   terms_.Clear();
@@ -195,7 +196,7 @@
   if (num_alternatives == 1) {
     return alternatives_.last();
   }
-  return new RegExpDisjunction(alternatives_.GetList());
+  return new(zone()) RegExpDisjunction(alternatives_.GetList());
 }
 
 
@@ -214,11 +215,11 @@
     int num_chars = char_vector.length();
     if (num_chars > 1) {
       Vector<const uc16> prefix = char_vector.SubVector(0, num_chars - 1);
-      text_.Add(new RegExpAtom(prefix));
+      text_.Add(new(zone()) RegExpAtom(prefix));
       char_vector = char_vector.SubVector(num_chars - 1, num_chars);
     }
     characters_ = NULL;
-    atom = new RegExpAtom(char_vector);
+    atom = new(zone()) RegExpAtom(char_vector);
     FlushText();
   } else if (text_.length() > 0) {
     ASSERT(last_added_ == ADD_ATOM);
@@ -241,7 +242,7 @@
     UNREACHABLE();
     return;
   }
-  terms_.Add(new RegExpQuantifier(min, max, type, atom));
+  terms_.Add(new(zone()) RegExpQuantifier(min, max, type, atom));
   LAST(ADD_TERM);
 }
 
@@ -408,7 +409,7 @@
 
 
 Scope* Parser::NewScope(Scope* parent, Scope::Type type, bool inside_with) {
-  Scope* result = new Scope(parent, type);
+  Scope* result = new(zone()) Scope(parent, type);
   result->Initialize(inside_with);
   return result;
 }
@@ -601,7 +602,7 @@
 
   HistogramTimerScope timer(isolate()->counters()->parse());
   isolate()->counters()->total_parse_size()->Increment(source->length());
-  fni_ = new FuncNameInferrer();
+  fni_ = new(zone()) FuncNameInferrer();
 
   // Initialize parser state.
   source->TryFlatten();
@@ -652,7 +653,7 @@
       CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
     }
     if (ok) {
-      result = new FunctionLiteral(
+      result = new(zone()) FunctionLiteral(
           no_name,
           top_scope_,
           body,
@@ -713,7 +714,7 @@
   ASSERT(target_stack_ == NULL);
 
   Handle<String> name(String::cast(shared_info->name()));
-  fni_ = new FuncNameInferrer();
+  fni_ = new(zone()) FuncNameInferrer();
   fni_->PushEnclosingName(name);
 
   mode_ = PARSE_EAGERLY;
@@ -1252,7 +1253,7 @@
       // one must take great care not to treat it as a
       // fall-through. It is much easier just to wrap the entire
       // try-statement in a statement block and put the labels there
-      Block* result = new Block(labels, 1, false);
+      Block* result = new(zone()) Block(labels, 1, false);
       Target target(&this->target_stack_, result);
       TryStatement* statement = ParseTryStatement(CHECK_OK);
       if (statement) {
@@ -1350,13 +1351,13 @@
   // a performance issue since it may lead to repeated
   // Runtime::DeclareContextSlot() calls.
   VariableProxy* proxy = top_scope_->NewUnresolved(name, inside_with());
-  top_scope_->AddDeclaration(new Declaration(proxy, mode, fun));
+  top_scope_->AddDeclaration(new(zone()) Declaration(proxy, mode, fun));
 
   // For global const variables we bind the proxy to a variable.
   if (mode == Variable::CONST && top_scope_->is_global_scope()) {
     ASSERT(resolve);  // should be set by all callers
     Variable::Kind kind = Variable::NORMAL;
-    var = new Variable(top_scope_, name, Variable::CONST, true, kind);
+    var = new(zone()) Variable(top_scope_, name, Variable::CONST, true, kind);
   }
 
   // If requested and we have a local variable, bind the proxy to the variable
@@ -1444,10 +1445,11 @@
   // TODO(1240846): It's weird that native function declarations are
   // introduced dynamically when we meet their declarations, whereas
   // other functions are set up when entering the surrounding scope.
-  SharedFunctionInfoLiteral* lit = new SharedFunctionInfoLiteral(shared);
+  SharedFunctionInfoLiteral* lit =
+      new(zone()) SharedFunctionInfoLiteral(shared);
   VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
-  return new ExpressionStatement(
-      new Assignment(Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
+  return new(zone()) ExpressionStatement(new(zone()) Assignment(
+      Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
 }
 
 
@@ -1479,7 +1481,7 @@
   // (ECMA-262, 3rd, 12.2)
   //
   // Construct block expecting 16 statements.
-  Block* result = new Block(labels, 16, false);
+  Block* result = new(zone()) Block(labels, 16, false);
   Target target(&this->target_stack_, result);
   Expect(Token::LBRACE, CHECK_OK);
   while (peek() != Token::RBRACE) {
@@ -1549,7 +1551,7 @@
   // is inside an initializer block, it is ignored.
   //
   // Create new block with one expected declaration.
-  Block* block = new Block(NULL, 1, true);
+  Block* block = new(zone()) Block(NULL, 1, true);
   VariableProxy* last_var = NULL;  // the last variable declared
   int nvars = 0;  // the number of variables declared
   do {
@@ -1650,7 +1652,8 @@
     if (top_scope_->is_global_scope()) {
       // Compute the arguments for the runtime call.
       ZoneList<Expression*>* arguments = new ZoneList<Expression*>(3);
-      arguments->Add(new Literal(name));  // we have at least 1 parameter
+      // We have at least 1 parameter.
+      arguments->Add(new(zone()) Literal(name));
       CallRuntime* initialize;
 
       if (is_const) {
@@ -1662,7 +1665,7 @@
         // Note that the function does different things depending on
         // the number of arguments (1 or 2).
         initialize =
-            new CallRuntime(
+            new(zone()) CallRuntime(
               isolate()->factory()->InitializeConstGlobal_symbol(),
               Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
               arguments);
@@ -1686,13 +1689,13 @@
         // Note that the function does different things depending on
         // the number of arguments (2 or 3).
         initialize =
-            new CallRuntime(
+            new(zone()) CallRuntime(
               isolate()->factory()->InitializeVarGlobal_symbol(),
               Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
               arguments);
       }
 
-      block->AddStatement(new ExpressionStatement(initialize));
+      block->AddStatement(new(zone()) ExpressionStatement(initialize));
     }
 
     // Add an assignment node to the initialization statement block if
@@ -1707,8 +1710,11 @@
     // the top context for variables). Sigh...
     if (value != NULL) {
       Token::Value op = (is_const ? Token::INIT_CONST : Token::INIT_VAR);
-      Assignment* assignment = new Assignment(op, last_var, value, position);
-      if (block) block->AddStatement(new ExpressionStatement(assignment));
+      Assignment* assignment =
+          new(zone()) Assignment(op, last_var, value, position);
+      if (block) {
+        block->AddStatement(new(zone()) ExpressionStatement(assignment));
+      }
     }
 
     if (fni_ != NULL) fni_->Leave();
@@ -1774,7 +1780,7 @@
 
   // Parsed expression statement.
   ExpectSemicolon(CHECK_OK);
-  return new ExpressionStatement(expr);
+  return new(zone()) ExpressionStatement(expr);
 }
 
 
@@ -1794,7 +1800,7 @@
   } else {
     else_statement = EmptyStatement();
   }
-  return new IfStatement(condition, then_statement, else_statement);
+  return new(zone()) IfStatement(condition, then_statement, else_statement);
 }
 
 
@@ -1824,7 +1830,7 @@
     return NULL;
   }
   ExpectSemicolon(CHECK_OK);
-  return new ContinueStatement(target);
+  return new(zone()) ContinueStatement(target);
 }
 
 
@@ -1859,7 +1865,7 @@
     return NULL;
   }
   ExpectSemicolon(CHECK_OK);
-  return new BreakStatement(target);
+  return new(zone()) BreakStatement(target);
 }
 
 
@@ -1880,7 +1886,7 @@
   if (!top_scope_->is_function_scope()) {
     Handle<String> type = isolate()->factory()->illegal_return_symbol();
     Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
-    return new ExpressionStatement(throw_error);
+    return new(zone()) ExpressionStatement(throw_error);
   }
 
   Token::Value tok = peek();
@@ -1889,12 +1895,12 @@
       tok == Token::RBRACE ||
       tok == Token::EOS) {
     ExpectSemicolon(CHECK_OK);
-    return new ReturnStatement(GetLiteralUndefined());
+    return new(zone()) ReturnStatement(GetLiteralUndefined());
   }
 
   Expression* expr = ParseExpression(true, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
-  return new ReturnStatement(expr);
+  return new(zone()) ReturnStatement(expr);
 }
 
 
@@ -1915,21 +1921,21 @@
   // Create resulting block with two statements.
   // 1: Evaluate the with expression.
   // 2: The try-finally block evaluating the body.
-  Block* result = new Block(NULL, 2, false);
+  Block* result = new(zone()) Block(NULL, 2, false);
 
   if (result != NULL) {
-    result->AddStatement(new WithEnterStatement(obj, is_catch_block));
+    result->AddStatement(new(zone()) WithEnterStatement(obj, is_catch_block));
 
     // Create body block.
-    Block* body = new Block(NULL, 1, false);
+    Block* body = new(zone()) Block(NULL, 1, false);
     body->AddStatement(stat);
 
     // Create exit block.
-    Block* exit = new Block(NULL, 1, false);
-    exit->AddStatement(new WithExitStatement());
+    Block* exit = new(zone()) Block(NULL, 1, false);
+    exit->AddStatement(new(zone()) WithExitStatement());
 
     // Return a try-finally statement.
-    TryFinallyStatement* wrapper = new TryFinallyStatement(body, exit);
+    TryFinallyStatement* wrapper = new(zone()) TryFinallyStatement(body, exit);
     wrapper->set_escaping_targets(collector.targets());
     result->AddStatement(wrapper);
   }
@@ -1986,7 +1992,7 @@
     statements->Add(stat);
   }
 
-  return new CaseClause(label, statements, pos);
+  return new(zone()) CaseClause(label, statements, pos);
 }
 
 
@@ -1995,7 +2001,7 @@
   // SwitchStatement ::
   //   'switch' '(' Expression ')' '{' CaseClause* '}'
 
-  SwitchStatement* statement = new SwitchStatement(labels);
+  SwitchStatement* statement = new(zone()) SwitchStatement(labels);
   Target target(&this->target_stack_, statement);
 
   Expect(Token::SWITCH, CHECK_OK);
@@ -2031,7 +2037,7 @@
   Expression* exception = ParseExpression(true, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
 
-  return new ExpressionStatement(new Throw(exception, pos));
+  return new(zone()) ExpressionStatement(new(zone()) Throw(exception, pos));
 }
 
 
@@ -2095,9 +2101,10 @@
       // executing the finally block.
       catch_var =
           top_scope_->NewTemporary(isolate()->factory()->catch_var_symbol());
-      Literal* name_literal = new Literal(name);
-      VariableProxy* catch_var_use = new VariableProxy(catch_var);
-      Expression* obj = new CatchExtensionObject(name_literal, catch_var_use);
+      Literal* name_literal = new(zone()) Literal(name);
+      VariableProxy* catch_var_use = new(zone()) VariableProxy(catch_var);
+      Expression* obj =
+          new(zone()) CatchExtensionObject(name_literal, catch_var_use);
       { Target target(&this->target_stack_, &catch_collector);
         catch_block = WithHelper(obj, NULL, true, CHECK_OK);
       }
@@ -2121,11 +2128,11 @@
   //   'try { try { } catch { } } finally { }'
 
   if (catch_block != NULL && finally_block != NULL) {
-    VariableProxy* catch_var_defn = new VariableProxy(catch_var);
+    VariableProxy* catch_var_defn = new(zone()) VariableProxy(catch_var);
     TryCatchStatement* statement =
-        new TryCatchStatement(try_block, catch_var_defn, catch_block);
+        new(zone()) TryCatchStatement(try_block, catch_var_defn, catch_block);
     statement->set_escaping_targets(collector.targets());
-    try_block = new Block(NULL, 1, false);
+    try_block = new(zone()) Block(NULL, 1, false);
     try_block->AddStatement(statement);
     catch_block = NULL;
   }
@@ -2133,12 +2140,13 @@
   TryStatement* result = NULL;
   if (catch_block != NULL) {
     ASSERT(finally_block == NULL);
-    VariableProxy* catch_var_defn = new VariableProxy(catch_var);
-    result = new TryCatchStatement(try_block, catch_var_defn, catch_block);
+    VariableProxy* catch_var_defn = new(zone()) VariableProxy(catch_var);
+    result =
+        new(zone()) TryCatchStatement(try_block, catch_var_defn, catch_block);
     result->set_escaping_targets(collector.targets());
   } else {
     ASSERT(finally_block != NULL);
-    result = new TryFinallyStatement(try_block, finally_block);
+    result = new(zone()) TryFinallyStatement(try_block, finally_block);
     // Add the jump targets of the try block and the catch block.
     for (int i = 0; i < collector.targets()->length(); i++) {
       catch_collector.AddTarget(collector.targets()->at(i));
@@ -2156,7 +2164,7 @@
   //   'do' Statement 'while' '(' Expression ')' ';'
 
   lexical_scope_->AddLoop();
-  DoWhileStatement* loop = new DoWhileStatement(labels);
+  DoWhileStatement* loop = new(zone()) DoWhileStatement(labels);
   Target target(&this->target_stack_, loop);
 
   Expect(Token::DO, CHECK_OK);
@@ -2189,7 +2197,7 @@
   //   'while' '(' Expression ')' Statement
 
   lexical_scope_->AddLoop();
-  WhileStatement* loop = new WhileStatement(labels);
+  WhileStatement* loop = new(zone()) WhileStatement(labels);
   Target target(&this->target_stack_, loop);
 
   Expect(Token::WHILE, CHECK_OK);
@@ -2219,7 +2227,7 @@
       Block* variable_statement =
           ParseVariableDeclarations(false, &each, CHECK_OK);
       if (peek() == Token::IN && each != NULL) {
-        ForInStatement* loop = new ForInStatement(labels);
+        ForInStatement* loop = new(zone()) ForInStatement(labels);
         Target target(&this->target_stack_, loop);
 
         Expect(Token::IN, CHECK_OK);
@@ -2228,7 +2236,7 @@
 
         Statement* body = ParseStatement(NULL, CHECK_OK);
         loop->Initialize(each, enumerable, body);
-        Block* result = new Block(NULL, 2, false);
+        Block* result = new(zone()) Block(NULL, 2, false);
         result->AddStatement(variable_statement);
         result->AddStatement(loop);
         // Parsed for-in loop w/ variable/const declaration.
@@ -2249,7 +2257,7 @@
               isolate()->factory()->invalid_lhs_in_for_in_symbol();
           expression = NewThrowReferenceError(type);
         }
-        ForInStatement* loop = new ForInStatement(labels);
+        ForInStatement* loop = new(zone()) ForInStatement(labels);
         Target target(&this->target_stack_, loop);
 
         Expect(Token::IN, CHECK_OK);
@@ -2262,13 +2270,13 @@
         return loop;
 
       } else {
-        init = new ExpressionStatement(expression);
+        init = new(zone()) ExpressionStatement(expression);
       }
     }
   }
 
   // Standard 'for' loop
-  ForStatement* loop = new ForStatement(labels);
+  ForStatement* loop = new(zone()) ForStatement(labels);
   Target target(&this->target_stack_, loop);
 
   // Parsed initializer at this point.
@@ -2284,7 +2292,7 @@
   Statement* next = NULL;
   if (peek() != Token::RPAREN) {
     Expression* exp = ParseExpression(true, CHECK_OK);
-    next = new ExpressionStatement(exp);
+    next = new(zone()) ExpressionStatement(exp);
   }
   Expect(Token::RPAREN, CHECK_OK);
 
@@ -2305,7 +2313,7 @@
     Expect(Token::COMMA, CHECK_OK);
     int position = scanner().location().beg_pos;
     Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-    result = new BinaryOperation(Token::COMMA, result, right, position);
+    result = new(zone()) BinaryOperation(Token::COMMA, result, right, position);
   }
   return result;
 }
@@ -2377,7 +2385,7 @@
     fni_->Leave();
   }
 
-  return new Assignment(op, expression, right, pos);
+  return new(zone()) Assignment(op, expression, right, pos);
 }
 
 
@@ -2399,7 +2407,7 @@
   Expect(Token::COLON, CHECK_OK);
   int right_position = scanner().peek_location().beg_pos;
   Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-  return new Conditional(expression, left, right,
+  return new(zone()) Conditional(expression, left, right,
                          left_position, right_position);
 }
 
@@ -2487,12 +2495,12 @@
         x = NewCompareNode(cmp, x, y, position);
         if (cmp != op) {
           // The comparison was negated - add a NOT.
-          x = new UnaryOperation(Token::NOT, x);
+          x = new(zone()) UnaryOperation(Token::NOT, x);
         }
 
       } else {
         // We have a "normal" binary operation.
-        x = new BinaryOperation(op, x, y, position);
+        x = new(zone()) BinaryOperation(op, x, y, position);
       }
     }
   }
@@ -2509,15 +2517,15 @@
     bool is_strict = (op == Token::EQ_STRICT);
     Literal* x_literal = x->AsLiteral();
     if (x_literal != NULL && x_literal->IsNull()) {
-      return new CompareToNull(is_strict, y);
+      return new(zone()) CompareToNull(is_strict, y);
     }
 
     Literal* y_literal = y->AsLiteral();
     if (y_literal != NULL && y_literal->IsNull()) {
-      return new CompareToNull(is_strict, x);
+      return new(zone()) CompareToNull(is_strict, x);
     }
   }
-  return new CompareOperation(op, x, y, position);
+  return new(zone()) CompareOperation(op, x, y, position);
 }
 
 
@@ -2564,7 +2572,7 @@
       }
     }
 
-    return new UnaryOperation(op, expression);
+    return new(zone()) UnaryOperation(op, expression);
 
   } else if (Token::IsCountOp(op)) {
     op = Next();
@@ -2585,8 +2593,9 @@
     }
 
     int position = scanner().location().beg_pos;
-    IncrementOperation* increment = new IncrementOperation(op, expression);
-    return new CountOperation(true /* prefix */, increment, position);
+    IncrementOperation* increment =
+        new(zone()) IncrementOperation(op, expression);
+    return new(zone()) CountOperation(true /* prefix */, increment, position);
 
   } else {
     return ParsePostfixExpression(ok);
@@ -2618,8 +2627,10 @@
 
     Token::Value next = Next();
     int position = scanner().location().beg_pos;
-    IncrementOperation* increment = new IncrementOperation(next, expression);
-    expression = new CountOperation(false /* postfix */, increment, position);
+    IncrementOperation* increment =
+        new(zone()) IncrementOperation(next, expression);
+    expression =
+        new(zone()) CountOperation(false /* postfix */, increment, position);
   }
   return expression;
 }
@@ -2642,7 +2653,7 @@
         Consume(Token::LBRACK);
         int pos = scanner().location().beg_pos;
         Expression* index = ParseExpression(true, CHECK_OK);
-        result = new Property(result, index, pos);
+        result = new(zone()) Property(result, index, pos);
         Expect(Token::RBRACK, CHECK_OK);
         break;
       }
@@ -2680,7 +2691,7 @@
         Consume(Token::PERIOD);
         int pos = scanner().location().beg_pos;
         Handle<String> name = ParseIdentifierName(CHECK_OK);
-        result = new Property(result, new Literal(name), pos);
+        result = new(zone()) Property(result, new(zone()) Literal(name), pos);
         if (fni_ != NULL) fni_->PushLiteralName(name);
         break;
       }
@@ -2716,7 +2727,7 @@
 
   if (!stack->is_empty()) {
     int last = stack->pop();
-    result = new CallNew(result, new ZoneList<Expression*>(0), last);
+    result = new(zone()) CallNew(result, new ZoneList<Expression*>(0), last);
   }
   return result;
 }
@@ -2761,7 +2772,7 @@
         Consume(Token::LBRACK);
         int pos = scanner().location().beg_pos;
         Expression* index = ParseExpression(true, CHECK_OK);
-        result = new Property(result, index, pos);
+        result = new(zone()) Property(result, index, pos);
         Expect(Token::RBRACK, CHECK_OK);
         break;
       }
@@ -2769,7 +2780,7 @@
         Consume(Token::PERIOD);
         int pos = scanner().location().beg_pos;
         Handle<String> name = ParseIdentifierName(CHECK_OK);
-        result = new Property(result, new Literal(name), pos);
+        result = new(zone()) Property(result, new(zone()) Literal(name), pos);
         if (fni_ != NULL) fni_->PushLiteralName(name);
         break;
       }
@@ -2797,7 +2808,7 @@
 
   Expect(Token::DEBUGGER, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
-  return new DebuggerStatement();
+  return new(zone()) DebuggerStatement();
 }
 
 
@@ -2866,24 +2877,26 @@
 
     case Token::NULL_LITERAL:
       Consume(Token::NULL_LITERAL);
-      result = new Literal(isolate()->factory()->null_value());
+      result = new(zone()) Literal(isolate()->factory()->null_value());
       break;
 
     case Token::TRUE_LITERAL:
       Consume(Token::TRUE_LITERAL);
-      result = new Literal(isolate()->factory()->true_value());
+      result = new(zone()) Literal(isolate()->factory()->true_value());
       break;
 
     case Token::FALSE_LITERAL:
       Consume(Token::FALSE_LITERAL);
-      result = new Literal(isolate()->factory()->false_value());
+      result = new(zone()) Literal(isolate()->factory()->false_value());
       break;
 
     case Token::IDENTIFIER:
     case Token::FUTURE_RESERVED_WORD: {
       Handle<String> name = ParseIdentifier(CHECK_OK);
       if (fni_ != NULL) fni_->PushVariableName(name);
-      result = top_scope_->NewUnresolved(name, inside_with());
+      result = top_scope_->NewUnresolved(name,
+                                         inside_with(),
+                                         scanner().location().beg_pos);
       break;
     }
 
@@ -2899,7 +2912,7 @@
     case Token::STRING: {
       Consume(Token::STRING);
       Handle<String> symbol = GetSymbol(CHECK_OK);
-      result = new Literal(symbol);
+      result = new(zone()) Literal(symbol);
       if (fni_ != NULL) fni_->PushLiteralName(symbol);
       break;
     }
@@ -3026,7 +3039,7 @@
     literals->set_map(isolate()->heap()->fixed_cow_array_map());
   }
 
-  return new ArrayLiteral(literals, values,
+  return new(zone()) ArrayLiteral(literals, values,
                           literal_index, is_simple, depth);
 }
 
@@ -3304,7 +3317,7 @@
     // Allow any number of parameters for compatibility with JSC.
     // Specification only allows zero parameters for get and one for set.
     ObjectLiteral::Property* property =
-        new ObjectLiteral::Property(is_getter, value);
+        new(zone()) ObjectLiteral::Property(is_getter, value);
     return property;
   } else {
     ReportUnexpectedToken(next);
@@ -3370,7 +3383,7 @@
         }
         // Failed to parse as get/set property, so it's just a property
         // called "get" or "set".
-        key = new Literal(id);
+        key = new(zone()) Literal(id);
         break;
       }
       case Token::STRING: {
@@ -3382,7 +3395,7 @@
           key = NewNumberLiteral(index);
           break;
         }
-        key = new Literal(string);
+        key = new(zone()) Literal(string);
         break;
       }
       case Token::NUMBER: {
@@ -3397,7 +3410,7 @@
         if (Token::IsKeyword(next)) {
           Consume(next);
           Handle<String> string = GetSymbol(CHECK_OK);
-          key = new Literal(string);
+          key = new(zone()) Literal(string);
         } else {
           // Unexpected token.
           Token::Value next = Next();
@@ -3411,7 +3424,7 @@
     Expression* value = ParseAssignmentExpression(true, CHECK_OK);
 
     ObjectLiteral::Property* property =
-        new ObjectLiteral::Property(key, value);
+        new(zone()) ObjectLiteral::Property(key, value);
 
     // Mark object literals that contain function literals and pretenure the
     // literal so it can be added as a constant function property.
@@ -3450,7 +3463,7 @@
                                        &is_simple,
                                        &fast_elements,
                                        &depth);
-  return new ObjectLiteral(constant_properties,
+  return new(zone()) ObjectLiteral(constant_properties,
                            properties,
                            literal_index,
                            is_simple,
@@ -3475,7 +3488,7 @@
   Handle<String> js_flags = NextLiteralString(TENURED);
   Next();
 
-  return new RegExpLiteral(js_pattern, js_flags, literal_index);
+  return new(zone()) RegExpLiteral(js_pattern, js_flags, literal_index);
 }
 
 
@@ -3578,9 +3591,9 @@
       VariableProxy* fproxy =
           top_scope_->NewUnresolved(function_name, inside_with());
       fproxy->BindTo(fvar);
-      body->Add(new ExpressionStatement(
-                    new Assignment(Token::INIT_CONST, fproxy,
-                                   new ThisFunction(),
+      body->Add(new(zone()) ExpressionStatement(
+                    new(zone()) Assignment(Token::INIT_CONST, fproxy,
+                                   new(zone()) ThisFunction(),
                                    RelocInfo::kNoPosition)));
     }
 
@@ -3674,7 +3687,7 @@
     }
 
     FunctionLiteral* function_literal =
-        new FunctionLiteral(name,
+        new(zone()) FunctionLiteral(name,
                             top_scope_,
                             body,
                             materialized_literal_count,
@@ -3736,7 +3749,7 @@
   }
 
   // We have a valid intrinsics call or a call to a builtin.
-  return new CallRuntime(name, function, args);
+  return new(zone()) CallRuntime(name, function, args);
 }
 
 
@@ -3791,12 +3804,12 @@
 
 
 Literal* Parser::GetLiteralUndefined() {
-  return new Literal(isolate()->factory()->undefined_value());
+  return new(zone()) Literal(isolate()->factory()->undefined_value());
 }
 
 
 Literal* Parser::GetLiteralTheHole() {
-  return new Literal(isolate()->factory()->the_hole_value());
+  return new(zone()) Literal(isolate()->factory()->the_hole_value());
 }
 
 
@@ -3944,7 +3957,7 @@
 
 
 Literal* Parser::NewNumberLiteral(double number) {
-  return new Literal(isolate()->factory()->NewNumber(number, TENURED));
+  return new(zone()) Literal(isolate()->factory()->NewNumber(number, TENURED));
 }
 
 
@@ -3991,9 +4004,9 @@
                                                                        TENURED);
 
   ZoneList<Expression*>* args = new ZoneList<Expression*>(2);
-  args->Add(new Literal(type));
-  args->Add(new Literal(array));
-  return new Throw(new CallRuntime(constructor, NULL, args),
+  args->Add(new(zone()) Literal(type));
+  args->Add(new(zone()) Literal(array));
+  return new(zone()) Throw(new(zone()) CallRuntime(constructor, NULL, args),
                    scanner().location().beg_pos);
 }
 
@@ -4316,13 +4329,13 @@
 
       // Build result of subexpression.
       if (type == CAPTURE) {
-        RegExpCapture* capture = new RegExpCapture(body, capture_index);
+        RegExpCapture* capture = new(zone()) RegExpCapture(body, capture_index);
         captures_->at(capture_index - 1) = capture;
         body = capture;
       } else if (type != GROUPING) {
         ASSERT(type == POSITIVE_LOOKAHEAD || type == NEGATIVE_LOOKAHEAD);
         bool is_positive = (type == POSITIVE_LOOKAHEAD);
-        body = new RegExpLookahead(body,
+        body = new(zone()) RegExpLookahead(body,
                                    is_positive,
                                    end_capture_index - capture_index,
                                    capture_index);
@@ -4345,10 +4358,10 @@
       Advance();
       if (multiline_) {
         builder->AddAssertion(
-            new RegExpAssertion(RegExpAssertion::START_OF_LINE));
+            new(zone()) RegExpAssertion(RegExpAssertion::START_OF_LINE));
       } else {
         builder->AddAssertion(
-            new RegExpAssertion(RegExpAssertion::START_OF_INPUT));
+            new(zone()) RegExpAssertion(RegExpAssertion::START_OF_INPUT));
         set_contains_anchor();
       }
       continue;
@@ -4358,7 +4371,7 @@
       RegExpAssertion::Type type =
           multiline_ ? RegExpAssertion::END_OF_LINE :
                        RegExpAssertion::END_OF_INPUT;
-      builder->AddAssertion(new RegExpAssertion(type));
+      builder->AddAssertion(new(zone()) RegExpAssertion(type));
       continue;
     }
     case '.': {
@@ -4366,7 +4379,7 @@
       // everything except \x0a, \x0d, \u2028 and \u2029
       ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
       CharacterRange::AddClassEscape('.', ranges);
-      RegExpTree* atom = new RegExpCharacterClass(ranges, false);
+      RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
       builder->AddAtom(atom);
       break;
     }
@@ -4399,7 +4412,7 @@
         captures_->Add(NULL);
       }
       // Store current state and begin new disjunction parsing.
-      stored_state = new RegExpParserState(stored_state,
+      stored_state = new(zone()) RegExpParserState(stored_state,
                                            type,
                                            captures_started());
       builder = stored_state->builder();
@@ -4419,12 +4432,12 @@
       case 'b':
         Advance(2);
         builder->AddAssertion(
-            new RegExpAssertion(RegExpAssertion::BOUNDARY));
+            new(zone()) RegExpAssertion(RegExpAssertion::BOUNDARY));
         continue;
       case 'B':
         Advance(2);
         builder->AddAssertion(
-            new RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
+            new(zone()) RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
         continue;
       // AtomEscape ::
       //   CharacterClassEscape
@@ -4436,7 +4449,7 @@
         Advance(2);
         ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
         CharacterRange::AddClassEscape(c, ranges);
-        RegExpTree* atom = new RegExpCharacterClass(ranges, false);
+        RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
         builder->AddAtom(atom);
         break;
       }
@@ -4452,7 +4465,7 @@
             builder->AddEmpty();
             break;
           }
-          RegExpTree* atom = new RegExpBackReference(capture);
+          RegExpTree* atom = new(zone()) RegExpBackReference(capture);
           builder->AddAtom(atom);
           break;
         }
@@ -4970,7 +4983,7 @@
     ranges->Add(CharacterRange::Everything());
     is_negated = !is_negated;
   }
-  return new RegExpCharacterClass(ranges, is_negated);
+  return new(zone()) RegExpCharacterClass(ranges, is_negated);
 }
 
 
diff --git a/src/parser.h b/src/parser.h
index 74cb049..78faea1 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -280,6 +280,9 @@
   void FlushCharacters();
   void FlushText();
   void FlushTerms();
+  Zone* zone() { return zone_; }
+
+  Zone* zone_;
   bool pending_empty_;
   ZoneList<uc16>* characters_;
   BufferedZoneList<RegExpTree, 2> terms_;
@@ -389,6 +392,7 @@
   };
 
   Isolate* isolate() { return isolate_; }
+  Zone* zone() { return isolate_->zone(); }
 
   uc32 current() { return current_; }
   bool has_more() { return has_more_; }
@@ -453,6 +457,7 @@
   };
 
   Isolate* isolate() { return isolate_; }
+  Zone* zone() { return isolate_->zone(); }
 
   // Called by ParseProgram after setting up the scanner.
   FunctionLiteral* DoParseProgram(Handle<String> source,
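
The new(zone()) calls throughout the parser, together with the zone() accessors added above, switch AST and regexp node allocation to placement new into the isolate's Zone, so nodes are bump-allocated and reclaimed in bulk when the zone goes away rather than managed individually. A minimal, self-contained sketch of the idiom; the Arena class below is a hypothetical stand-in and not V8's actual Zone API:

    #include <cstddef>
    #include <cstdlib>
    #include <vector>

    // Hypothetical arena: memory is only reclaimed when the arena itself is
    // destroyed, like the Zone the parser allocates from.
    class Arena {
     public:
      ~Arena() {
        for (size_t i = 0; i < blocks_.size(); ++i) std::free(blocks_[i]);
      }
      void* Allocate(size_t size) {
        void* p = std::malloc(size);  // a real arena would carve large blocks
        blocks_.push_back(p);
        return p;
      }
     private:
      std::vector<void*> blocks_;
    };

    // Overloading operator new lets call sites write new(&arena) Node(...),
    // mirroring the new(zone()) Foo(...) pattern introduced in this patch.
    inline void* operator new(size_t size, Arena* arena) {
      return arena->Allocate(size);
    }
    inline void operator delete(void*, Arena*) {}  // matching placement delete

    struct Node {
      explicit Node(int v) : value(v) {}
      int value;
    };

    int main() {
      Arena arena;
      Node* n = new(&arena) Node(42);  // no delete: memory dies with the arena
      return n->value == 42 ? 0 : 1;
    }
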
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 4b450c1..d591b9d 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -42,7 +42,6 @@
 #include "v8.h"
 
 #include "platform.h"
-#include "top.h"
 #include "v8threads.h"
 #include "vm-state-inl.h"
 #include "win32-headers.h"
@@ -59,6 +58,9 @@
 }
 
 
+static Mutex* limit_mutex = NULL;
+
+
 void OS::Setup() {
   // Seed the random number generator.
   // Convert the current time to a 64-bit integer first, before converting it
@@ -67,6 +69,7 @@
   // call this setup code within the same millisecond.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srandom(static_cast<unsigned int>(seed));
+  limit_mutex = CreateMutex();
 }
 
 
@@ -119,6 +122,9 @@
 
 
 static void UpdateAllocatedSpaceLimits(void* address, int size) {
+  ASSERT(limit_mutex != NULL);
+  ScopedLock lock(limit_mutex);
+
   lowest_ever_allocated = Min(lowest_ever_allocated, address);
   highest_ever_allocated =
       Max(highest_ever_allocated,
@@ -254,6 +260,7 @@
   const int kLibNameLen = FILENAME_MAX + 1;
   char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
 
+  i::Isolate* isolate = ISOLATE;
   // This loop will terminate once the scanning hits an EOF.
   while (true) {
     uintptr_t start, end;
@@ -287,7 +294,7 @@
         snprintf(lib_name, kLibNameLen,
                  "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
       }
-      LOG(SharedLibraryEvent(lib_name, start, end));
+      LOG(isolate, SharedLibraryEvent(lib_name, start, end));
     } else {
       // Entry not describing executable data. Skip to end of line to setup
       // reading the next entry.
@@ -314,47 +321,44 @@
 }
 
 
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
+// The VirtualMemory implementation is taken from platform-win32.cc.
+// The mmap-based virtual memory implementation as it is used on most posix
+// platforms does not work well because Cygwin does not support MAP_FIXED.
+// This causes VirtualMemory::Commit to not always commit the memory region
+// specified.
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
 
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(NULL, size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
+  address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
   size_ = size;
 }
 
 
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
   }
 }
 
 
-bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
-}
-
-
 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-
-  if (mprotect(address, size, prot) != 0) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
     return false;
   }
 
-  UpdateAllocatedSpaceLimits(address, size);
+  UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
   return true;
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+  ASSERT(IsReserved());
+  return VirtualFree(address, size, MEM_DECOMMIT) != false;
 }
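
The reserve/commit split used here keeps one large address range reserved while committing pages only as they are needed, which the plain mmap approach cannot guarantee on Cygwin without MAP_FIXED. A small Win32-only sketch of the same cycle, independent of V8's VirtualMemory class:

    #include <windows.h>
    #include <cstdio>

    int main() {
      const size_t kSize = 1 << 20;
      // Reserve 1 MB of address space without backing it with memory.
      void* base = VirtualAlloc(NULL, kSize, MEM_RESERVE, PAGE_NOACCESS);
      if (base == NULL) return 1;
      // Commit (and make writable) only the first 64 KB of the reservation.
      if (VirtualAlloc(base, 64 * 1024, MEM_COMMIT, PAGE_READWRITE) == NULL) {
        return 1;
      }
      static_cast<char*>(base)[0] = 'x';           // now backed by real pages
      VirtualFree(base, 64 * 1024, MEM_DECOMMIT);  // give the pages back
      VirtualFree(base, 0, MEM_RELEASE);           // release the reservation
      std::printf("reserve/commit cycle ok\n");
      return 0;
    }
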
 
 
@@ -427,6 +431,7 @@
   // one) so we initialize it here too.
   thread->thread_handle_data()->thread_ = pthread_self();
   ASSERT(thread->IsValid());
+  Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return NULL;
 }
@@ -439,7 +444,14 @@
 
 
 void Thread::Start() {
-  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
+  pthread_attr_t* attr_ptr = NULL;
+  pthread_attr_t attr;
+  if (stack_size_ > 0) {
+    pthread_attr_init(&attr);
+    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+    attr_ptr = &attr;
+  }
+  pthread_create(&thread_handle_data()->thread_, attr_ptr, ThreadEntry, this);
   ASSERT(IsValid());
 }
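
The attribute object is only built when a non-default stack size was requested; otherwise pthread_create receives NULL and the platform default applies. A standalone sketch of the same pattern:

    #include <pthread.h>
    #include <cstdio>

    static void* Run(void*) {
      std::printf("worker running\n");
      return NULL;
    }

    int main() {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setstacksize(&attr, 1024 * 1024);  // request a 1 MB stack
      pthread_t thread;
      if (pthread_create(&thread, &attr, Run, NULL) != 0) return 1;
      pthread_join(thread, NULL);
      pthread_attr_destroy(&attr);
      return 0;
    }
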
 
@@ -623,128 +635,176 @@
 
 class Sampler::PlatformData : public Malloced {
  public:
-  explicit PlatformData(Sampler* sampler) {
-    sampler_ = sampler;
-    sampler_thread_ = INVALID_HANDLE_VALUE;
-    profiled_thread_ = INVALID_HANDLE_VALUE;
-  }
-
-  Sampler* sampler_;
-  HANDLE sampler_thread_;
-  HANDLE profiled_thread_;
-  RuntimeProfilerRateLimiter rate_limiter_;
-
-  // Sampler thread handler.
-  void Runner() {
-    while (sampler_->IsActive()) {
-      if (rate_limiter_.SuspendIfNecessary()) continue;
-      Sample();
-      Sleep(sampler_->interval_);
-    }
-  }
-
-  void Sample() {
-    if (sampler_->IsProfiling()) {
-      // Context used for sampling the register state of the profiled thread.
-      CONTEXT context;
-      memset(&context, 0, sizeof(context));
-
-      TickSample sample_obj;
-      TickSample* sample = CpuProfiler::TickSampleEvent();
-      if (sample == NULL) sample = &sample_obj;
-
-      static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
-      if (SuspendThread(profiled_thread_) == kSuspendFailed) return;
-      sample->state = Top::current_vm_state();
-
-      context.ContextFlags = CONTEXT_FULL;
-      if (GetThreadContext(profiled_thread_, &context) != 0) {
-#if V8_HOST_ARCH_X64
-        sample->pc = reinterpret_cast<Address>(context.Rip);
-        sample->sp = reinterpret_cast<Address>(context.Rsp);
-        sample->fp = reinterpret_cast<Address>(context.Rbp);
-#else
-        sample->pc = reinterpret_cast<Address>(context.Eip);
-        sample->sp = reinterpret_cast<Address>(context.Esp);
-        sample->fp = reinterpret_cast<Address>(context.Ebp);
-#endif
-        sampler_->SampleStack(sample);
-        sampler_->Tick(sample);
-      }
-      ResumeThread(profiled_thread_);
-    }
-    if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
-  }
-};
-
-
-// Entry point for sampler thread.
-static DWORD __stdcall SamplerEntry(void* arg) {
-  Sampler::PlatformData* data =
-      reinterpret_cast<Sampler::PlatformData*>(arg);
-  data->Runner();
-  return 0;
-}
-
-
-// Initialize a profile sampler.
-Sampler::Sampler(int interval)
-    : interval_(interval),
-      profiling_(false),
-      active_(false),
-      samples_taken_(0) {
-  data_ = new PlatformData(this);
-}
-
-
-Sampler::~Sampler() {
-  delete data_;
-}
-
-
-// Start profiling.
-void Sampler::Start() {
-  // Do not start multiple threads for the same sampler.
-  ASSERT(!IsActive());
-
   // Get a handle to the calling thread. This is the thread that we are
   // going to profile. We need to make a copy of the handle because we are
   // going to use it in the sampler thread. Using GetThreadHandle() will
   // not work in this case. We're using OpenThread because DuplicateHandle
   // for some reason doesn't work in Chrome's sandbox.
-  data_->profiled_thread_ = OpenThread(THREAD_GET_CONTEXT |
-                                       THREAD_SUSPEND_RESUME |
-                                       THREAD_QUERY_INFORMATION,
-                                       false,
-                                       GetCurrentThreadId());
-  BOOL ok = data_->profiled_thread_ != NULL;
-  if (!ok) return;
+  PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
+                                               THREAD_SUSPEND_RESUME |
+                                               THREAD_QUERY_INFORMATION,
+                                               false,
+                                               GetCurrentThreadId())) {}
 
-  // Start sampler thread.
-  DWORD tid;
+  ~PlatformData() {
+    if (profiled_thread_ != NULL) {
+      CloseHandle(profiled_thread_);
+      profiled_thread_ = NULL;
+    }
+  }
+
+  HANDLE profiled_thread() { return profiled_thread_; }
+
+ private:
+  HANDLE profiled_thread_;
+};
+
+
+class SamplerThread : public Thread {
+ public:
+  explicit SamplerThread(int interval)
+      : Thread(NULL, "SamplerThread"),
+        interval_(interval) {}
+
+  static void AddActiveSampler(Sampler* sampler) {
+    ScopedLock lock(mutex_);
+    SamplerRegistry::AddActiveSampler(sampler);
+    if (instance_ == NULL) {
+      instance_ = new SamplerThread(sampler->interval());
+      instance_->Start();
+    } else {
+      ASSERT(instance_->interval_ == sampler->interval());
+    }
+  }
+
+  static void RemoveActiveSampler(Sampler* sampler) {
+    ScopedLock lock(mutex_);
+    SamplerRegistry::RemoveActiveSampler(sampler);
+    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+      RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
+      instance_->Join();
+      delete instance_;
+      instance_ = NULL;
+    }
+  }
+
+  // Implement Thread::Run().
+  virtual void Run() {
+    SamplerRegistry::State state;
+    while ((state = SamplerRegistry::GetState()) !=
+           SamplerRegistry::HAS_NO_SAMPLERS) {
+      bool cpu_profiling_enabled =
+          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+      // When CPU profiling is enabled both JavaScript and C++ code is
+      // profiled. We must not suspend.
+      if (!cpu_profiling_enabled) {
+        if (rate_limiter_.SuspendIfNecessary()) continue;
+      }
+      if (cpu_profiling_enabled) {
+        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+          return;
+        }
+      }
+      if (runtime_profiler_enabled) {
+        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+          return;
+        }
+      }
+      OS::Sleep(interval_);
+    }
+  }
+
+  static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
+    if (!sampler->isolate()->IsInitialized()) return;
+    if (!sampler->IsProfiling()) return;
+    SamplerThread* sampler_thread =
+        reinterpret_cast<SamplerThread*>(raw_sampler_thread);
+    sampler_thread->SampleContext(sampler);
+  }
+
+  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+    if (!sampler->isolate()->IsInitialized()) return;
+    sampler->isolate()->runtime_profiler()->NotifyTick();
+  }
+
+  void SampleContext(Sampler* sampler) {
+    HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
+    if (profiled_thread == NULL) return;
+
+    // Context used for sampling the register state of the profiled thread.
+    CONTEXT context;
+    memset(&context, 0, sizeof(context));
+
+    TickSample sample_obj;
+    TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+    if (sample == NULL) sample = &sample_obj;
+
+    static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+    if (SuspendThread(profiled_thread) == kSuspendFailed) return;
+    sample->state = sampler->isolate()->current_vm_state();
+
+    context.ContextFlags = CONTEXT_FULL;
+    if (GetThreadContext(profiled_thread, &context) != 0) {
+#if V8_HOST_ARCH_X64
+      sample->pc = reinterpret_cast<Address>(context.Rip);
+      sample->sp = reinterpret_cast<Address>(context.Rsp);
+      sample->fp = reinterpret_cast<Address>(context.Rbp);
+#else
+      sample->pc = reinterpret_cast<Address>(context.Eip);
+      sample->sp = reinterpret_cast<Address>(context.Esp);
+      sample->fp = reinterpret_cast<Address>(context.Ebp);
+#endif
+      sampler->SampleStack(sample);
+      sampler->Tick(sample);
+    }
+    ResumeThread(profiled_thread);
+  }
+
+  const int interval_;
+  RuntimeProfilerRateLimiter rate_limiter_;
+
+  // Protects the process wide state below.
+  static Mutex* mutex_;
+  static SamplerThread* instance_;
+
+  DISALLOW_COPY_AND_ASSIGN(SamplerThread);
+};
+
+
+Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+SamplerThread* SamplerThread::instance_ = NULL;
+
+
+Sampler::Sampler(Isolate* isolate, int interval)
+    : isolate_(isolate),
+      interval_(interval),
+      profiling_(false),
+      active_(false),
+      samples_taken_(0) {
+  data_ = new PlatformData;
+}
+
+
+Sampler::~Sampler() {
+  ASSERT(!IsActive());
+  delete data_;
+}
+
+
+void Sampler::Start() {
+  ASSERT(!IsActive());
   SetActive(true);
-  data_->sampler_thread_ = CreateThread(NULL, 0, SamplerEntry, data_, 0, &tid);
-  // Set thread to high priority to increase sampling accuracy.
-  SetThreadPriority(data_->sampler_thread_, THREAD_PRIORITY_TIME_CRITICAL);
+  SamplerThread::AddActiveSampler(this);
 }
 
 
-// Stop profiling.
 void Sampler::Stop() {
-  // Seting active to false triggers termination of the sampler
-  // thread.
+  ASSERT(IsActive());
+  SamplerThread::RemoveActiveSampler(this);
   SetActive(false);
-
-  // Wait for sampler thread to terminate.
-  Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
-  WaitForSingleObject(data_->sampler_thread_, INFINITE);
-
-  // Release the thread handles
-  CloseHandle(data_->sampler_thread_);
-  CloseHandle(data_->profiled_thread_);
 }
 
-
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
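
The per-sampler thread is replaced by a single SamplerThread that starts when the first sampler registers and is joined when the last one unregisters, with every active sampler serviced from one loop. A rough sketch of that ownership model, using standard C++ threading rather than V8's Thread/Mutex wrappers:

    #include <algorithm>
    #include <atomic>
    #include <chrono>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct Sampler { int id; };

    class SamplerThread {
     public:
      static void AddActiveSampler(Sampler* s) {
        std::lock_guard<std::mutex> lock(mutex_);
        samplers_.push_back(s);
        // The first registered sampler starts the shared thread.
        if (!running_.exchange(true)) thread_ = std::thread(Run);
      }
      static void RemoveActiveSampler(Sampler* s) {
        bool last = false;
        {
          std::lock_guard<std::mutex> lock(mutex_);
          samplers_.erase(std::remove(samplers_.begin(), samplers_.end(), s),
                          samplers_.end());
          last = samplers_.empty();
        }
        // The last one to leave stops and joins it.
        if (last) { running_ = false; thread_.join(); }
      }
     private:
      static void Run() {
        while (running_) {
          {
            std::lock_guard<std::mutex> lock(mutex_);
            for (size_t i = 0; i < samplers_.size(); ++i)
              std::printf("tick sampler %d\n", samplers_[i]->id);  // stands in for SampleContext()
          }
          std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
      }
      static std::mutex mutex_;
      static std::vector<Sampler*> samplers_;
      static std::thread thread_;
      static std::atomic<bool> running_;
    };

    std::mutex SamplerThread::mutex_;
    std::vector<Sampler*> SamplerThread::samplers_;
    std::thread SamplerThread::thread_;
    std::atomic<bool> SamplerThread::running_(false);

    int main() {
      Sampler a = {1}, b = {2};
      SamplerThread::AddActiveSampler(&a);
      SamplerThread::AddActiveSampler(&b);
      std::this_thread::sleep_for(std::chrono::milliseconds(50));
      SamplerThread::RemoveActiveSampler(&a);
      SamplerThread::RemoveActiveSampler(&b);
      return 0;
    }
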
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 17e3042..bfdf3b2 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -48,8 +48,10 @@
 #include <sys/time.h>
 #include <sys/resource.h>
 #include <sys/types.h>
+#include <sys/sysctl.h>
 #include <stdarg.h>
 #include <stdlib.h>
+#include <string.h>
 #include <errno.h>
 
 #undef MAP_TYPE
@@ -507,12 +509,79 @@
 }
 
 
+#ifdef V8_FAST_TLS_SUPPORTED
+
+static Atomic32 tls_base_offset_initialized = 0;
+intptr_t kMacTlsBaseOffset = 0;
+
+// It's safe to do the initialization more than once, but it has to be
+// done at least once.
+static void InitializeTlsBaseOffset() {
+  const size_t kBufferSize = 128;
+  char buffer[kBufferSize];
+  size_t buffer_size = kBufferSize;
+  int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
+  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
+    V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+  }
+  // The buffer now contains a string of the form XX.YY.ZZ, where
+  // XX is the major kernel version component.
+  // Make sure the buffer is 0-terminated.
+  buffer[kBufferSize - 1] = '\0';
+  char* period_pos = strchr(buffer, '.');
+  *period_pos = '\0';
+  int kernel_version_major =
+      static_cast<int>(strtol(buffer, NULL, 10));  // NOLINT
+  // The constants below are taken from pthreads.s from the XNU kernel
+  // sources archive at www.opensource.apple.com.
+  if (kernel_version_major < 11) {
+    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
+    // same offsets.
+#if defined(V8_HOST_ARCH_IA32)
+    kMacTlsBaseOffset = 0x48;
+#else
+    kMacTlsBaseOffset = 0x60;
+#endif
+  } else {
+    // 11.x.x (Lion) changed the offset.
+    kMacTlsBaseOffset = 0;
+  }
+
+  Release_Store(&tls_base_offset_initialized, 1);
+}
+
+static void CheckFastTls(Thread::LocalStorageKey key) {
+  void* expected = reinterpret_cast<void*>(0x1234CAFE);
+  Thread::SetThreadLocal(key, expected);
+  void* actual = Thread::GetExistingThreadLocal(key);
+  if (expected != actual) {
+    V8_Fatal(__FILE__, __LINE__,
+             "V8 failed to initialize fast TLS on current kernel");
+  }
+  Thread::SetThreadLocal(key, NULL);
+}
+
+#endif  // V8_FAST_TLS_SUPPORTED
+
+
 Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+#ifdef V8_FAST_TLS_SUPPORTED
+  bool check_fast_tls = false;
+  if (tls_base_offset_initialized == 0) {
+    check_fast_tls = true;
+    InitializeTlsBaseOffset();
+  }
+#endif
   pthread_key_t key;
   int result = pthread_key_create(&key, NULL);
   USE(result);
   ASSERT(result == 0);
-  return static_cast<LocalStorageKey>(key);
+  LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
+#ifdef V8_FAST_TLS_SUPPORTED
+  // If we just initialized fast TLS support, make sure it works.
+  if (check_fast_tls) CheckFastTls(typed_key);
+#endif
+  return typed_key;
 }
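
Deriving the TLS base offset from the major kernel version means the value is read once, at key-creation time, from kern.osrelease. The query itself reduces to the following Darwin/BSD-only sketch:

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    int main() {
      char buffer[128];
      size_t size = sizeof(buffer);
      int name[] = { CTL_KERN, KERN_OSRELEASE };
      // kern.osrelease is a string of the form "XX.YY.ZZ".
      if (sysctl(name, 2, buffer, &size, NULL, 0) != 0) return 1;
      buffer[sizeof(buffer) - 1] = '\0';
      char* dot = std::strchr(buffer, '.');
      if (dot != NULL) *dot = '\0';
      int major = static_cast<int>(std::strtol(buffer, NULL, 10));
      std::printf("Darwin major kernel version: %d\n", major);  // 10 = Snow Leopard, 11 = Lion
      return 0;
    }
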
 
 
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 1dd486e..c4b0fb8 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -205,6 +205,31 @@
 }
 
 
+#if defined(V8_TARGET_ARCH_IA32)
+static OS::MemCopyFunction memcopy_function = NULL;
+static Mutex* memcopy_function_mutex = OS::CreateMutex();
+// Defined in codegen-ia32.cc.
+OS::MemCopyFunction CreateMemCopyFunction();
+
+// Copy memory area to disjoint memory area.
+void OS::MemCopy(void* dest, const void* src, size_t size) {
+  if (memcopy_function == NULL) {
+    ScopedLock lock(memcopy_function_mutex);
+    if (memcopy_function == NULL) {
+      OS::MemCopyFunction temp = CreateMemCopyFunction();
+      MemoryBarrier();
+      memcopy_function = temp;
+    }
+  }
+  // Note: here we rely on dependent reads being ordered. This is true
+  // on all architectures we currently support.
+  (*memcopy_function)(dest, src, size);
+#ifdef DEBUG
+  CHECK_EQ(0, memcmp(dest, src, size));
+#endif
+}
+#endif  // V8_TARGET_ARCH_IA32
+
 // ----------------------------------------------------------------------------
 // POSIX string support.
 //
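
The MemCopy wrapper above creates the specialized copy routine lazily, guarded by a mutex and published behind a memory barrier so other threads never observe a half-initialized pointer. The same double-checked initialization, sketched with C++11 atomics instead of the hand-rolled MemoryBarrier(), and with memcpy standing in for the generated routine:

    #include <atomic>
    #include <cstdio>
    #include <cstring>
    #include <mutex>

    typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);

    static void PlainCopy(void* dest, const void* src, size_t size) {
      std::memcpy(dest, src, size);  // stands in for the generated fast copy
    }

    // Hypothetical factory; in V8 this emits a specialized routine at runtime.
    static MemCopyFunction CreateMemCopyFunction() { return &PlainCopy; }

    static std::atomic<MemCopyFunction> memcopy_function(NULL);
    static std::mutex memcopy_mutex;

    void MemCopy(void* dest, const void* src, size_t size) {
      // Fast path: a single acquire load once the pointer has been published.
      MemCopyFunction fn = memcopy_function.load(std::memory_order_acquire);
      if (fn == NULL) {
        std::lock_guard<std::mutex> lock(memcopy_mutex);
        fn = memcopy_function.load(std::memory_order_relaxed);
        if (fn == NULL) {
          fn = CreateMemCopyFunction();
          memcopy_function.store(fn, std::memory_order_release);  // publish
        }
      }
      (*fn)(dest, src, size);
    }

    int main() {
      char src[8] = "abcdefg";
      char dst[8] = {0};
      MemCopy(dst, src, sizeof(src));
      std::printf("%s\n", dst);
      return 0;
    }
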
diff --git a/src/platform-tls-mac.h b/src/platform-tls-mac.h
index 86a3347..728524e 100644
--- a/src/platform-tls-mac.h
+++ b/src/platform-tls-mac.h
@@ -37,20 +37,20 @@
 
 #define V8_FAST_TLS_SUPPORTED 1
 
+extern intptr_t kMacTlsBaseOffset;
+
 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
 
 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
-  // The constants below are taken from pthreads.s from the XNU kernel
-  // sources archive at www.opensource.apple.com.
   intptr_t result;
 #if defined(V8_HOST_ARCH_IA32)
-  asm("movl %%gs:0x48(,%1,4), %0;"
+  asm("movl %%gs:(%1,%2,4), %0;"
       :"=r"(result)  // Output must be a writable register.
-      :"0"(index));  // Input is the same as output.
+      :"r"(kMacTlsBaseOffset), "r"(index));
 #else
-  asm("movq %%gs:0x60(,%1,8), %0;"
+  asm("movq %%gs:(%1,%2,8), %0;"
       :"=r"(result)
-      :"0"(index));
+      :"r"(kMacTlsBaseOffset), "r"(index));
 #endif
   return result;
 }
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 50a9e5b..ab03e3d 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -176,16 +176,50 @@
 
 static Mutex* limit_mutex = NULL;
 
+#if defined(V8_TARGET_ARCH_IA32)
+static OS::MemCopyFunction memcopy_function = NULL;
+static Mutex* memcopy_function_mutex = OS::CreateMutex();
+// Defined in codegen-ia32.cc.
+OS::MemCopyFunction CreateMemCopyFunction();
+
+// Copy memory area to disjoint memory area.
+void OS::MemCopy(void* dest, const void* src, size_t size) {
+  if (memcopy_function == NULL) {
+    ScopedLock lock(memcopy_function_mutex);
+    if (memcopy_function == NULL) {
+      OS::MemCopyFunction temp = CreateMemCopyFunction();
+      MemoryBarrier();
+      memcopy_function = temp;
+    }
+  }
+  // Note: here we rely on dependent reads being ordered. This is true
+  // on all architectures we currently support.
+  (*memcopy_function)(dest, src, size);
+#ifdef DEBUG
+  CHECK_EQ(0, memcmp(dest, src, size));
+#endif
+}
+#endif  // V8_TARGET_ARCH_IA32
 
 #ifdef _WIN64
 typedef double (*ModuloFunction)(double, double);
-
+static ModuloFunction modulo_function = NULL;
+static Mutex* modulo_function_mutex = OS::CreateMutex();
 // Defined in codegen-x64.cc.
 ModuloFunction CreateModuloFunction();
 
 double modulo(double x, double y) {
-  static ModuloFunction function = CreateModuloFunction();
-  return function(x, y);
+  if (modulo_function == NULL) {
+    ScopedLock lock(modulo_function_mutex);
+    if (modulo_function == NULL) {
+      ModuloFunction temp = CreateModuloFunction();
+      MemoryBarrier();
+      modulo_function = temp;
+    }
+  }
+  // Note: here we rely on dependent reads being ordered. This is true
+  // on all architectures we currently support.
+  return (*modulo_function)(x, y);
 }
 #else  // Win32
 
diff --git a/src/platform.h b/src/platform.h
index b2e0c48..fea16c8 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -303,6 +303,21 @@
 
   static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
 
+#if defined(V8_TARGET_ARCH_IA32)
+  // Copy memory area to disjoint memory area.
+  static void MemCopy(void* dest, const void* src, size_t size);
+  // Limit below which the extra overhead of the MemCopy function is likely
+  // to outweigh the benefits of faster copying.
+  static const int kMinComplexMemCopy = 64;
+  typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
+
+#else  // V8_TARGET_ARCH_IA32
+  static void MemCopy(void* dest, const void* src, size_t size) {
+    memcpy(dest, src, size);
+  }
+  static const int kMinComplexMemCopy = 256;
+#endif  // V8_TARGET_ARCH_IA32
+
  private:
   static const int msPerSecond = 1000;
 
@@ -493,10 +508,10 @@
 
 
 // ----------------------------------------------------------------------------
-// ScopedLock/ScopedUnlock
+// ScopedLock
 //
-// Stack-allocated ScopedLocks/ScopedUnlocks provide block-scoped
-// locking and unlocking of a mutex.
+// Stack-allocated ScopedLocks provide block-scoped locking and
+// unlocking of a mutex.
 class ScopedLock {
  public:
   explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
@@ -596,7 +611,8 @@
         sp(NULL),
         fp(NULL),
         tos(NULL),
-        frames_count(0) {}
+        frames_count(0),
+        has_external_callback(false) {}
   StateTag state;  // The state of the VM.
   Address pc;      // Instruction pointer.
   Address sp;      // Stack pointer.
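
kMinComplexMemCopy expresses a size threshold: below it, the call and setup overhead of the specialized OS::MemCopy routine is expected to cost more than it saves, so small copies should stay on plain memcpy. A hypothetical caller-side dispatch illustrating how such a threshold is typically used (the constant and helper names here are invented for illustration):

    #include <cstdio>
    #include <cstring>

    static const size_t kMinComplexMemCopy = 64;  // the header uses 64 on ia32, 256 elsewhere

    static void FastMemCopy(void* dest, const void* src, size_t size) {
      std::memcpy(dest, src, size);  // placeholder for the specialized routine
    }

    // Small copies go straight to memcpy; only larger ones pay for the
    // specialized path, matching the intent of kMinComplexMemCopy.
    void CopyBytes(void* dest, const void* src, size_t size) {
      if (size < kMinComplexMemCopy) {
        std::memcpy(dest, src, size);
      } else {
        FastMemCopy(dest, src, size);
      }
    }

    int main() {
      char small[16];
      CopyBytes(small, "hello", 6);        // below the threshold
      char src[256];
      char big[256];
      std::memset(src, 'a', sizeof(src));
      CopyBytes(big, src, sizeof(big));    // at/above the threshold
      std::printf("%s %c\n", small, big[0]);
      return 0;
    }
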
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index c9db94f..fd3268d 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1690,7 +1690,7 @@
                         : "",
                     children_count,
                     retainers_count);
-  } else if (object->IsFixedArray()) {
+  } else if (object->IsFixedArray() || object->IsByteArray()) {
     return AddEntry(object,
                     HeapEntry::kArray,
                     "",
@@ -1705,7 +1705,7 @@
   }
   return AddEntry(object,
                   HeapEntry::kHidden,
-                  "system",
+                  GetSystemEntryName(object),
                   children_count,
                   retainers_count);
 }
@@ -1731,6 +1731,21 @@
 }
 
 
+const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
+  switch (object->map()->instance_type()) {
+    case MAP_TYPE: return "system / Map";
+    case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell";
+    case PROXY_TYPE: return "system / Proxy";
+    case ODDBALL_TYPE: return "system / Oddball";
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+    case NAME##_TYPE: return "system / "#Name;
+  STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+    default: return "system";
+  }
+}
+
+
 int V8HeapExplorer::EstimateObjectsCount() {
   HeapIterator iterator(HeapIterator::kFilterUnreachable);
   int objects_count = 0;
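
GetSystemEntryName turns previously anonymous "system" heap entries into names derived from the object's instance type; the STRUCT_LIST/MAKE_STRUCT_CASE pair is the usual X-macro trick of expanding one list into many switch cases. A self-contained miniature of that trick (the type list below is invented for illustration):

    #include <cstdio>

    // One list, expanded twice: once into the enum, once into the switch.
    #define TYPE_LIST(V)  \
      V(MAP, Map)         \
      V(ODDBALL, Oddball) \
      V(PROXY, Proxy)

    enum InstanceType {
    #define DECLARE_TYPE(NAME, Name) NAME##_TYPE,
      TYPE_LIST(DECLARE_TYPE)
    #undef DECLARE_TYPE
      OTHER_TYPE
    };

    const char* GetSystemEntryName(InstanceType type) {
      switch (type) {
    #define MAKE_CASE(NAME, Name) case NAME##_TYPE: return "system / " #Name;
        TYPE_LIST(MAKE_CASE)
    #undef MAKE_CASE
        default: return "system";
      }
    }

    int main() {
      std::printf("%s\n", GetSystemEntryName(MAP_TYPE));    // system / Map
      std::printf("%s\n", GetSystemEntryName(OTHER_TYPE));  // system
      return 0;
    }
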
@@ -1745,12 +1760,10 @@
  public:
   IndexedReferencesExtractor(V8HeapExplorer* generator,
                              HeapObject* parent_obj,
-                             HeapEntry* parent_entry,
-                             bool process_field_marks = false)
+                             HeapEntry* parent_entry)
       : generator_(generator),
         parent_obj_(parent_obj),
         parent_(parent_entry),
-        process_field_marks_(process_field_marks),
         next_index_(1) {
   }
   void VisitPointers(Object** start, Object** end) {
@@ -1768,7 +1781,7 @@
   }
  private:
   bool CheckVisitedAndUnmark(Object** field) {
-    if (process_field_marks_ && (*field)->IsFailure()) {
+    if ((*field)->IsFailure()) {
       intptr_t untagged = reinterpret_cast<intptr_t>(*field) & ~kFailureTagMask;
       *field = reinterpret_cast<Object*>(untagged | kHeapObjectTag);
       ASSERT((*field)->IsHeapObject());
@@ -1779,7 +1792,6 @@
   V8HeapExplorer* generator_;
   HeapObject* parent_obj_;
   HeapEntry* parent_;
-  bool process_field_marks_;
   int next_index_;
 };
 
@@ -1794,6 +1806,7 @@
     // uses for the global object.
     JSGlobalProxy* proxy = JSGlobalProxy::cast(obj);
     SetRootShortcutReference(proxy->map()->prototype());
+    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
     IndexedReferencesExtractor refs_extractor(this, obj, entry);
     obj->Iterate(&refs_extractor);
   } else if (obj->IsJSObject()) {
@@ -1806,10 +1819,6 @@
         obj, entry, HEAP->Proto_symbol(), js_obj->GetPrototype());
     if (obj->IsJSFunction()) {
       JSFunction* js_fun = JSFunction::cast(js_obj);
-      SetInternalReference(
-          js_fun, entry,
-          "code", js_fun->shared(),
-          JSFunction::kSharedFunctionInfoOffset);
       Object* proto_or_map = js_fun->prototype_or_initial_map();
       if (!proto_or_map->IsTheHole()) {
         if (!proto_or_map->IsMap()) {
@@ -1823,8 +1832,24 @@
               HEAP->prototype_symbol(), js_fun->prototype());
         }
       }
+      SetInternalReference(js_fun, entry,
+                           "shared", js_fun->shared(),
+                           JSFunction::kSharedFunctionInfoOffset);
+      SetInternalReference(js_fun, entry,
+                           "context", js_fun->unchecked_context(),
+                           JSFunction::kContextOffset);
+      SetInternalReference(js_fun, entry,
+                           "literals", js_fun->literals(),
+                           JSFunction::kLiteralsOffset);
     }
-    IndexedReferencesExtractor refs_extractor(this, obj, entry, true);
+    SetInternalReference(obj, entry,
+                         "properties", js_obj->properties(),
+                         JSObject::kPropertiesOffset);
+    SetInternalReference(obj, entry,
+                         "elements", js_obj->elements(),
+                         JSObject::kElementsOffset);
+    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+    IndexedReferencesExtractor refs_extractor(this, obj, entry);
     obj->Iterate(&refs_extractor);
   } else if (obj->IsString()) {
     if (obj->IsConsString()) {
@@ -1832,7 +1857,41 @@
       SetInternalReference(obj, entry, 1, cs->first());
       SetInternalReference(obj, entry, 2, cs->second());
     }
+  } else if (obj->IsMap()) {
+    Map* map = Map::cast(obj);
+    SetInternalReference(obj, entry,
+                         "prototype", map->prototype(), Map::kPrototypeOffset);
+    SetInternalReference(obj, entry,
+                         "constructor", map->constructor(),
+                         Map::kConstructorOffset);
+    SetInternalReference(obj, entry,
+                         "descriptors", map->instance_descriptors(),
+                         Map::kInstanceDescriptorsOffset);
+    SetInternalReference(obj, entry,
+                         "code_cache", map->code_cache(),
+                         Map::kCodeCacheOffset);
+    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+    IndexedReferencesExtractor refs_extractor(this, obj, entry);
+    obj->Iterate(&refs_extractor);
+  } else if (obj->IsSharedFunctionInfo()) {
+    SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+    SetInternalReference(obj, entry,
+                         "name", shared->name(),
+                         SharedFunctionInfo::kNameOffset);
+    SetInternalReference(obj, entry,
+                         "code", shared->unchecked_code(),
+                         SharedFunctionInfo::kCodeOffset);
+    SetInternalReference(obj, entry,
+                         "instance_class_name", shared->instance_class_name(),
+                         SharedFunctionInfo::kInstanceClassNameOffset);
+    SetInternalReference(obj, entry,
+                         "script", shared->script(),
+                         SharedFunctionInfo::kScriptOffset);
+    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+    IndexedReferencesExtractor refs_extractor(this, obj, entry);
+    obj->Iterate(&refs_extractor);
   } else {
+    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
     IndexedReferencesExtractor refs_extractor(this, obj, entry);
     obj->Iterate(&refs_extractor);
   }
@@ -2307,7 +2366,7 @@
   ASSERT(info_entry != NULL);
   filler_->SetNamedReference(HeapGraphEdge::kInternal,
                              wrapper, wrapper_entry,
-                             "Native",
+                             "native",
                              info, info_entry);
   filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
                                         info, info_entry,
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 377c083..bbc9efc 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -930,6 +930,7 @@
                       const char* name,
                       int children_count,
                       int retainers_count);
+  const char* GetSystemEntryName(HeapObject* object);
   void ExtractReferences(HeapObject* obj);
   void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
   void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
diff --git a/src/runtime.cc b/src/runtime.cc
index c979849..ddfdb7f 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -224,17 +224,13 @@
 }
 
 
-static MaybeObject* Runtime_CloneLiteralBoilerplate(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneLiteralBoilerplate) {
   CONVERT_CHECKED(JSObject, boilerplate, args[0]);
   return DeepCopyBoilerplate(isolate, boilerplate);
 }
 
 
-static MaybeObject* Runtime_CloneShallowLiteralBoilerplate(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneShallowLiteralBoilerplate) {
   CONVERT_CHECKED(JSObject, boilerplate, args[0]);
   return isolate->heap()->CopyJSObject(boilerplate);
 }
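
Each runtime entry point now declares itself through RUNTIME_FUNCTION(ReturnType, Name) instead of spelling out RUNTIME_CALLING_CONVENTION and fetching the isolate by hand, so the shared parameter list lives in one macro. A hypothetical reduction of the idea; the expansion shown is an assumption for illustration, not V8's actual macro:

    #include <cstdio>

    struct Isolate { const char* name; };
    struct Arguments { int length; };

    // Assumed expansion: the macro supplies the common (args, isolate)
    // parameter list so every runtime function body can use them directly.
    #define RUNTIME_FUNCTION(Type, Name) \
      static Type Name(Arguments args, Isolate* isolate)

    RUNTIME_FUNCTION(int, Runtime_Example) {
      std::printf("isolate=%s argc=%d\n", isolate->name, args.length);
      return args.length;
    }

    int main() {
      Isolate isolate = { "main" };
      Arguments args = { 2 };
      return Runtime_Example(args, &isolate) == 2 ? 0 : 1;
    }
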
@@ -475,9 +471,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateArrayLiteralBoilerplate(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralBoilerplate) {
   // Takes a FixedArray of elements containing the literal elements of
   // the array literal and produces JSArray with those elements.
   // Additionally takes the literals array of the surrounding function
@@ -499,8 +493,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateObjectLiteral(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
@@ -526,9 +519,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateObjectLiteralShallow(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
@@ -554,8 +545,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateArrayLiteral(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
@@ -574,9 +564,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateArrayLiteralShallow(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
@@ -599,9 +587,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateCatchExtensionObject(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCatchExtensionObject) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, key, args[0]);
   Object* value = args[1];
@@ -625,8 +611,7 @@
 }
 
 
-static MaybeObject* Runtime_ClassOf(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   Object* obj = args[0];
@@ -635,8 +620,7 @@
 }
 
 
-static MaybeObject* Runtime_IsInPrototypeChain(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   // See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
@@ -652,8 +636,7 @@
 
 
 // Inserts an object as the hidden prototype of another object.
-static MaybeObject* Runtime_SetHiddenPrototype(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHiddenPrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSObject, jsobject, args[0]);
@@ -695,8 +678,7 @@
 }
 
 
-static MaybeObject* Runtime_IsConstructCall(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConstructCall) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 0);
   JavaScriptFrameIterator it;
@@ -824,8 +806,7 @@
 //         [false, value, Writeable, Enumerable, Configurable]
 //  if args[1] is an accessor on args[0]
 //         [true, GetFunction, SetFunction, Enumerable, Configurable]
-static MaybeObject* Runtime_GetOwnProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
   ASSERT(args.length() == 2);
   Heap* heap = isolate->heap();
   HandleScope scope(isolate);
@@ -962,16 +943,14 @@
 }
 
 
-static MaybeObject* Runtime_PreventExtensions(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(JSObject, obj, args[0]);
   return obj->PreventExtensions();
 }
 
 
-static MaybeObject* Runtime_IsExtensible(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(JSObject, obj, args[0]);
   if (obj->IsJSGlobalProxy()) {
@@ -985,8 +964,7 @@
 }
 
 
-static MaybeObject* Runtime_RegExpCompile(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(JSRegExp, re, 0);
@@ -998,8 +976,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateApiFunction(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateApiFunction) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(FunctionTemplateInfo, data, 0);
@@ -1007,8 +984,7 @@
 }
 
 
-static MaybeObject* Runtime_IsTemplate(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) {
   ASSERT(args.length() == 1);
   Object* arg = args[0];
   bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo();
@@ -1016,8 +992,7 @@
 }
 
 
-static MaybeObject* Runtime_GetTemplateField(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(HeapObject, templ, args[0]);
   CONVERT_CHECKED(Smi, field, args[1]);
@@ -1036,8 +1011,7 @@
 }
 
 
-static MaybeObject* Runtime_DisableAccessChecks(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(HeapObject, object, args[0]);
   Map* old_map = object->map();
@@ -1057,8 +1031,7 @@
 }
 
 
-static MaybeObject* Runtime_EnableAccessChecks(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(HeapObject, object, args[0]);
   Map* old_map = object->map();
@@ -1089,8 +1062,7 @@
 }
 
 
-static MaybeObject* Runtime_DeclareGlobals(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
   ASSERT(args.length() == 4);
   HandleScope scope(isolate);
   Handle<GlobalObject> global = Handle<GlobalObject>(
@@ -1233,8 +1205,7 @@
 }
 
 
-static MaybeObject* Runtime_DeclareContextSlot(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
 
@@ -1340,8 +1311,7 @@
 }
 
 
-static MaybeObject* Runtime_InitializeVarGlobal(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
   NoHandleAllocation nha;
   // args[0] == name
   // args[1] == strict_mode
@@ -1436,8 +1406,7 @@
 }
 
 
-static MaybeObject* Runtime_InitializeConstGlobal(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
   // All constants are declared with an initial value. The name
   // of the constant is the first argument and the initial value
   // is the second.
@@ -1527,9 +1496,7 @@
 }
 
 
-static MaybeObject* Runtime_InitializeConstContextSlot(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
 
@@ -1636,9 +1603,8 @@
 }
 
 
-static MaybeObject* Runtime_OptimizeObjectForAddingMultipleProperties(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*,
+                 Runtime_OptimizeObjectForAddingMultipleProperties) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(JSObject, object, 0);
@@ -1650,8 +1616,7 @@
 }
 
 
-static MaybeObject* Runtime_RegExpExec(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
   CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
@@ -1673,8 +1638,7 @@
 }
 
 
-static MaybeObject* Runtime_RegExpConstructResult(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
   ASSERT(args.length() == 3);
   CONVERT_SMI_CHECKED(elements_count, args[0]);
   if (elements_count > JSArray::kMaxFastElementsLength) {
@@ -1707,8 +1671,7 @@
 }
 
 
-static MaybeObject* Runtime_RegExpInitializeObject(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
   AssertNoAllocation no_alloc;
   ASSERT(args.length() == 5);
   CONVERT_CHECKED(JSRegExp, regexp, args[0]);
@@ -1774,9 +1737,7 @@
 }
 
 
-static MaybeObject* Runtime_FinishArrayPrototypeSetup(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FinishArrayPrototypeSetup) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSArray, prototype, 0);
@@ -1805,8 +1766,7 @@
 }
 
 
-static MaybeObject* Runtime_SpecialArrayFunctions(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSObject, holder, 0);
@@ -1823,8 +1783,7 @@
 }
 
 
-static MaybeObject* Runtime_GetGlobalReceiver(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetGlobalReceiver) {
   // Returns a real global receiver, not one of builtins object.
   Context* global_context =
       isolate->context()->global()->global_context();
@@ -1832,9 +1791,7 @@
 }
 
 
-static MaybeObject* Runtime_MaterializeRegExpLiteral(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
@@ -1864,8 +1821,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionGetName(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -1874,8 +1830,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionSetName(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -1886,9 +1841,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionRemovePrototype(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -1900,8 +1853,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionGetScript(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScript) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -1913,8 +1865,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionGetSourceCode(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -1923,9 +1874,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionGetScriptSourcePosition(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -1935,9 +1884,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionGetPositionForOffset(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
   ASSERT(args.length() == 2);
 
   CONVERT_CHECKED(Code, code, args[0]);
@@ -1950,9 +1897,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionSetInstanceClassName(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -1963,8 +1908,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionSetLength(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -1975,8 +1919,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionSetPrototype(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -1991,8 +1934,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionIsAPIFunction(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -2002,8 +1944,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionIsBuiltin(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -2013,8 +1954,7 @@
 }
 
 
-static MaybeObject* Runtime_SetCode(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
@@ -2077,9 +2017,7 @@
 }
 
 
-static MaybeObject* Runtime_SetExpectedNumberOfProperties(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -2102,8 +2040,7 @@
 }
 
 
-static MaybeObject* Runtime_StringCharCodeAt(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -2139,8 +2076,7 @@
 }
 
 
-static MaybeObject* Runtime_CharFromCode(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CharFromCode) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   return CharFromCode(isolate, args[0]);
@@ -2874,9 +2810,7 @@
 }
 
 
-static MaybeObject* Runtime_StringReplaceRegExpWithString(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
   ASSERT(args.length() == 4);
 
   CONVERT_CHECKED(String, subject, args[0]);
@@ -2978,8 +2912,7 @@
 }
 
 
-static MaybeObject* Runtime_StringIndexOf(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringIndexOf) {
   HandleScope scope(isolate);  // create a new handle scope
   ASSERT(args.length() == 3);
 
@@ -3031,8 +2964,7 @@
   return -1;
 }
 
-static MaybeObject* Runtime_StringLastIndexOf(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
   HandleScope scope(isolate);  // create a new handle scope
   ASSERT(args.length() == 3);
 
@@ -3089,8 +3021,7 @@
 }
 
 
-static MaybeObject* Runtime_StringLocaleCompare(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3138,8 +3069,7 @@
 }
 
 
-static MaybeObject* Runtime_SubString(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
@@ -3166,8 +3096,7 @@
 }
 
 
-static MaybeObject* Runtime_StringMatch(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
   ASSERT_EQ(3, args.length());
 
   CONVERT_ARG_CHECKED(String, subject, 0);
@@ -3533,8 +3462,7 @@
 }
 
 
-static MaybeObject* Runtime_RegExpExecMultiple(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
   ASSERT(args.length() == 4);
   HandleScope handles(isolate);
 
@@ -3589,8 +3517,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToRadixString(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3629,8 +3556,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToFixed(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3655,8 +3581,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToExponential(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3681,8 +3606,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToPrecision(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3792,8 +3716,7 @@
 }
 
 
-static MaybeObject* Runtime_GetProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3805,8 +3728,7 @@
 
 
 // KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
-static MaybeObject* Runtime_KeyedGetProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3880,9 +3802,7 @@
 // Steps 9c & 12 - replace an existing data property with an accessor property.
 // Step 12 - update an existing accessor property with an accessor or generic
 //           descriptor.
-static MaybeObject* Runtime_DefineOrRedefineAccessorProperty(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
   ASSERT(args.length() == 5);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -3919,9 +3839,7 @@
 // Steps 9b & 12 - replace an existing accessor property with a data property.
 // Step 12 - update an existing data property with a data or generic
 //           descriptor.
-static MaybeObject* Runtime_DefineOrRedefineDataProperty(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
   ASSERT(args.length() == 4);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSObject, js_object, 0);
@@ -4157,8 +4075,7 @@
 }
 
 
-static MaybeObject* Runtime_SetProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
   NoHandleAllocation ha;
   RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
 
@@ -4191,9 +4108,7 @@
 
 // Set a local property, even if it is READ_ONLY.  If the property does not
 // exist, it will be added with attributes NONE.
-static MaybeObject* Runtime_IgnoreAttributesAndSetProperty(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
   NoHandleAllocation ha;
   RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
   CONVERT_CHECKED(JSObject, object, args[0]);
@@ -4214,8 +4129,7 @@
 }
 
 
-static MaybeObject* Runtime_DeleteProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
@@ -4246,8 +4160,7 @@
 }
 
 
-static MaybeObject* Runtime_HasLocalProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, key, args[1]);
@@ -4277,8 +4190,7 @@
 }
 
 
-static MaybeObject* Runtime_HasProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
 
@@ -4292,8 +4204,7 @@
 }
 
 
-static MaybeObject* Runtime_HasElement(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
 
@@ -4308,8 +4219,7 @@
 }
 
 
-static MaybeObject* Runtime_IsPropertyEnumerable(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -4326,8 +4236,7 @@
 }
 
 
-static MaybeObject* Runtime_GetPropertyNames(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSObject, object, 0);
@@ -4340,8 +4249,7 @@
 // all enumerable properties of the object and its prototypes
 // have none, the map of the object. This is used to speed up
 // the check for deletions during a for-in.
-static MaybeObject* Runtime_GetPropertyNamesFast(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
   ASSERT(args.length() == 1);
 
   CONVERT_CHECKED(JSObject, raw_object, args[0]);
@@ -4377,8 +4285,7 @@
 
 // Return the names of the local named properties.
 // args[0]: object
-static MaybeObject* Runtime_GetLocalPropertyNames(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   if (!args[0]->IsJSObject()) {
@@ -4464,8 +4371,7 @@
 
 // Return the names of the local indexed properties.
 // args[0]: object
-static MaybeObject* Runtime_GetLocalElementNames(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalElementNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   if (!args[0]->IsJSObject()) {
@@ -4482,8 +4388,7 @@
 
 // Return information on whether an object has a named or indexed interceptor.
 // args[0]: object
-static MaybeObject* Runtime_GetInterceptorInfo(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetInterceptorInfo) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   if (!args[0]->IsJSObject()) {
@@ -4501,9 +4406,7 @@
 
 // Return property names from named interceptor.
 // args[0]: object
-static MaybeObject* Runtime_GetNamedInterceptorPropertyNames(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetNamedInterceptorPropertyNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -4518,9 +4421,7 @@
 
 // Return element names from indexed interceptor.
 // args[0]: object
-static MaybeObject* Runtime_GetIndexedInterceptorElementNames(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetIndexedInterceptorElementNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -4533,8 +4434,7 @@
 }
 
 
-static MaybeObject* Runtime_LocalKeys(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
   ASSERT_EQ(args.length(), 1);
   CONVERT_CHECKED(JSObject, raw_object, args[0]);
   HandleScope scope(isolate);
@@ -4579,8 +4479,7 @@
 }
 
 
-static MaybeObject* Runtime_GetArgumentsProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -4633,8 +4532,7 @@
 }
 
 
-static MaybeObject* Runtime_ToFastProperties(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
   HandleScope scope(isolate);
 
   ASSERT(args.length() == 1);
@@ -4650,8 +4548,7 @@
 }
 
 
-static MaybeObject* Runtime_ToSlowProperties(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ToSlowProperties) {
   HandleScope scope(isolate);
 
   ASSERT(args.length() == 1);
@@ -4664,8 +4561,7 @@
 }
 
 
-static MaybeObject* Runtime_ToBool(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -4675,8 +4571,7 @@
 
 // Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
 // Possible optimizations: put the type string into the oddballs.
-static MaybeObject* Runtime_Typeof(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Typeof) {
   NoHandleAllocation ha;
 
   Object* obj = args[0];
@@ -4735,8 +4630,7 @@
 }
 
 
-static MaybeObject* Runtime_StringToNumber(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(String, subject, args[0]);
@@ -4790,9 +4684,7 @@
 }
 
 
-static MaybeObject* Runtime_StringFromCharCodeArray(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringFromCharCodeArray) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -4872,8 +4764,7 @@
 }
 
 
-static MaybeObject* Runtime_URIEscape(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) {
   const char hex_chars[] = "0123456789ABCDEF";
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
@@ -4992,8 +4883,7 @@
 }
 
 
-static MaybeObject* Runtime_URIUnescape(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(String, source, args[0]);
@@ -5237,8 +5127,7 @@
 }
 
 
-static MaybeObject* Runtime_QuoteJSONString(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
   NoHandleAllocation ha;
   CONVERT_CHECKED(String, str, args[0]);
   if (!str->IsFlat()) {
@@ -5260,8 +5149,7 @@
 }
 
 
-static MaybeObject* Runtime_QuoteJSONStringComma(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) {
   NoHandleAllocation ha;
   CONVERT_CHECKED(String, str, args[0]);
   if (!str->IsFlat()) {
@@ -5282,8 +5170,7 @@
   }
 }
 
-static MaybeObject* Runtime_StringParseInt(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
   NoHandleAllocation ha;
 
   CONVERT_CHECKED(String, s, args[0]);
@@ -5297,8 +5184,7 @@
 }
 
 
-static MaybeObject* Runtime_StringParseFloat(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
   NoHandleAllocation ha;
   CONVERT_CHECKED(String, str, args[0]);
 
@@ -5589,15 +5475,13 @@
 }
 
 
-static MaybeObject* Runtime_StringToLowerCase(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToLowerCase) {
   return ConvertCase<ToLowerTraits>(
       args, isolate, isolate->runtime_state()->to_lower_mapping());
 }
 
 
-static MaybeObject* Runtime_StringToUpperCase(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) {
   return ConvertCase<ToUpperTraits>(
       args, isolate, isolate->runtime_state()->to_upper_mapping());
 }
@@ -5608,8 +5492,7 @@
 }
 
 
-static MaybeObject* Runtime_StringTrim(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
@@ -5659,8 +5542,7 @@
 }
 
 
-static MaybeObject* Runtime_StringSplit(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
   ASSERT(args.length() == 3);
   HandleScope handle_scope(isolate);
   CONVERT_ARG_CHECKED(String, subject, 0);
@@ -5791,8 +5673,7 @@
 
 // Converts a String to JSArray.
 // For example, "foo" => ["f", "o", "o"].
-static MaybeObject* Runtime_StringToArray(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(String, s, 0);
@@ -5840,8 +5721,7 @@
 }
 
 
-static MaybeObject* Runtime_NewStringWrapper(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(String, value, args[0]);
@@ -5856,8 +5736,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToString(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5868,9 +5747,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToStringSkipCache(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5881,8 +5758,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToInteger(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5896,9 +5772,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToIntegerMapMinusZero(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5917,8 +5791,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToJSUint32(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5927,8 +5800,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToJSInt32(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5944,8 +5816,7 @@
 
 // Converts a Number to a Smi, if possible. Returns NaN if the number is not
 // a small integer.
-static MaybeObject* Runtime_NumberToSmi(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5964,16 +5835,14 @@
 }
 
 
-static MaybeObject* Runtime_AllocateHeapNumber(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateHeapNumber) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 0);
   return isolate->heap()->AllocateHeapNumber(0);
 }
 
 
-static MaybeObject* Runtime_NumberAdd(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -5983,8 +5852,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberSub(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -5994,8 +5862,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberMul(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6005,8 +5872,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberUnaryMinus(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -6015,8 +5881,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberAlloc(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 0);
 
@@ -6024,8 +5889,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberDiv(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6035,8 +5899,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberMod(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6049,8 +5912,7 @@
 }
 
 
-static MaybeObject* Runtime_StringAdd(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, str1, args[0]);
@@ -6099,8 +5961,7 @@
 }
 
 
-static MaybeObject* Runtime_StringBuilderConcat(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
   CONVERT_CHECKED(JSArray, array, args[0]);
@@ -6213,8 +6074,7 @@
 }
 
 
-static MaybeObject* Runtime_StringBuilderJoin(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
   CONVERT_CHECKED(JSArray, array, args[0]);
@@ -6298,8 +6158,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberOr(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6309,8 +6168,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberAnd(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6320,8 +6178,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberXor(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6331,8 +6188,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberNot(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -6341,8 +6197,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberShl(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6352,8 +6207,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberShr(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6363,8 +6217,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberSar(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6374,8 +6227,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberEquals(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6394,8 +6246,7 @@
 }
 
 
-static MaybeObject* Runtime_StringEquals(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6413,8 +6264,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberCompare(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
@@ -6429,9 +6279,7 @@
 
 // Compare two Smis as if they were converted to strings and then
 // compared lexicographically.
-static MaybeObject* Runtime_SmiLexicographicCompare(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6554,8 +6402,7 @@
 }
 
 
-static MaybeObject* Runtime_StringCompare(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6590,8 +6437,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_acos(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_acos()->Increment();
@@ -6601,8 +6447,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_asin(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_asin()->Increment();
@@ -6612,8 +6457,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_atan(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_atan()->Increment();
@@ -6626,8 +6470,7 @@
 static const double kPiDividedBy4 = 0.78539816339744830962;
 
 
-static MaybeObject* Runtime_Math_atan2(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   isolate->counters()->math_atan2()->Increment();
@@ -6650,8 +6493,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_ceil(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_ceil()->Increment();
@@ -6661,8 +6503,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_cos(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_cos()->Increment();
@@ -6672,8 +6513,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_exp(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_exp()->Increment();
@@ -6683,8 +6523,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_floor(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_floor()->Increment();
@@ -6694,8 +6533,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_log(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_log()->Increment();
@@ -6705,8 +6543,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_pow(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   isolate->counters()->math_pow()->Increment();
@@ -6726,8 +6563,7 @@
 
 // Fast version of Math.pow if we know that y is not an integer and
 // y is not -0.5 or 0.5. Used as slowcase from codegen.
-static MaybeObject* Runtime_Math_pow_cfunction(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   CONVERT_DOUBLE_CHECKED(x, args[0]);
@@ -6742,8 +6578,7 @@
 }
 
 
-static MaybeObject* Runtime_RoundNumber(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_round()->Increment();
@@ -6779,8 +6614,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_sin(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_sin()->Increment();
@@ -6790,8 +6624,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_sqrt(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_sqrt()->Increment();
@@ -6801,8 +6634,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_tan(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_tan()->Increment();
@@ -6857,8 +6689,7 @@
 }
 
 
-static MaybeObject* Runtime_DateMakeDay(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
@@ -7157,8 +6988,7 @@
 }
 
 
-static MaybeObject* Runtime_DateYMDFromTime(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateYMDFromTime) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -7181,8 +7011,7 @@
 }
 
 
-static MaybeObject* Runtime_NewArgumentsFast(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
@@ -7218,8 +7047,7 @@
 }
 
 
-static MaybeObject* Runtime_NewClosure(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(Context, context, 0);
@@ -7238,42 +7066,69 @@
   return *result;
 }
 
-static MaybeObject* Runtime_NewObjectFromBound(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+
+static SmartPointer<Object**> GetNonBoundArguments(int bound_argc,
+                                                   int* total_argc) {
+  // Find frame containing arguments passed to the caller.
+  JavaScriptFrameIterator it;
+  JavaScriptFrame* frame = it.frame();
+  List<JSFunction*> functions(2);
+  frame->GetFunctions(&functions);
+  if (functions.length() > 1) {
+    int inlined_frame_index = functions.length() - 1;
+    JSFunction* inlined_function = functions[inlined_frame_index];
+    int args_count = inlined_function->shared()->formal_parameter_count();
+    ScopedVector<SlotRef> args_slots(args_count);
+    SlotRef::ComputeSlotMappingForArguments(frame,
+                                            inlined_frame_index,
+                                            &args_slots);
+
+    *total_argc = bound_argc + args_count;
+    SmartPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+    for (int i = 0; i < args_count; i++) {
+      Handle<Object> val = args_slots[i].GetValue();
+      param_data[bound_argc + i] = val.location();
+    }
+    return param_data;
+  } else {
+    it.AdvanceToArgumentsFrame();
+    frame = it.frame();
+    int args_count = frame->ComputeParametersCount();
+
+    *total_argc = bound_argc + args_count;
+    SmartPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+    for (int i = 0; i < args_count; i++) {
+      Handle<Object> val = Handle<Object>(frame->GetParameter(i));
+      param_data[bound_argc + i] = val.location();
+    }
+    return param_data;
+  }
+}
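GetNonBoundArguments is the one piece of new logic in this otherwise mechanical patch: where the old code asserted that the calling frame was not optimized and simply advanced to an arguments frame, the helper also handles a caller that the optimizing compiler has inlined, recovering that caller's arguments through SlotRef::ComputeSlotMappingForArguments. Either branch fills indices [bound_argc, total_argc) of the parameter array, and Runtime_NewObjectFromBound below then prepends the bound arguments at indices [0, bound_argc). The standalone sketch that follows (plain C++, no V8 types) only illustrates that ordering:

    // Standalone illustration of the resulting layout: bound arguments first,
    // the caller's own arguments appended.  Not the V8 API.
    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    static std::vector<std::string> BuildParams(const std::vector<std::string>& bound,
                                                const std::vector<std::string>& caller) {
      std::vector<std::string> params(bound.size() + caller.size());
      for (std::size_t i = 0; i < caller.size(); ++i)  // filled by the helper
        params[bound.size() + i] = caller[i];
      for (std::size_t i = 0; i < bound.size(); ++i)   // prepended by the caller's loop
        params[i] = bound[i];
      return params;
    }

    int main() {
      for (const std::string& p : BuildParams({"a", "b"}, {"c"}))
        std::cout << p << '\n';                        // prints a, b, c
    }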
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   // First argument is a function to use as a constructor.
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
 
   // Second argument is either null or an array of bound arguments.
-  FixedArray* bound_args = NULL;
+  Handle<FixedArray> bound_args;
   int bound_argc = 0;
   if (!args[1]->IsNull()) {
     CONVERT_ARG_CHECKED(JSArray, params, 1);
     RUNTIME_ASSERT(params->HasFastElements());
-    bound_args = FixedArray::cast(params->elements());
+    bound_args = Handle<FixedArray>(FixedArray::cast(params->elements()));
     bound_argc = Smi::cast(params->length())->value();
   }
 
-  // Find frame containing arguments passed to the caller.
-  JavaScriptFrameIterator it;
-  JavaScriptFrame* frame = it.frame();
-  ASSERT(!frame->is_optimized());
-  it.AdvanceToArgumentsFrame();
-  frame = it.frame();
-  int argc = frame->ComputeParametersCount();
-
-  // Prepend bound arguments to caller's arguments.
-  int total_argc = bound_argc + argc;
-  SmartPointer<Object**> param_data(NewArray<Object**>(total_argc));
+  int total_argc = 0;
+  SmartPointer<Object**> param_data =
+      GetNonBoundArguments(bound_argc, &total_argc);
   for (int i = 0; i < bound_argc; i++) {
     Handle<Object> val = Handle<Object>(bound_args->get(i));
     param_data[i] = val.location();
   }
-  for (int i = 0; i < argc; i++) {
-    Handle<Object> val = Handle<Object>(frame->GetParameter(i));
-    param_data[bound_argc + i] = val.location();
-  }
 
   bool exception = false;
   Handle<Object> result =
@@ -7304,8 +7159,7 @@
 }
 
 
-static MaybeObject* Runtime_NewObject(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -7385,8 +7239,7 @@
 }
 
 
-static MaybeObject* Runtime_FinalizeInstanceSize(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -7398,8 +7251,7 @@
 }
 
 
-static MaybeObject* Runtime_LazyCompile(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyCompile) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -7430,8 +7282,7 @@
 }
 
 
-static MaybeObject* Runtime_LazyRecompile(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   Handle<JSFunction> function = args.at<JSFunction>(0);
@@ -7462,8 +7313,7 @@
 }
 
 
-static MaybeObject* Runtime_NotifyDeoptimized(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   RUNTIME_ASSERT(args[0]->IsSmi());
@@ -7537,16 +7387,14 @@
 }
 
 
-static MaybeObject* Runtime_NotifyOSR(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) {
   Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
   delete deoptimizer;
   return isolate->heap()->undefined_value();
 }
 
 
-static MaybeObject* Runtime_DeoptimizeFunction(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -7558,9 +7406,7 @@
 }
 
 
-static MaybeObject* Runtime_CompileForOnStackReplacement(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -7674,8 +7520,7 @@
 }
 
 
-static MaybeObject* Runtime_GetFunctionDelegate(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionDelegate) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   RUNTIME_ASSERT(!args[0]->IsJSFunction());
@@ -7683,8 +7528,7 @@
 }
 
 
-static MaybeObject* Runtime_GetConstructorDelegate(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   RUNTIME_ASSERT(!args[0]->IsJSFunction());
@@ -7692,8 +7536,7 @@
 }
 
 
-static MaybeObject* Runtime_NewContext(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewContext) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -7744,24 +7587,21 @@
 }
 
 
-static MaybeObject* Runtime_PushContext(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PushContext) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   return PushContextHelper(isolate, args[0], false);
 }
 
 
-static MaybeObject* Runtime_PushCatchContext(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   return PushContextHelper(isolate, args[0], true);
 }
 
 
-static MaybeObject* Runtime_DeleteContextSlot(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
@@ -7921,21 +7761,17 @@
 }
 
 
-static ObjectPair Runtime_LoadContextSlot(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlot) {
   return LoadContextSlotHelper(args, isolate, true);
 }
 
 
-static ObjectPair Runtime_LoadContextSlotNoReferenceError(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlotNoReferenceError) {
   return LoadContextSlotHelper(args, isolate, false);
 }
 
 
-static MaybeObject* Runtime_StoreContextSlot(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
 
@@ -8009,8 +7845,7 @@
 }
 
 
-static MaybeObject* Runtime_Throw(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Throw) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -8018,8 +7853,7 @@
 }
 
 
-static MaybeObject* Runtime_ReThrow(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -8027,16 +7861,13 @@
 }
 
 
-static MaybeObject* Runtime_PromoteScheduledException(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PromoteScheduledException) {
   ASSERT_EQ(0, args.length());
   return isolate->PromoteScheduledException();
 }
 
 
-static MaybeObject* Runtime_ThrowReferenceError(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowReferenceError) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -8048,8 +7879,7 @@
 }
 
 
-static MaybeObject* Runtime_StackGuard(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
   ASSERT(args.length() == 0);
 
   // First check if this is a real stack overflow.
@@ -8148,8 +7978,7 @@
 }
 
 
-static MaybeObject* Runtime_TraceEnter(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
   ASSERT(args.length() == 0);
   NoHandleAllocation ha;
   PrintTransition(NULL);
@@ -8157,16 +7986,14 @@
 }
 
 
-static MaybeObject* Runtime_TraceExit(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceExit) {
   NoHandleAllocation ha;
   PrintTransition(args[0]);
   return args[0];  // return TOS
 }
 
 
-static MaybeObject* Runtime_DebugPrint(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -8197,8 +8024,7 @@
 }
 
 
-static MaybeObject* Runtime_DebugTrace(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) {
   ASSERT(args.length() == 0);
   NoHandleAllocation ha;
   isolate->PrintStack();
@@ -8206,8 +8032,7 @@
 }
 
 
-static MaybeObject* Runtime_DateCurrentTime(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCurrentTime) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 0);
 
@@ -8220,8 +8045,7 @@
 }
 
 
-static MaybeObject* Runtime_DateParseString(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
@@ -8251,8 +8075,7 @@
 }
 
 
-static MaybeObject* Runtime_DateLocalTimezone(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -8262,8 +8085,7 @@
 }
 
 
-static MaybeObject* Runtime_DateLocalTimeOffset(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimeOffset) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 0);
 
@@ -8271,9 +8093,7 @@
 }
 
 
-static MaybeObject* Runtime_DateDaylightSavingsOffset(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateDaylightSavingsOffset) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -8282,8 +8102,7 @@
 }
 
 
-static MaybeObject* Runtime_GlobalReceiver(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
   ASSERT(args.length() == 1);
   Object* global = args[0];
   if (!global->IsJSGlobalObject()) return isolate->heap()->null_value();
@@ -8291,7 +8110,7 @@
 }
 
 
-static MaybeObject* Runtime_ParseJson(RUNTIME_CALLING_CONVENTION) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
   HandleScope scope(isolate);
   ASSERT_EQ(1, args.length());
   CONVERT_ARG_CHECKED(String, source, 0);
@@ -8306,8 +8125,7 @@
 }
 
 
-static MaybeObject* Runtime_CompileString(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
   HandleScope scope(isolate);
   ASSERT_EQ(1, args.length());
   CONVERT_ARG_CHECKED(String, source, 0);
@@ -8346,9 +8164,7 @@
 }
 
 
-static ObjectPair Runtime_ResolvePossiblyDirectEval(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
   ASSERT(args.length() == 4);
 
   HandleScope scope(isolate);
@@ -8424,9 +8240,7 @@
 }
 
 
-static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEvalNoLookup) {
   ASSERT(args.length() == 4);
 
   HandleScope scope(isolate);
@@ -8449,9 +8263,7 @@
 }
 
 
-static MaybeObject* Runtime_SetNewFunctionAttributes(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNewFunctionAttributes) {
   // This utility adjusts the property attributes for newly created Function
   // object ("new Function(...)") by changing the map.
   // All it does is changing the prototype property to enumerable
@@ -8471,8 +8283,7 @@
 }
 
 
-static MaybeObject* Runtime_AllocateInNewSpace(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
   // Allocate a block of memory in NewSpace (filled with a filler).
   // Use as fallback for allocation in generated code when NewSpace
   // is full.
@@ -8497,8 +8308,7 @@
 // Push an object unto an array of objects if it is not already in the
 // array.  Returns true if the element was pushed on the stack and
 // false otherwise.
-static MaybeObject* Runtime_PushIfAbsent(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSArray, array, args[0]);
   CONVERT_CHECKED(JSObject, element, args[1]);
@@ -8947,8 +8757,7 @@
  * TODO(581): Fix non-compliance for very large concatenations and update to
  * following the ECMAScript 5 specification.
  */
-static MaybeObject* Runtime_ArrayConcat(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
   ASSERT(args.length() == 1);
   HandleScope handle_scope(isolate);
 
@@ -9036,8 +8845,7 @@
 
 // This will not allocate (flatten the string), but it may run
 // very slowly for very deeply nested ConsStrings.  For debugging use only.
-static MaybeObject* Runtime_GlobalPrint(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -9055,8 +8863,7 @@
 // and are followed by non-existing element. Does not change the length
 // property.
 // Returns the number of non-undefined elements collected.
-static MaybeObject* Runtime_RemoveArrayHoles(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSObject, object, args[0]);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
@@ -9065,8 +8872,7 @@
 
 
 // Move contents of argument 0 (an array) to argument 1 (an array)
-static MaybeObject* Runtime_MoveArrayContents(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSArray, from, args[0]);
   CONVERT_CHECKED(JSArray, to, args[1]);
@@ -9093,9 +8899,7 @@
 
 
 // How many elements does this object/array have?
-static MaybeObject* Runtime_EstimateNumberOfElements(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(JSObject, object, args[0]);
   HeapObject* elements = object->elements();
@@ -9109,8 +8913,7 @@
 }
 
 
-static MaybeObject* Runtime_SwapElements(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SwapElements) {
   HandleScope handle_scope(isolate);
 
   ASSERT_EQ(3, args.length());
@@ -9145,8 +8948,7 @@
 // intervals (pair of a negative integer (-start-1) followed by a
 // positive (length)) or undefined values.
 // Intervals can span over some keys that are not in the object.
-static MaybeObject* Runtime_GetArrayKeys(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSObject, array, 0);
@@ -9186,8 +8988,7 @@
 // to the way accessors are implemented, it is set for both the getter
 // and setter on the first call to DefineAccessor and ignored on
 // subsequent calls.
-static MaybeObject* Runtime_DefineAccessor(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineAccessor) {
   RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
   // Compute attributes.
   PropertyAttributes attributes = NONE;
@@ -9207,8 +9008,7 @@
 }
 
 
-static MaybeObject* Runtime_LookupAccessor(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
   ASSERT(args.length() == 3);
   CONVERT_CHECKED(JSObject, obj, args[0]);
   CONVERT_CHECKED(String, name, args[1]);
@@ -9218,8 +9018,7 @@
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-static MaybeObject* Runtime_DebugBreak(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugBreak) {
   ASSERT(args.length() == 0);
   return Execution::DebugBreakHelper();
 }
@@ -9241,8 +9040,7 @@
 // args[0]: debug event listener function to set or null or undefined for
 //          clearing the event listener function
 // args[1]: object supplied during callback
-static MaybeObject* Runtime_SetDebugEventListener(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) {
   ASSERT(args.length() == 2);
   RUNTIME_ASSERT(args[0]->IsJSFunction() ||
                  args[0]->IsUndefined() ||
@@ -9255,8 +9053,7 @@
 }
 
 
-static MaybeObject* Runtime_Break(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Break) {
   ASSERT(args.length() == 0);
   isolate->stack_guard()->DebugBreak();
   return isolate->heap()->undefined_value();
@@ -9332,9 +9129,7 @@
 // 4: Setter function if defined
 // Items 2-4 are only filled if the property has either a getter or a setter
 // defined through __defineGetter__ and/or __defineSetter__.
-static MaybeObject* Runtime_DebugGetPropertyDetails(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
   HandleScope scope(isolate);
 
   ASSERT(args.length() == 2);
@@ -9434,8 +9229,7 @@
 }
 
 
-static MaybeObject* Runtime_DebugGetProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
   HandleScope scope(isolate);
 
   ASSERT(args.length() == 2);
@@ -9454,9 +9248,7 @@
 
 // Return the property type calculated from the property details.
 // args[0]: smi with property details.
-static MaybeObject* Runtime_DebugPropertyTypeFromDetails(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(Smi, details, args[0]);
   PropertyType type = PropertyDetails(details).type();
@@ -9466,9 +9258,7 @@
 
 // Return the property attribute calculated from the property details.
 // args[0]: smi with property details.
-static MaybeObject* Runtime_DebugPropertyAttributesFromDetails(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(Smi, details, args[0]);
   PropertyAttributes attributes = PropertyDetails(details).attributes();
@@ -9478,9 +9268,7 @@
 
 // Return the property insertion index calculated from the property details.
 // args[0]: smi with property details.
-static MaybeObject* Runtime_DebugPropertyIndexFromDetails(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(Smi, details, args[0]);
   int index = PropertyDetails(details).index();
@@ -9491,9 +9279,7 @@
 // Return property value from named interceptor.
 // args[0]: object
 // args[1]: property name
-static MaybeObject* Runtime_DebugNamedInterceptorPropertyValue(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -9508,9 +9294,7 @@
 // Return element value from indexed interceptor.
 // args[0]: object
 // args[1]: index
-static MaybeObject* Runtime_DebugIndexedInterceptorElementValue(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -9521,8 +9305,7 @@
 }
 
 
-static MaybeObject* Runtime_CheckExecutionState(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckExecutionState) {
   ASSERT(args.length() >= 1);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   // Check that the break id is valid.
@@ -9536,14 +9319,14 @@
 }
 
 
-static MaybeObject* Runtime_GetFrameCount(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameCount) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
   // Check arguments.
   Object* result;
-  { MaybeObject* maybe_result = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_result = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
@@ -9587,14 +9370,14 @@
 // Arguments name, value
 // Locals name, value
 // Return value if any
-static MaybeObject* Runtime_GetFrameDetails(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
   // Check arguments.
   Object* check;
-  { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
   CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
@@ -10211,14 +9994,14 @@
 };
 
 
-static MaybeObject* Runtime_GetScopeCount(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeCount) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
   // Check arguments.
   Object* check;
-  { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
   CONVERT_CHECKED(Smi, wrapped_id, args[1]);
@@ -10250,14 +10033,14 @@
 // The array returned contains the following information:
 // 0: Scope type
 // 1: Scope object
-static MaybeObject* Runtime_GetScopeDetails(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
 
   // Check arguments.
   Object* check;
-  { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
   CONVERT_CHECKED(Smi, wrapped_id, args[1]);
@@ -10292,8 +10075,7 @@
 }
 
 
-static MaybeObject* Runtime_DebugPrintScopes(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrintScopes) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 0);
 
@@ -10309,14 +10091,14 @@
 }
 
 
-static MaybeObject* Runtime_GetThreadCount(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadCount) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
   // Check arguments.
   Object* result;
-  { MaybeObject* maybe_result = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_result = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
@@ -10345,14 +10127,14 @@
 // The array returned contains the following information:
 // 0: Is current thread?
 // 1: Thread id
-static MaybeObject* Runtime_GetThreadDetails(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadDetails) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
   // Check arguments.
   Object* check;
-  { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
   CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
@@ -10395,8 +10177,7 @@
 
 // Sets the disable break state
 // args[0]: disable break state
-static MaybeObject* Runtime_SetDisableBreak(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDisableBreak) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_BOOLEAN_CHECKED(disable_break, args[0]);
@@ -10405,8 +10186,7 @@
 }
 
 
-static MaybeObject* Runtime_GetBreakLocations(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -10425,8 +10205,7 @@
 // args[0]: function
 // args[1]: number: break source position (within the function source)
 // args[2]: number: break point object
-static MaybeObject* Runtime_SetFunctionBreakPoint(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(JSFunction, fun, 0);
@@ -10527,8 +10306,7 @@
 // args[0]: script to set break point in
 // args[1]: number: break source position (within the script source)
 // args[2]: number: break point object
-static MaybeObject* Runtime_SetScriptBreakPoint(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(JSValue, wrapper, 0);
@@ -10562,8 +10340,7 @@
 
 // Clear a break point
 // args[0]: number: break point object
-static MaybeObject* Runtime_ClearBreakPoint(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearBreakPoint) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   Handle<Object> break_point_object_arg = args.at<Object>(0);
@@ -10578,8 +10355,7 @@
 // Change the state of break on exceptions.
 // args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
 // args[1]: Boolean indicating on/off.
-static MaybeObject* Runtime_ChangeBreakOnException(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ChangeBreakOnException) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   RUNTIME_ASSERT(args[0]->IsNumber());
@@ -10597,8 +10373,7 @@
 
 // Returns the state of break on exceptions
 // args[0]: boolean indicating uncaught exceptions
-static MaybeObject* Runtime_IsBreakOnException(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsBreakOnException) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   RUNTIME_ASSERT(args[0]->IsNumber());
@@ -10615,13 +10390,13 @@
 // args[1]: step action from the enumeration StepAction
 // args[2]: number of times to perform the step, for step out it is the number
 //          of frames to step down.
-static MaybeObject* Runtime_PrepareStep(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   // Check arguments.
   Object* check;
-  { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
   if (!args[1]->IsNumber() || !args[2]->IsNumber()) {
@@ -10655,8 +10430,7 @@
 
 
 // Clear all stepping set by PrepareStep.
-static MaybeObject* Runtime_ClearStepping(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearStepping) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 0);
   isolate->debug()->ClearStepping();
@@ -10739,16 +10513,15 @@
 // stack frame presenting the same view of the values of parameters and
 // local variables as if the piece of JavaScript was evaluated at the point
 // where the function on the stack frame is currently stopped.
-static MaybeObject* Runtime_DebugEvaluate(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
   HandleScope scope(isolate);
 
   // Check the execution state and decode arguments frame and source to be
   // evaluated.
   ASSERT(args.length() == 5);
   Object* check_result;
-  { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args,
-                                                                  isolate);
+  { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check_result->ToObject(&check_result)) {
       return maybe_check_result;
     }
@@ -10867,16 +10640,15 @@
 }
 
 
-static MaybeObject* Runtime_DebugEvaluateGlobal(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
   HandleScope scope(isolate);
 
   // Check the execution state and decode arguments frame and source to be
   // evaluated.
   ASSERT(args.length() == 4);
   Object* check_result;
-  { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args,
-                                                                  isolate);
+  { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check_result->ToObject(&check_result)) {
       return maybe_check_result;
     }
@@ -10939,8 +10711,7 @@
 }
 
 
-static MaybeObject* Runtime_DebugGetLoadedScripts(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetLoadedScripts) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 0);
 
@@ -11041,8 +10812,7 @@
 // args[0]: the object to find references to
 // args[1]: constructor function for instances to exclude (Mirror)
 // args[2]: the maximum number of objects to return
-static MaybeObject* Runtime_DebugReferencedBy(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
   ASSERT(args.length() == 3);
 
   // First perform a full GC in order to avoid references from dead objects.
@@ -11122,8 +10892,7 @@
 // Scan the heap for objects constructed by a specific function.
 // args[0]: the constructor to find instances of
 // args[1]: the maximum number of objects to return
-static MaybeObject* Runtime_DebugConstructedBy(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
   ASSERT(args.length() == 2);
 
   // First perform a full GC in order to avoid dead objects.
@@ -11161,8 +10930,7 @@
 
 // Find the effective prototype object as returned by __proto__.
 // args[0]: the object to find the prototype for.
-static MaybeObject* Runtime_DebugGetPrototype(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
   ASSERT(args.length() == 1);
 
   CONVERT_CHECKED(JSObject, obj, args[0]);
@@ -11172,17 +10940,14 @@
 }
 
 
-static MaybeObject* Runtime_SystemBreak(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
   ASSERT(args.length() == 0);
   CPU::DebugBreak();
   return isolate->heap()->undefined_value();
 }
 
 
-static MaybeObject* Runtime_DebugDisassembleFunction(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
 #ifdef DEBUG
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -11198,9 +10963,7 @@
 }
 
 
-static MaybeObject* Runtime_DebugDisassembleConstructor(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
 #ifdef DEBUG
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -11216,9 +10979,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionGetInferredName(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -11254,9 +11015,8 @@
 // For a script, finds all SharedFunctionInfo's in the heap that point
 // to this script. Returns a JSArray of SharedFunctionInfo wrapped
 // in OpaqueReferences.
-static MaybeObject* Runtime_LiveEditFindSharedFunctionInfosForScript(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*,
+                 Runtime_LiveEditFindSharedFunctionInfosForScript) {
   ASSERT(args.length() == 1);
   HandleScope scope(isolate);
   CONVERT_CHECKED(JSValue, script_value, args[0]);
@@ -11288,9 +11048,7 @@
 // Returns a JSArray of compilation infos. The array is ordered so that
 // each function with all its descendants is always stored in a contiguous range
 // with the function itself going first. The root function is a script function.
-static MaybeObject* Runtime_LiveEditGatherCompileInfo(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_CHECKED(JSValue, script, args[0]);
@@ -11309,8 +11067,7 @@
 // Changes the source of the script to a new_source.
 // If old_script_name is provided (i.e. is a String), also creates a copy of
 // the script with its original source and sends notification to debugger.
-static MaybeObject* Runtime_LiveEditReplaceScript(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
   ASSERT(args.length() == 3);
   HandleScope scope(isolate);
   CONVERT_CHECKED(JSValue, original_script_value, args[0]);
@@ -11334,9 +11091,7 @@
 }
 
 
-static MaybeObject* Runtime_LiveEditFunctionSourceUpdated(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
   ASSERT(args.length() == 1);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSArray, shared_info, 0);
@@ -11345,9 +11100,7 @@
 
 
 // Replaces code of SharedFunctionInfo with a new one.
-static MaybeObject* Runtime_LiveEditReplaceFunctionCode(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSArray, new_compile_info, 0);
@@ -11357,9 +11110,7 @@
 }
 
 // Connects SharedFunctionInfo to another script.
-static MaybeObject* Runtime_LiveEditFunctionSetScript(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   Handle<Object> function_object(args[0], isolate);
@@ -11384,9 +11135,7 @@
 
 // In the code of a parent function, replaces the original function, which is
 // embedded as an object, with a substitution one.
-static MaybeObject* Runtime_LiveEditReplaceRefToNestedFunction(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) {
   ASSERT(args.length() == 3);
   HandleScope scope(isolate);
 
@@ -11406,9 +11155,7 @@
 // array of groups of 3 numbers:
 // (change_begin, change_end, change_end_new_position).
 // Each group describes a change in text; groups are sorted by change_begin.
-static MaybeObject* Runtime_LiveEditPatchFunctionPositions(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
@@ -11422,9 +11169,7 @@
 // checks that none of them have activations on stacks (of any thread).
 // Returns array of the same length with corresponding results of
 // LiveEdit::FunctionPatchabilityStatus type.
-static MaybeObject* Runtime_LiveEditCheckAndDropActivations(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
@@ -11436,8 +11181,7 @@
 // Compares 2 strings line-by-line, then token-wise, and returns the diff as
 // a JSArray of triplets (pos1, pos1_end, pos2_end) describing the list of
 // diff chunks.
-static MaybeObject* Runtime_LiveEditCompareStrings(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(String, s1, 0);
@@ -11449,9 +11193,7 @@
 
 // A testing entry. Returns statement position which is the closest to
 // source_position.
-static MaybeObject* Runtime_GetFunctionCodePositionFromSource(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -11488,8 +11230,7 @@
 // Calls specified function with or without entering the debugger.
 // This is used in unit tests to run code as if the debugger is entered or
 // simply to have a stack with a C++ frame in the middle.
-static MaybeObject* Runtime_ExecuteInDebugContext(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -11516,8 +11257,7 @@
 
 
 // Sets a v8 flag.
-static MaybeObject* Runtime_SetFlags(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
   CONVERT_CHECKED(String, arg, args[0]);
   SmartPointer<char> flags =
       arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -11528,16 +11268,14 @@
 
 // Performs a GC.
 // Presently, it only does a full GC.
-static MaybeObject* Runtime_CollectGarbage(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
   isolate->heap()->CollectAllGarbage(true);
   return isolate->heap()->undefined_value();
 }
 
 
 // Gets the current heap usage.
-static MaybeObject* Runtime_GetHeapUsage(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
   int usage = static_cast<int>(isolate->heap()->SizeOfObjects());
   if (!Smi::IsValid(usage)) {
     return *isolate->factory()->NewNumberFromInt(usage);
@@ -11547,8 +11285,7 @@
 
 
 // Captures a live object list from the present heap.
-static MaybeObject* Runtime_HasLOLEnabled(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLOLEnabled) {
 #ifdef LIVE_OBJECT_LIST
   return isolate->heap()->true_value();
 #else
@@ -11558,8 +11295,7 @@
 
 
 // Captures a live object list from the present heap.
-static MaybeObject* Runtime_CaptureLOL(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CaptureLOL) {
 #ifdef LIVE_OBJECT_LIST
   return LiveObjectList::Capture();
 #else
@@ -11569,8 +11305,7 @@
 
 
 // Deletes the specified live object list.
-static MaybeObject* Runtime_DeleteLOL(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteLOL) {
 #ifdef LIVE_OBJECT_LIST
   CONVERT_SMI_CHECKED(id, args[0]);
   bool success = LiveObjectList::Delete(id);
@@ -11587,8 +11322,7 @@
 // specified by id1 and id2.
 // If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
 // dumped.
-static MaybeObject* Runtime_DumpLOL(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DumpLOL) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
   CONVERT_SMI_CHECKED(id1, args[0]);
@@ -11606,8 +11340,7 @@
 
 // Gets the specified object as requested by the debugger.
 // This is only used for obj ids shown in live object lists.
-static MaybeObject* Runtime_GetLOLObj(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObj) {
 #ifdef LIVE_OBJECT_LIST
   CONVERT_SMI_CHECKED(obj_id, args[0]);
   Object* result = LiveObjectList::GetObj(obj_id);
@@ -11620,8 +11353,7 @@
 
 // Gets the obj id for the specified address if valid.
 // This is only used for obj ids shown in live object lists.
-static MaybeObject* Runtime_GetLOLObjId(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjId) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
   CONVERT_ARG_CHECKED(String, address, 0);
@@ -11634,8 +11366,7 @@
 
 
 // Gets the retainers that keep the specified object alive.
-static MaybeObject* Runtime_GetLOLObjRetainers(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjRetainers) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
   CONVERT_SMI_CHECKED(obj_id, args[0]);
@@ -11675,8 +11406,7 @@
 
 
 // Gets the reference path between 2 objects.
-static MaybeObject* Runtime_GetLOLPath(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLPath) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
   CONVERT_SMI_CHECKED(obj_id1, args[0]);
@@ -11699,8 +11429,7 @@
 
 // Generates the response to a debugger request for a list of all
 // previously captured live object lists.
-static MaybeObject* Runtime_InfoLOL(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InfoLOL) {
 #ifdef LIVE_OBJECT_LIST
   CONVERT_SMI_CHECKED(start, args[0]);
   CONVERT_SMI_CHECKED(count, args[1]);
@@ -11713,8 +11442,7 @@
 
 // Gets a dump of the specified object as requested by the debugger.
 // This is only used for obj ids shown in live object lists.
-static MaybeObject* Runtime_PrintLOLObj(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PrintLOLObj) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
   CONVERT_SMI_CHECKED(obj_id, args[0]);
@@ -11727,8 +11455,7 @@
 
 
 // Resets and releases all previously captured live object lists.
-static MaybeObject* Runtime_ResetLOL(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ResetLOL) {
 #ifdef LIVE_OBJECT_LIST
   LiveObjectList::Reset();
   return isolate->heap()->undefined_value();
@@ -11743,8 +11470,7 @@
 // specified by id1 and id2.
 // If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
 // summarized.
-static MaybeObject* Runtime_SummarizeLOL(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SummarizeLOL) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
   CONVERT_SMI_CHECKED(id1, args[0]);
@@ -11762,8 +11488,7 @@
 
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
-static MaybeObject* Runtime_ProfilerResume(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -11774,8 +11499,7 @@
 }
 
 
-static MaybeObject* Runtime_ProfilerPause(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -11822,8 +11546,7 @@
 // Get the script object from script data. NOTE: Regarding performance
 // see the NOTE for GetScriptFromScriptData.
 // args[0]: script data for the script to find the source for
-static MaybeObject* Runtime_GetScript(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScript) {
   HandleScope scope(isolate);
 
   ASSERT(args.length() == 1);
@@ -11868,8 +11591,7 @@
 // Collect the raw data for a stack trace.  Returns an array of 4
 // element segments each containing a receiver, function, code and
 // native code offset.
-static MaybeObject* Runtime_CollectStackTrace(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
   ASSERT_EQ(args.length(), 2);
   Handle<Object> caller = args.at<Object>(0);
   CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]);
@@ -11926,8 +11648,7 @@
 
 
 // Returns V8 version as a string.
-static MaybeObject* Runtime_GetV8Version(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
   ASSERT_EQ(args.length(), 0);
 
   NoHandleAllocation ha;
@@ -11939,8 +11660,7 @@
 }
 
 
-static MaybeObject* Runtime_Abort(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
   ASSERT(args.length() == 2);
   OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +
                                     Smi::cast(args[1])->value());
@@ -11951,8 +11671,7 @@
 }
 
 
-static MaybeObject* Runtime_GetFromCache(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
   // This is only called from codegen, so checks might be more lax.
   CONVERT_CHECKED(JSFunctionResultCache, cache, args[0]);
   Object* key = args[1];
@@ -12044,8 +11763,7 @@
 }
 
 
-static MaybeObject* Runtime_NewMessageObject(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewMessageObject) {
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(String, type, 0);
   CONVERT_ARG_CHECKED(JSArray, arguments, 1);
@@ -12060,30 +11778,25 @@
 }
 
 
-static MaybeObject* Runtime_MessageGetType(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetType) {
   CONVERT_CHECKED(JSMessageObject, message, args[0]);
   return message->type();
 }
 
 
-static MaybeObject* Runtime_MessageGetArguments(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetArguments) {
   CONVERT_CHECKED(JSMessageObject, message, args[0]);
   return message->arguments();
 }
 
 
-static MaybeObject* Runtime_MessageGetStartPosition(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) {
   CONVERT_CHECKED(JSMessageObject, message, args[0]);
   return Smi::FromInt(message->start_position());
 }
 
 
-static MaybeObject* Runtime_MessageGetScript(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
   CONVERT_CHECKED(JSMessageObject, message, args[0]);
   return message->script();
 }
@@ -12092,8 +11805,7 @@
 #ifdef DEBUG
 // ListNatives is ONLY used by the fuzz-natives.js in debug mode
 // Exclude the code in release mode.
-static MaybeObject* Runtime_ListNatives(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
   ASSERT(args.length() == 0);
   HandleScope scope;
 #define COUNT_ENTRY(Name, argc, ressize) + 1
@@ -12137,8 +11849,7 @@
 #endif
 
 
-static MaybeObject* Runtime_Log(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, format, args[0]);
   CONVERT_CHECKED(JSArray, elms, args[1]);
@@ -12148,7 +11859,7 @@
 }
 
 
-static MaybeObject* Runtime_IS_VAR(RUNTIME_CALLING_CONVENTION) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) {
   UNREACHABLE();  // implemented as macro in the parser
   return NULL;
 }
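
Note on the pattern applied throughout the runtime.cc hunks above: every entry
point drops the old two-macro form (a RUNTIME_CALLING_CONVENTION parameter list
followed by a RUNTIME_GET_ISOLATE statement) in favour of a single
RUNTIME_FUNCTION declaration, and internal forwarders such as
Runtime_CheckExecutionState are now invoked through RUNTIME_ARGUMENTS. The
macro definitions live in src/arguments.h and are not part of this patch; the
sketch below shows one plausible expansion, for illustration only.

    // Hypothetical expansions, assuming the (args, isolate) parameter order
    // implied by the RUNTIME_ARGUMENTS call sites above; the real definitions
    // are in src/arguments.h and may differ in detail.
    #define RUNTIME_FUNCTION(Type, Name) \
        Type Name(Arguments args, Isolate* isolate)

    #define DECLARE_RUNTIME_FUNCTION(Type, Name) \
        Type Name(Arguments args, Isolate* isolate)

    #define RUNTIME_ARGUMENTS(isolate, args) args, isolate

    // A converted entry point then reads as:
    RUNTIME_FUNCTION(MaybeObject*, Runtime_Example) {
      HandleScope scope(isolate);    // isolate now comes from the signature
      ASSERT(args.length() == 1);
      return isolate->heap()->undefined_value();
    }
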
diff --git a/src/scopes.cc b/src/scopes.cc
index f4bcaa8..70e11ed 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -361,12 +361,14 @@
 }
 
 
-VariableProxy* Scope::NewUnresolved(Handle<String> name, bool inside_with) {
+VariableProxy* Scope::NewUnresolved(Handle<String> name,
+                                    bool inside_with,
+                                    int position) {
   // Note that we must not share the unresolved variables with
   // the same name because they may be removed selectively via
   // RemoveUnresolved().
   ASSERT(!resolved());
-  VariableProxy* proxy = new VariableProxy(name, false, inside_with);
+  VariableProxy* proxy = new VariableProxy(name, false, inside_with, position);
   unresolved_.Add(proxy);
   return proxy;
 }
diff --git a/src/scopes.h b/src/scopes.h
index 24622b4..5f031ed 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -149,7 +149,9 @@
   void AddParameter(Variable* var);
 
   // Create a new unresolved variable.
-  virtual VariableProxy* NewUnresolved(Handle<String> name, bool inside_with);
+  virtual VariableProxy* NewUnresolved(Handle<String> name,
+                                       bool inside_with,
+                                       int position = RelocInfo::kNoPosition);
 
   // Remove an unresolved variable. During parsing, an unresolved variable
   // may have been added optimistically, but then only the variable name
@@ -479,7 +481,9 @@
 
   virtual Variable* Lookup(Handle<String> name)  { return NULL; }
 
-  virtual VariableProxy* NewUnresolved(Handle<String> name, bool inside_with) {
+  virtual VariableProxy* NewUnresolved(Handle<String> name,
+                                       bool inside_with,
+                                       int position = RelocInfo::kNoPosition) {
     return NULL;
   }
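
The extra position parameter defaults to RelocInfo::kNoPosition, so existing
callers of NewUnresolved keep compiling unchanged while the parser can start
recording where an unresolved variable was referenced. A hedged example of a
caller that supplies a position (the actual parser call site is not shown in
this patch, and the scanner accessor below is an assumption):

    // Illustrative parser-side call; scanner().location().beg_pos is assumed.
    VariableProxy* proxy = top_scope_->NewUnresolved(
        name, inside_with(), scanner().location().beg_pos);
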
 
diff --git a/src/spaces.cc b/src/spaces.cc
index 20700e1..eb4fa7d 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -3014,7 +3014,8 @@
       }
 
       // Free the chunk.
-      heap()->mark_compact_collector()->ReportDeleteIfNeeded(object);
+      heap()->mark_compact_collector()->ReportDeleteIfNeeded(
+          object, heap()->isolate());
       LiveObjectList::ProcessNonLive(object);
 
       size_ -= static_cast<int>(chunk_size);
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 435e71d..0c6a7f7 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1278,8 +1278,7 @@
 // StubCompiler implementation.
 
 
-MaybeObject* LoadCallbackProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty) {
   ASSERT(args[0]->IsJSObject());
   ASSERT(args[1]->IsJSObject());
   AccessorInfo* callback = AccessorInfo::cast(args[3]);
@@ -1301,8 +1300,7 @@
 }
 
 
-MaybeObject* StoreCallbackProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
   JSObject* recv = JSObject::cast(args[0]);
   AccessorInfo* callback = AccessorInfo::cast(args[1]);
   Address setter_address = v8::ToCData<Address>(callback->setter());
@@ -1335,8 +1333,7 @@
  * Returns |Heap::no_interceptor_result_sentinel()| if interceptor doesn't
  * provide any value for the given name.
  */
-MaybeObject* LoadPropertyWithInterceptorOnly(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
   Handle<String> name_handle = args.at<String>(0);
   Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
   ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
@@ -1435,8 +1432,7 @@
  * Loads a property with an interceptor performing post interceptor
  * lookup if interceptor failed.
  */
-MaybeObject* LoadPropertyWithInterceptorForLoad(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad) {
   PropertyAttributes attr = NONE;
   Object* result;
   { MaybeObject* maybe_result = LoadWithInterceptor(&args, &attr);
@@ -1449,8 +1445,7 @@
 }
 
 
-MaybeObject* LoadPropertyWithInterceptorForCall(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall) {
   PropertyAttributes attr;
   MaybeObject* result = LoadWithInterceptor(&args, &attr);
   RETURN_IF_SCHEDULED_EXCEPTION(isolate);
@@ -1461,8 +1456,7 @@
 }
 
 
-MaybeObject* StoreInterceptorProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
   ASSERT(args.length() == 4);
   JSObject* recv = JSObject::cast(args[0]);
   String* name = String::cast(args[1]);
@@ -1478,8 +1472,7 @@
 }
 
 
-MaybeObject* KeyedLoadPropertyWithInterceptor(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) {
   JSObject* receiver = JSObject::cast(args[0]);
   ASSERT(Smi::cast(args[1])->value() >= 0);
   uint32_t index = Smi::cast(args[1])->value();
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 793f581..c5dcf36 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -392,23 +392,24 @@
 
 
 // Support functions for IC stubs for callbacks.
-MaybeObject* LoadCallbackProperty(RUNTIME_CALLING_CONVENTION);
-MaybeObject* StoreCallbackProperty(RUNTIME_CALLING_CONVENTION);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty);
 
 
 // Support functions for IC stubs for interceptors.
-MaybeObject* LoadPropertyWithInterceptorOnly(RUNTIME_CALLING_CONVENTION);
-MaybeObject* LoadPropertyWithInterceptorForLoad(RUNTIME_CALLING_CONVENTION);
-MaybeObject* LoadPropertyWithInterceptorForCall(RUNTIME_CALLING_CONVENTION);
-MaybeObject* StoreInterceptorProperty(RUNTIME_CALLING_CONVENTION);
-MaybeObject* CallInterceptorProperty(RUNTIME_CALLING_CONVENTION);
-MaybeObject* KeyedLoadPropertyWithInterceptor(RUNTIME_CALLING_CONVENTION);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, CallInterceptorProperty);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor);
 
 
 // The stub compiler compiles stubs for the stub cache.
 class StubCompiler BASE_EMBEDDED {
  public:
-  StubCompiler() : scope_(), masm_(NULL, 256), failure_(NULL) { }
+  StubCompiler()
+      : scope_(), masm_(Isolate::Current(), NULL, 256), failure_(NULL) { }
 
   MUST_USE_RESULT MaybeObject* CompileCallInitialize(Code::Flags flags);
   MUST_USE_RESULT MaybeObject* CompileCallPreMonomorphic(Code::Flags flags);
diff --git a/src/type-info.cc b/src/type-info.cc
index 78f693c..256f48a 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -355,6 +355,18 @@
 }
 
 
+void TypeFeedbackOracle::SetInfo(int position, Object* target) {
+  MaybeObject* maybe_result = dictionary_->AtNumberPut(position, target);
+  USE(maybe_result);
+#ifdef DEBUG
+  Object* result;
+  // Dictionary has been allocated with sufficient size for all elements.
+  ASSERT(maybe_result->ToObject(&result));
+  ASSERT(*dictionary_ == result);
+#endif
+}
+
+
 void TypeFeedbackOracle::PopulateMap(Handle<Code> code) {
   Isolate* isolate = Isolate::Current();
   HandleScope scope(isolate);
@@ -371,14 +383,14 @@
   int length = code_positions.length();
   ASSERT(source_positions.length() == length);
   for (int i = 0; i < length; i++) {
-    HandleScope loop_scope(isolate);
+    AssertNoAllocation no_allocation;
     RelocInfo info(code->instruction_start() + code_positions[i],
                    RelocInfo::CODE_TARGET, 0);
-    Handle<Code> target(Code::GetCodeFromTargetAddress(info.target_address()));
+    Code* target = Code::GetCodeFromTargetAddress(info.target_address());
     int position = source_positions[i];
     InlineCacheState state = target->ic_state();
     Code::Kind kind = target->kind();
-    Handle<Object> value;
+
     if (kind == Code::BINARY_OP_IC ||
         kind == Code::TYPE_RECORDING_BINARY_OP_IC ||
         kind == Code::COMPARE_IC) {
@@ -387,35 +399,28 @@
       // recorded for all binary ICs.
       int entry = dictionary_->FindEntry(position);
       if (entry == NumberDictionary::kNotFound) {
-        value = target;
+        SetInfo(position, target);
       }
     } else if (state == MONOMORPHIC) {
       if (kind == Code::KEYED_EXTERNAL_ARRAY_LOAD_IC ||
           kind == Code::KEYED_EXTERNAL_ARRAY_STORE_IC) {
-        value = target;
+        SetInfo(position, target);
       } else if (target->kind() != Code::CALL_IC ||
           target->check_type() == RECEIVER_MAP_CHECK) {
         Map* map = target->FindFirstMap();
         if (map == NULL) {
-          value = target;
+          SetInfo(position, target);
         } else {
-          value = Handle<Map>(map);
+          SetInfo(position, map);
         }
       } else {
         ASSERT(target->kind() == Code::CALL_IC);
         CheckType check = target->check_type();
         ASSERT(check != RECEIVER_MAP_CHECK);
-        value = Handle<Object>(Smi::FromInt(check));
+        SetInfo(position, Smi::FromInt(check));
       }
     } else if (state == MEGAMORPHIC) {
-      value = target;
-    }
-
-    if (!value.is_null()) {
-      Handle<NumberDictionary> new_dict =
-          isolate->factory()->DictionaryAtNumberPut(
-              dictionary_, position, value);
-      dictionary_ = loop_scope.CloseAndEscape(new_dict);
+      SetInfo(position, target);
     }
   }
   // Allocate handle in the parent scope.
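
SetInfo is what lets the loop above run under AssertNoAllocation: because the
dictionary was allocated up front with room for every element, AtNumberPut
never needs to grow it, so no GC can move the raw Code*/Map* pointers being
stored. A condensed sketch of the pattern (the pre-sizing factory call is an
assumption and not part of this hunk):

    // Sketch: pre-size the dictionary, then fill it without allocating.
    Handle<NumberDictionary> dict =
        isolate->factory()->NewNumberDictionary(expected_entries);
    {
      AssertNoAllocation no_allocation;   // raw pointers stay valid below
      for (int i = 0; i < length; i++) {
        // Cannot fail or grow: enough space was reserved above.
        MaybeObject* ignored = dict->AtNumberPut(positions[i], targets[i]);
        USE(ignored);
      }
    }
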
diff --git a/src/type-info.h b/src/type-info.h
index c068489..9b69526 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -267,6 +267,8 @@
                                     Handle<String> name,
                                     Code::Flags flags);
 
+  void SetInfo(int position, Object* target);
+
   void PopulateMap(Handle<Code> code);
 
   void CollectPositions(Code* code,
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 04482e8..5e765b2 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -202,9 +202,6 @@
   SC(array_function_runtime, V8.ArrayFunctionRuntime)                 \
   SC(array_function_native, V8.ArrayFunctionNative)                   \
   SC(for_in, V8.ForIn)                                                \
-  SC(memcopy_aligned, V8.MemCopyAligned)                              \
-  SC(memcopy_unaligned, V8.MemCopyUnaligned)                          \
-  SC(memcopy_noxmm, V8.MemCopyNoXMM)                                  \
   SC(enum_cache_hits, V8.EnumCacheHits)                               \
   SC(enum_cache_misses, V8.EnumCacheMisses)                           \
   SC(zone_segment_bytes, V8.ZoneSegmentBytes)                         \
diff --git a/src/v8.cc b/src/v8.cc
index 8153372..f89ed83 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,6 +41,9 @@
 namespace v8 {
 namespace internal {
 
+static Mutex* init_once_mutex = OS::CreateMutex();
+static bool init_once_called = false;
+
 bool V8::is_running_ = false;
 bool V8::has_been_setup_ = false;
 bool V8::has_been_disposed_ = false;
@@ -49,6 +52,8 @@
 
 
 bool V8::Initialize(Deserializer* des) {
+  InitializeOncePerProcess();
+
   // The current thread may not yet have entered an isolate to run.
   // Note that Isolate::Current() may be non-null because for various
   // initialization purposes an initializing thread may be assigned an isolate
@@ -68,15 +73,6 @@
   Isolate* isolate = Isolate::Current();
   if (isolate->IsInitialized()) return true;
 
-#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI)
-  use_crankshaft_ = false;
-#else
-  use_crankshaft_ = FLAG_crankshaft;
-#endif
-
-  // Peephole optimization might interfere with deoptimization.
-  FLAG_peephole_optimization = !use_crankshaft_;
-
   is_running_ = true;
   has_been_setup_ = true;
   has_fatal_error_ = false;
@@ -171,8 +167,8 @@
 } double_int_union;
 
 
-Object* V8::FillHeapNumberWithRandom(Object* heap_number) {
-  uint64_t random_bits = Random(Isolate::Current());
+Object* V8::FillHeapNumberWithRandom(Object* heap_number, Isolate* isolate) {
+  uint64_t random_bits = Random(isolate);
   // Make a double* from address (heap_number + sizeof(double)).
   double_int_union* r = reinterpret_cast<double_int_union*>(
       reinterpret_cast<char*>(heap_number) +
@@ -188,4 +184,32 @@
   return heap_number;
 }
 
+
+void V8::InitializeOncePerProcess() {
+  ScopedLock lock(init_once_mutex);
+  if (init_once_called) return;
+  init_once_called = true;
+
+  // Setup the platform OS support.
+  OS::Setup();
+
+#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI)
+  use_crankshaft_ = false;
+#else
+  use_crankshaft_ = FLAG_crankshaft;
+#endif
+
+  if (Serializer::enabled()) {
+    use_crankshaft_ = false;
+  }
+
+  CPU::Setup();
+  if (!CPU::SupportsCrankshaft()) {
+    use_crankshaft_ = false;
+  }
+
+  // Peephole optimization might interfere with deoptimization.
+  FLAG_peephole_optimization = !use_crankshaft_;
+}
+
 } }  // namespace v8::internal
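
InitializeOncePerProcess uses a mutex-plus-flag guard so that process-wide
setup (OS::Setup, CPU feature probing, the crankshaft decision) runs exactly
once even when several threads call V8::Initialize concurrently. The same
idiom in isolation, as a minimal sketch:

    // Minimal once-per-process guard, same shape as the code above.
    static Mutex* init_once_mutex = OS::CreateMutex();  // made at static init
    static bool init_once_called = false;

    static void InitializeOncePerProcess() {
      ScopedLock lock(init_once_mutex);
      if (init_once_called) return;   // later callers bail out immediately
      init_once_called = true;
      // ... process-wide setup goes here ...
    }

Because the mutex is created during static initialization, the guard is
already usable before the first isolate exists.
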
diff --git a/src/v8.h b/src/v8.h
index e7ca0d2..776fa9c 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -84,7 +84,6 @@
   static void TearDown();
   static bool IsRunning() { return is_running_; }
   static bool UseCrankshaft() { return use_crankshaft_; }
-  static void DisableCrankshaft() { use_crankshaft_ = false; }
   // To be dead you have to have lived
   // TODO(isolates): move IsDead to Isolate.
   static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
@@ -101,12 +100,15 @@
   // use a separate random state for internal random number
   // generation.
   static uint32_t RandomPrivate(Isolate* isolate);
-  static Object* FillHeapNumberWithRandom(Object* heap_number);
+  static Object* FillHeapNumberWithRandom(Object* heap_number,
+                                          Isolate* isolate);
 
   // Idle notification directly from the API.
   static bool IdleNotification();
 
  private:
+  static void InitializeOncePerProcess();
+
   // True if engine is currently running
   static bool is_running_;
   // True if V8 has ever been run
diff --git a/src/v8utils.h b/src/v8utils.h
index 0aa53ca..87c5e7f 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -254,51 +254,14 @@
 };
 
 
-// Custom memcpy implementation for platforms where the standard version
-// may not be good enough.
-#if defined(V8_TARGET_ARCH_IA32)
-
-// The default memcpy on ia32 architectures is generally not as efficient
-// as possible. (If any further ia32 platforms are introduced where the
-// memcpy function is efficient, exclude them from this branch).
-
-typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
-
-// Implemented in codegen-<arch>.cc.
-MemCopyFunction CreateMemCopyFunction();
-
-// Copy memory area to disjoint memory area.
-static inline void MemCopy(void* dest, const void* src, size_t size) {
-  static MemCopyFunction memcopy = CreateMemCopyFunction();
-  (*memcopy)(dest, src, size);
-#ifdef DEBUG
-  CHECK_EQ(0, memcmp(dest, src, size));
-#endif
-}
-
-// Limit below which the extra overhead of the MemCopy function is likely
-// to outweigh the benefits of faster copying.
-static const int kMinComplexMemCopy = 64;
-
-#else  // V8_TARGET_ARCH_IA32
-
-static inline void MemCopy(void* dest, const void* src, size_t size) {
-  memcpy(dest, src, size);
-}
-
-static const int kMinComplexMemCopy = 256;
-
-#endif  // V8_TARGET_ARCH_IA32
-
-
 // Copy from ASCII/16bit chars to ASCII/16bit chars.
 template <typename sourcechar, typename sinkchar>
 static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
   sinkchar* limit = dest + chars;
 #ifdef V8_HOST_CAN_READ_UNALIGNED
   if (sizeof(*dest) == sizeof(*src)) {
-    if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) {
-      MemCopy(dest, src, chars * sizeof(*dest));
+    if (chars >= static_cast<int>(OS::kMinComplexMemCopy / sizeof(*dest))) {
+      OS::MemCopy(dest, src, chars * sizeof(*dest));
       return;
     }
     // Number of characters in a uintptr_t.
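
The custom MemCopy machinery leaves v8utils.h; CopyChars now defers to the
platform layer through OS::MemCopy and OS::kMinComplexMemCopy. The caller-side
shape, assuming OS::MemCopy keeps the semantics of the removed helper:

    // Illustrative threshold dispatch, mirroring CopyChars above.
    template <typename Char>
    static inline void CopyN(Char* dest, const Char* src, int n) {
      if (n >= static_cast<int>(OS::kMinComplexMemCopy / sizeof(*dest))) {
        OS::MemCopy(dest, src, n * sizeof(*dest));     // tuned platform copy
      } else {
        for (int i = 0; i < n; i++) dest[i] = src[i];  // cheap for short runs
      }
    }
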
diff --git a/src/version.cc b/src/version.cc
index 6104dac..2910635 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      6
+#define BUILD_NUMBER      7
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 439236a..9541a58 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -393,9 +393,9 @@
     StaticVisitor::VisitPointer(heap, target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
-    StaticVisitor::VisitCodeTarget(this);
+    StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
-    StaticVisitor::VisitGlobalPropertyCell(this);
+    StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
     CPU::FlushICache(pc_, sizeof(Address));
@@ -405,7 +405,7 @@
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
-    StaticVisitor::VisitDebugTarget(this);
+    StaticVisitor::VisitDebugTarget(heap, this);
 #endif
   } else if (mode == RelocInfo::RUNTIME_ENTRY) {
     StaticVisitor::VisitRuntimeEntry(this);
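
The static visitor callbacks gain a Heap* parameter, so relocation visiting no
longer has to reach for Isolate::Current(). A visitor type matching the calls
above would look roughly like this (illustrative; the real visitor templates
live elsewhere and are not part of this hunk):

    // Sketch of the interface implied by the calls above.
    class ExampleStaticVisitor {
     public:
      static void VisitPointer(Heap* heap, Object** p);
      static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo);
      static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo);
      static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo);
      static void VisitExternalReference(Address* p);
      static void VisitRuntimeEntry(RelocInfo* rinfo);
    };
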
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 0744b8a..de28ae9 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -38,22 +38,38 @@
 // -----------------------------------------------------------------------------
 // Implementation of CpuFeatures
 
-CpuFeatures::CpuFeatures()
-    : supported_(kDefaultCpuFeatures),
-      enabled_(0),
-      found_by_runtime_probing_(0) {
-}
+
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
+uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 
-void CpuFeatures::Probe(bool portable)  {
-  ASSERT(HEAP->HasBeenSetup());
+void CpuFeatures::Probe() {
+  ASSERT(!initialized_);
+#ifdef DEBUG
+  initialized_ = true;
+#endif
   supported_ = kDefaultCpuFeatures;
-  if (portable && Serializer::enabled()) {
+  if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
 
-  Assembler assm(NULL, 0);
+  const int kBufferSize = 4 * KB;
+  VirtualMemory* memory = new VirtualMemory(kBufferSize);
+  if (!memory->IsReserved()) {
+    delete memory;
+    return;
+  }
+  ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
+  if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
+    delete memory;
+    return;
+  }
+
+  Assembler assm(NULL, memory->address(), kBufferSize);
   Label cpuid, done;
 #define __ assm.
   // Save old rsp, since we are going to modify the stack.
@@ -117,31 +133,20 @@
   __ ret(0);
 #undef __
 
-  CodeDesc desc;
-  assm.GetCode(&desc);
-  Isolate* isolate = Isolate::Current();
-  MaybeObject* maybe_code =
-      isolate->heap()->CreateCode(desc,
-                                  Code::ComputeFlags(Code::STUB),
-                                  Handle<Object>());
-  Object* code;
-  if (!maybe_code->ToObject(&code)) return;
-  if (!code->IsCode()) return;
-  PROFILE(isolate,
-          CodeCreateEvent(Logger::BUILTIN_TAG,
-                          Code::cast(code), "CpuFeatures::Probe"));
   typedef uint64_t (*F0)();
-  F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+  F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
   supported_ = probe();
   found_by_runtime_probing_ = supported_;
   found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
   uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
   supported_ |= os_guarantees;
-  found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
+  found_by_runtime_probing_ &= ~os_guarantees;
   // SSE2 and CMOV must be available on an X64 CPU.
   ASSERT(IsSupported(CPUID));
   ASSERT(IsSupported(SSE2));
   ASSERT(IsSupported(CMOV));
+
+  delete memory;
 }
 
 
@@ -339,8 +344,8 @@
 static void InitCoverageLog();
 #endif
 
-Assembler::Assembler(void* buffer, int buffer_size)
-    : AssemblerBase(Isolate::Current()),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+    : AssemblerBase(arg_isolate),
       code_targets_(100),
       positions_recorder_(this),
       emit_debug_code_(FLAG_debug_code) {
@@ -349,7 +354,7 @@
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;
 
-      if (isolate()->assembler_spare_buffer() != NULL) {
+      if (isolate() != NULL && isolate()->assembler_spare_buffer() != NULL) {
         buffer = isolate()->assembler_spare_buffer();
         isolate()->set_assembler_spare_buffer(NULL);
       }
@@ -393,7 +398,8 @@
 
 Assembler::~Assembler() {
   if (own_buffer_) {
-    if (isolate()->assembler_spare_buffer() == NULL &&
+    if (isolate() != NULL &&
+        isolate()->assembler_spare_buffer() == NULL &&
         buffer_size_ == kMinimalBufferSize) {
       isolate()->set_assembler_spare_buffer(buffer_);
     } else {
@@ -516,7 +522,8 @@
           reloc_info_writer.pos(), desc.reloc_size);
 
   // Switch buffers.
-  if (isolate()->assembler_spare_buffer() == NULL &&
+  if (isolate() != NULL &&
+      isolate()->assembler_spare_buffer() == NULL &&
       buffer_size_ == kMinimalBufferSize) {
     isolate()->set_assembler_spare_buffer(buffer_);
   } else {
@@ -1037,7 +1044,7 @@
 
 
 void Assembler::cpuid() {
-  ASSERT(isolate()->cpu_features()->IsEnabled(CPUID));
+  ASSERT(CpuFeatures::IsEnabled(CPUID));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit(0x0F);
@@ -2388,7 +2395,7 @@
 
 
 void Assembler::fisttp_s(const Operand& adr) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
+  ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_optional_rex_32(adr);
@@ -2398,7 +2405,7 @@
 
 
 void Assembler::fisttp_d(const Operand& adr) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
+  ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_optional_rex_32(adr);
@@ -2716,7 +2723,7 @@
 
 
 void Assembler::movdqa(const Operand& dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit(0x66);
@@ -2728,7 +2735,7 @@
 
 
 void Assembler::movdqa(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit(0x66);
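
Two related changes run through this file: the Assembler constructor now takes
the Isolate* explicitly (and tolerates NULL for code generated before, or
outside of, any isolate), and CpuFeatures::Probe() executes its detection stub
straight out of an executable VirtualMemory chunk instead of a heap-allocated
Code object, so it no longer needs an initialized heap. A condensed sketch of
that probing scaffold, under the same assumptions as the code above:

    // Sketch: run freshly assembled code from an executable VirtualMemory
    // chunk, with no isolate or heap involved.
    VirtualMemory* memory = new VirtualMemory(4 * KB);
    if (memory->IsReserved() &&
        memory->Commit(memory->address(), 4 * KB, true /* executable */)) {
      Assembler assm(NULL, memory->address(), 4 * KB);  // NULL isolate is OK
      // ... emit the cpuid probe ...
      typedef uint64_t (*F0)();
      F0 probe =
          FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
      uint64_t features = probe();
      USE(features);
    }
    delete memory;
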
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 52aca63..f22f80b 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -434,14 +434,15 @@
 //   } else {
 //     // Generate standard x87 or SSE2 floating point code.
 //   }
-class CpuFeatures {
+class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  void Probe(bool portable);
+  static void Probe();
 
   // Check whether a feature is supported by the target CPU.
-  bool IsSupported(CpuFeature f) const {
+  static bool IsSupported(CpuFeature f) {
+    ASSERT(initialized_);
     if (f == SSE2 && !FLAG_enable_sse2) return false;
     if (f == SSE3 && !FLAG_enable_sse3) return false;
     if (f == CMOV && !FLAG_enable_cmov) return false;
@@ -449,51 +450,65 @@
     if (f == SAHF && !FLAG_enable_sahf) return false;
     return (supported_ & (V8_UINT64_C(1) << f)) != 0;
   }
+
+#ifdef DEBUG
   // Check whether a feature is currently enabled.
-  bool IsEnabled(CpuFeature f) const {
-    return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
+  static bool IsEnabled(CpuFeature f) {
+    ASSERT(initialized_);
+    Isolate* isolate = Isolate::UncheckedCurrent();
+    if (isolate == NULL) {
+      // When no isolate is available, work as if we're running in
+      // release mode.
+      return IsSupported(f);
+    }
+    uint64_t enabled = isolate->enabled_cpu_features();
+    return (enabled & (V8_UINT64_C(1) << f)) != 0;
   }
+#endif
+
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
    public:
-    explicit Scope(CpuFeature f)
-        : cpu_features_(Isolate::Current()->cpu_features()),
-          isolate_(Isolate::Current()) {
-      uint64_t mask = (V8_UINT64_C(1) << f);
-      ASSERT(cpu_features_->IsSupported(f));
+    explicit Scope(CpuFeature f) {
+      uint64_t mask = V8_UINT64_C(1) << f;
+      ASSERT(CpuFeatures::IsSupported(f));
       ASSERT(!Serializer::enabled() ||
-          (cpu_features_->found_by_runtime_probing_ & mask) == 0);
-      old_enabled_ = cpu_features_->enabled_;
-      cpu_features_->enabled_ |= mask;
+             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+      isolate_ = Isolate::UncheckedCurrent();
+      old_enabled_ = 0;
+      if (isolate_ != NULL) {
+        old_enabled_ = isolate_->enabled_cpu_features();
+        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+      }
     }
     ~Scope() {
-      ASSERT_EQ(Isolate::Current(), isolate_);
-      cpu_features_->enabled_ = old_enabled_;
+      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+      if (isolate_ != NULL) {
+        isolate_->set_enabled_cpu_features(old_enabled_);
+      }
     }
    private:
-    uint64_t old_enabled_;
-    CpuFeatures* cpu_features_;
     Isolate* isolate_;
+    uint64_t old_enabled_;
 #else
    public:
     explicit Scope(CpuFeature f) {}
 #endif
   };
- private:
-  CpuFeatures();
 
+ private:
   // Safe defaults include SSE2 and CMOV for X64. They are always available
   // if anyone checks, but they shouldn't need to check.
   // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
   //   fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
   static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
 
-  uint64_t supported_;
-  uint64_t enabled_;
-  uint64_t found_by_runtime_probing_;
-
-  friend class Isolate;
+#ifdef DEBUG
+  static bool initialized_;
+#endif
+  static uint64_t supported_;
+  static uint64_t found_by_runtime_probing_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
 };
@@ -526,7 +541,7 @@
   // for code generation and assumes its size to be buffer_size. If the buffer
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
-  Assembler(void* buffer, int buffer_size);
+  Assembler(Isolate* isolate, void* buffer, int buffer_size);
   ~Assembler();
 
   // Overrides the default provided by FLAG_debug_code.
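
With CpuFeatures now AllStatic, the supported and runtime-probed masks are
process-wide, while the debug-only "enabled" mask lives on the isolate, which
is what lets Scope work even on a thread that has not entered an isolate.
Typical guarded use of an optional instruction set (the __ shorthand stands
for the surrounding macro assembler, as elsewhere in the codebase):

    // Illustrative use; the emitter asserts IsEnabled(SSE3) in debug builds.
    if (CpuFeatures::IsSupported(SSE3)) {
      CpuFeatures::Scope scope(SSE3);  // records the enable mask on the isolate
      __ fisttp_d(operand);
    } else {
      // ... emit an SSE2/x87 fallback sequence ...
    }
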
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 0fb827b..12c0ec5 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -2961,7 +2961,7 @@
                          times_1,
                          FixedArray::kHeaderSize));
     __ JumpIfSmi(probe, not_found);
-    ASSERT(Isolate::Current()->cpu_features()->IsSupported(SSE2));
+    ASSERT(CpuFeatures::IsSupported(SSE2));
     CpuFeatures::Scope fscope(SSE2);
     __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
     __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 8c338fe..9cf85c4 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -6438,8 +6438,13 @@
 
   // Return a random uint32 number in rax.
   // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
-  __ PrepareCallCFunction(0);
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
+  __ PrepareCallCFunction(1);
+#ifdef _WIN64
+  __ LoadAddress(rcx, ExternalReference::isolate_address());
+#else
+  __ LoadAddress(rdi, ExternalReference::isolate_address());
+#endif
+  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
   // Convert 32 random bits in rax to 0.(32 random bits) in a double
   // by computing:
@@ -8276,17 +8281,9 @@
     result = allocator()->Allocate();
     ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
 
-    // Cannot use r12 for receiver, because that changes
-    // the distance between a call and a fixup location,
-    // due to a special encoding of r12 as r/m in a ModR/M byte.
-    if (receiver.reg().is(r12)) {
-      frame()->Spill(receiver.reg());  // It will be overwritten with result.
-      // Swap receiver and value.
-      __ movq(result.reg(), receiver.reg());
-      Result temp = receiver;
-      receiver = result;
-      result = temp;
-    }
+    // r12 is now a reserved register, so it cannot be the receiver.
+    // If it was, the distance to the fixup location would not be constant.
+    ASSERT(!receiver.reg().is(r12));
 
     // Check that the receiver is a heap object.
     Condition is_smi = masm()->CheckSmi(receiver.reg());
@@ -8758,7 +8755,7 @@
                                                  &actual_size,
                                                  true));
   CHECK(buffer);
-  Assembler masm(buffer, static_cast<int>(actual_size));
+  Assembler masm(NULL, buffer, static_cast<int>(actual_size));
   // Generated code is put into a fixed, unmovable buffer, and not into
   // the V8 heap. We can't, and don't, refer to any relocatable addresses
   // (e.g. the JavaScript nan-object).
@@ -8832,7 +8829,7 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  // Call the function from C++.
+  // Call the function from C++ through this pointer.
   return FUNCTION_CAST<ModuloFunction>(buffer);
 }
 
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index b49fb1c..e637ba1 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -42,10 +42,12 @@
 namespace internal {
 
 void CPU::Setup() {
-  Isolate::Current()->cpu_features()->Probe(true);
-  if (Serializer::enabled()) {
-    V8::DisableCrankshaft();
-  }
+  CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+  return true;  // Yay!
 }
 
 
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 2080c61..e33d061 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -663,23 +663,26 @@
   __ neg(arg5);
 
   // Allocate a new deoptimizer object.
-  __ PrepareCallCFunction(5);
+  __ PrepareCallCFunction(6);
   __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ movq(arg1, rax);
   __ movq(arg2, Immediate(type()));
   // Args 3 and 4 are already in the right registers.
 
-  // On windows put the argument on the stack (PrepareCallCFunction have
-  // created space for this). On linux pass the argument in r8.
+  // On windows put the arguments on the stack (PrepareCallCFunction
+  // has created space for this). On linux pass the arguments in r8 and r9.
 #ifdef _WIN64
   __ movq(Operand(rsp, 4 * kPointerSize), arg5);
+  __ LoadAddress(arg5, ExternalReference::isolate_address());
+  __ movq(Operand(rsp, 5 * kPointerSize), arg5);
 #else
   __ movq(r8, arg5);
+  __ LoadAddress(r9, ExternalReference::isolate_address());
 #endif
 
   Isolate* isolate = masm()->isolate();
 
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 5);
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
   // Preserve deoptimizer object in register rax and get the input
   // frame descriptor pointer.
   __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
@@ -722,10 +725,11 @@
 
   // Compute the output frame in the deoptimizer.
   __ push(rax);
-  __ PrepareCallCFunction(1);
+  __ PrepareCallCFunction(2);
   __ movq(arg1, rax);
+  __ LoadAddress(arg2, ExternalReference::isolate_address());
   __ CallCFunction(
-      ExternalReference::compute_output_frames_function(isolate), 1);
+      ExternalReference::compute_output_frames_function(isolate), 2);
   __ pop(rax);
 
   // Replace the current frame with the output frames.
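
The deoptimizer hunks above grow both C calls by one argument so the Isolate* travels explicitly: on Win64 only the first four integer arguments go in rcx, rdx, r8 and r9 and the remainder land in stack slots reserved by PrepareCallCFunction, while the System V AMD64 ABI fits six arguments into rdi, rsi, rdx, rcx, r8 and r9. A sketch of a six-argument C entry point annotated with where each argument arrives under the two ABIs (the name and parameter list are illustrative, not the actual V8 signature):

    // Illustrative six-argument C function; names are placeholders, only the
    // register/stack assignments are the point.
    extern "C" void Example6Args(void* a1,   // Win64: rcx    SysV: rdi
                                 int   a2,   // Win64: rdx    SysV: rsi
                                 int   a3,   // Win64: r8     SysV: rdx
                                 void* a4,   // Win64: r9     SysV: rcx
                                 long  a5,   // Win64: stack  SysV: r8
                                 void* a6);  // Win64: stack  SysV: r9 (the Isolate*)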
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 81be819..b14267c 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -99,7 +99,7 @@
  public:
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
-  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kLastParameterOffset = +2 * kPointerSize;
   static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
 
   // Caller SP-relative.
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 90afd85..4bf84a8 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1576,27 +1576,26 @@
     }
   }
 
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
   if (expr->is_compound()) {
     { AccumulatorValueContext context(this);
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          PrepareForBailout(expr->target(), TOS_REG);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
       }
     }
 
-    // For property compound assignments we need another deoptimization
-    // point after the property load.
-    if (property != NULL) {
-      PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
-    }
-
     Token::Value op = expr->binary_op();
     __ push(rax);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
@@ -2248,15 +2247,6 @@
       }
     }
   } else {
-    // Call to some other expression.  If the expression is an anonymous
-    // function literal not called in a loop, mark it as one that should
-    // also use the full code generator.
-    FunctionLiteral* lit = fun->AsFunctionLiteral();
-    if (lit != NULL &&
-        lit->name()->Equals(isolate()->heap()->empty_string()) &&
-        loop_depth() == 0) {
-      lit->set_try_full_codegen(true);
-    }
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(fun);
     }
@@ -2435,11 +2425,71 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
-  // used in a few functions in runtime.js which should not normally be hit by
-  // this compiler.
+  if (FLAG_debug_code) __ AbortIfSmi(rax);
+
+  // Check whether this map has already been checked to be safe for default
+  // valueOf.
+  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ testb(FieldOperand(rbx, Map::kBitField2Offset),
+           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ j(not_zero, if_true);
+
+  // Check for fast case object. Generate false result for slow case object.
+  __ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
+  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
+  __ j(equal, if_false);
+
+  // Look for valueOf symbol in the descriptor array, and indicate false if
+  // found. The type is not checked, so if it is a transition it is a false
+  // negative.
+  __ movq(rbx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
+  __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+  // rbx: descriptor array
+  // rcx: length of descriptor array
+  // Calculate the end of the descriptor array.
+  SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
+  __ lea(rcx,
+         Operand(
+             rbx, index.reg, index.scale, FixedArray::kHeaderSize));
+  // Calculate location of the first key name.
+  __ addq(rbx,
+          Immediate(FixedArray::kHeaderSize +
+                    DescriptorArray::kFirstIndex * kPointerSize));
+  // Loop through all the keys in the descriptor array. If one of these is the
+  // symbol valueOf, the result is false.
+  Label entry, loop;
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ movq(rdx, FieldOperand(rbx, 0));
+  __ Cmp(rdx, FACTORY->value_of_symbol());
+  __ j(equal, if_false);
+  __ addq(rbx, Immediate(kPointerSize));
+  __ bind(&entry);
+  __ cmpq(rbx, rcx);
+  __ j(not_equal, &loop);
+
+  // Reload map as register rbx was used as temporary above.
+  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If not, the result is false.
+  __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+  __ testq(rcx, Immediate(kSmiTagMask));
+  __ j(zero, if_false);
+  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+  __ cmpq(rcx,
+          ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+  __ j(not_equal, if_false);
+  // Set the bit in the map to indicate that it has been checked safe for
+  // default valueOf, and produce a true result.
+  __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+         Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ jmp(if_true);
+
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  __ jmp(if_false);
   context()->Plug(if_true, if_false);
 }
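
The new code above replaces the unconditional "false" answer with an inline scan: it looks at the receiver map's cached bit, rejects dictionary-mode objects, walks the descriptor array for a shadowing "valueOf" key, checks that the prototype is the untouched String prototype, and finally caches a positive answer in the map. A rough, self-contained C++ sketch of that logic, using stand-in types purely for readability (none of these names are V8 classes):

    #include <string>
    #include <vector>

    // Minimal stand-ins; only the shape of the check emitted above matters.
    struct MapSketch {
      bool safe_for_default_value_of = false;      // the cached map bit
      bool prototype_is_plain_string_proto = true;
      std::vector<std::string> descriptor_keys;    // property names on the map
    };

    struct ObjectSketch {
      MapSketch* map;
      bool has_dictionary_properties = false;      // "slow case" object
    };

    bool StringWrapperSafeForDefaultValueOf(ObjectSketch* obj) {
      MapSketch* map = obj->map;
      if (map->safe_for_default_value_of) return true;   // cached answer
      if (obj->has_dictionary_properties) return false;  // slow-case object
      for (const std::string& key : map->descriptor_keys) {
        if (key == "valueOf") return false;              // shadowing valueOf found
      }
      if (!map->prototype_is_plain_string_proto) return false;
      map->safe_for_default_value_of = true;             // cache for next time
      return true;
    }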
 
@@ -2693,8 +2743,13 @@
 
   // Return a random uint32 number in rax.
   // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
-  __ PrepareCallCFunction(0);
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
+  __ PrepareCallCFunction(1);
+#ifdef _WIN64
+  __ LoadAddress(rcx, ExternalReference::isolate_address());
+#else
+  __ LoadAddress(rdi, ExternalReference::isolate_address());
+#endif
+  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
   // Convert 32 random bits in rax to 0.(32 random bits) in a double
   // by computing:
@@ -3753,7 +3808,11 @@
 
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
-  PrepareForBailout(expr->increment(), TOS_REG);
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailout(expr->increment(), TOS_REG);
+  }
 
   // Call ToNumber only if operand is not a smi.
   NearLabel no_conversion;
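
The "0.(32 random bits)" comment above refers to the standard bit trick: the 32 random bits are placed in the top of a double's mantissa under a fixed exponent of 1023, which reinterprets as a value in [1.0, 2.0), and subtracting 1.0 yields a value in [0.0, 1.0). A self-contained sketch of that computation in plain C++:

    #include <cstdint>
    #include <cstring>

    // Turn 32 random bits into a double in [0, 1), mirroring the trick the
    // comment above refers to.
    double RandomBitsToDouble(uint32_t random_bits) {
      // Exponent field 0x3FF (bias 1023) => value is 1.mantissa, i.e. [1.0, 2.0).
      uint64_t bits = (uint64_t{0x3FF} << 52) | (uint64_t{random_bits} << 20);
      double result;
      std::memcpy(&result, &bits, sizeof(result));  // type-pun without UB
      return result - 1.0;                          // map [1, 2) down to [0, 1)
    }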
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 86a7e83..8e54f20 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -2019,7 +2019,7 @@
 }
 
 
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   if (result.is(rax)) {
     __ load_rax(instr->hydrogen()->cell().location(),
@@ -2035,6 +2035,18 @@
 }
 
 
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(rax));
+  ASSERT(ToRegister(instr->result()).is(rax));
+
+  __ Move(rcx, instr->name());
+  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
+                                               RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  CallCode(ic, mode, instr);
+}
+
+
 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
   Register value = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index c47cd72..3d28b66 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1614,8 +1614,7 @@
       bool needs_check = !instr->value()->type().IsSmi();
       if (needs_check) {
         LOperand* xmm_temp =
-            (instr->CanTruncateToInt32() &&
-             Isolate::Current()->cpu_features()->IsSupported(SSE3))
+            (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
             ? NULL
             : FixedTemp(xmm1);
         LTaggedToI* res = new LTaggedToI(value, xmm_temp);
@@ -1718,14 +1717,21 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  LLoadGlobal* result = new LLoadGlobal;
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new LLoadGlobalCell;
   return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
 
 
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), rax);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
   LStoreGlobal* result = new LStoreGlobal(UseRegister(instr->value()),
                                           TempRegister());
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index e94debf..57ef30b 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -118,7 +118,8 @@
   V(LoadContextSlot)                            \
   V(LoadElements)                               \
   V(LoadExternalArrayPointer)                   \
-  V(LoadGlobal)                                 \
+  V(LoadGlobalCell)                             \
+  V(LoadGlobalGeneric)                          \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadKeyedSpecializedArrayElement)           \
@@ -1245,10 +1246,25 @@
 };
 
 
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadGlobalGeneric(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* global_object() { return inputs_[0]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
 };
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 654814c..3a90343 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -40,12 +40,15 @@
 namespace v8 {
 namespace internal {
 
-MacroAssembler::MacroAssembler(void* buffer, int size)
-    : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+    : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
       allow_stub_calls_(true),
-      root_array_available_(true),
-      code_object_(isolate()->heap()->undefined_value()) {
+      root_array_available_(true) {
+  if (isolate() != NULL) {
+    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+                                  isolate());
+  }
 }
 
 
@@ -2851,9 +2854,6 @@
   ASSERT(frame_alignment != 0);
   ASSERT(num_arguments >= 0);
 
-  // Reserve space for Isolate address which is always passed as last parameter
-  num_arguments += 1;
-
   // Make stack end at alignment and allocate space for arguments and old rsp.
   movq(kScratchRegister, rsp);
   ASSERT(IsPowerOf2(frame_alignment));
@@ -2873,26 +2873,6 @@
 
 
 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
-  // Pass current isolate address as additional parameter.
-  if (num_arguments < kRegisterPassedArguments) {
-#ifdef _WIN64
-    // First four arguments are passed in registers on Windows.
-    Register arg_to_reg[] = {rcx, rdx, r8, r9};
-#else
-    // First six arguments are passed in registers on other platforms.
-    Register arg_to_reg[] = {rdi, rsi, rdx, rcx, r8, r9};
-#endif
-    Register reg = arg_to_reg[num_arguments];
-    LoadAddress(reg, ExternalReference::isolate_address());
-  } else {
-    // Push Isolate pointer after all parameters.
-    int argument_slots_on_stack =
-        ArgumentStackSlotsForCFunctionCall(num_arguments);
-    LoadAddress(kScratchRegister, ExternalReference::isolate_address());
-    movq(Operand(rsp, argument_slots_on_stack * kPointerSize),
-         kScratchRegister);
-  }
-
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
@@ -2901,7 +2881,6 @@
   call(function);
   ASSERT(OS::ActivationFrameAlignment() != 0);
   ASSERT(num_arguments >= 0);
-  num_arguments += 1;
   int argument_slots_on_stack =
       ArgumentStackSlotsForCFunctionCall(num_arguments);
   movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
@@ -2909,7 +2888,9 @@
 
 
 CodePatcher::CodePatcher(byte* address, int size)
-    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+    : address_(address),
+      size_(size),
+      masm_(Isolate::Current(), address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
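
After the change above, CallCFunction no longer appends the Isolate address behind the caller's back; instead every call site reserves one more slot and loads the isolate into the next free argument register itself, exactly as the full-codegen and deoptimizer hunks earlier in this diff do. A condensed sketch of the new call-site pattern (the wrapper function is illustrative; PrepareCallCFunction, LoadAddress and CallCFunction are the macro-assembler methods shown above):

    // Illustrative wrapper showing the explicit-isolate calling pattern.
    void CallHelperWithIsolate(MacroAssembler* masm, ExternalReference helper) {
      masm->PrepareCallCFunction(1);  // one argument: the Isolate*
    #ifdef _WIN64
      masm->LoadAddress(rcx, ExternalReference::isolate_address());  // 1st Win64 arg
    #else
      masm->LoadAddress(rdi, ExternalReference::isolate_address());  // 1st SysV arg
    #endif
      masm->CallCFunction(helper, 1);  // the count now includes the isolate
    }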
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 1ee0fe0..9fde18d 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -74,7 +74,11 @@
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
-  MacroAssembler(void* buffer, int size);
+  // The isolate parameter can be NULL if the macro assembler should
+  // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller to never invoke any such function on the
+  // macro assembler.
+  MacroAssembler(Isolate* isolate, void* buffer, int size);
 
   // Prevent the use of the RootArray during the lifetime of this
   // scope object.
@@ -1029,7 +1033,10 @@
   // may be bigger than 2^16 - 1.  Requires a scratch register.
   void Ret(int bytes_dropped, Register scratch);
 
-  Handle<Object> CodeObject() { return code_object_; }
+  Handle<Object> CodeObject() {
+    ASSERT(!code_object_.is_null());
+    return code_object_;
+  }
 
   // Copy length bytes from source to destination.
   // Uses scratch register internally (if you have a low-eight register
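
The constructor comment above makes the NULL-isolate mode explicit: the CreateModuloFunction change earlier in this diff builds its assembler that way, while CodePatcher and the tests pass Isolate::Current(). A minimal sketch of the two construction modes (buffer management elided):

    // Sketch of the two construction modes allowed after this change.
    void ConstructionModes(void* buffer, int size) {
      // Isolate-free: allowed as long as no isolate-dependent helper is used.
      MacroAssembler standalone(NULL, buffer, size);

      // Bound to the current isolate: CodeObject() and friends are usable.
      MacroAssembler bound(Isolate::Current(), buffer, size);
    }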
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 269e7af..03f91fa 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -114,7 +114,7 @@
 RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
     Mode mode,
     int registers_to_save)
-    : masm_(NULL, kRegExpCodeSize),
+    : masm_(Isolate::Current(), NULL, kRegExpCodeSize),
       no_root_array_scope_(&masm_),
       code_relative_fixup_positions_(4),
       mode_(mode),
@@ -402,13 +402,14 @@
 #endif
     __ push(backtrack_stackpointer());
 
-    static const int num_arguments = 3;
+    static const int num_arguments = 4;
     __ PrepareCallCFunction(num_arguments);
 
     // Put arguments into parameter registers. Parameters are
     //   Address byte_offset1 - Address captured substring's start.
     //   Address byte_offset2 - Address of current character position.
     //   size_t byte_length - length of capture in bytes(!)
+    //   Isolate* isolate
 #ifdef _WIN64
     // Compute and set byte_offset1 (start of capture).
     __ lea(rcx, Operand(rsi, rdx, times_1, 0));
@@ -416,6 +417,8 @@
     __ lea(rdx, Operand(rsi, rdi, times_1, 0));
     // Set byte_length.
     __ movq(r8, rbx);
+    // Isolate.
+    __ LoadAddress(r9, ExternalReference::isolate_address());
 #else  // AMD64 calling convention
     // Compute byte_offset2 (current position = rsi+rdi).
     __ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -425,6 +428,8 @@
     __ movq(rsi, rax);
     // Set byte_length.
     __ movq(rdx, rbx);
+    // Isolate.
+    __ LoadAddress(rcx, ExternalReference::isolate_address());
 #endif
     ExternalReference compare =
         ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
@@ -919,16 +924,18 @@
 #endif
 
     // Call GrowStack(backtrack_stackpointer())
-    static const int num_arguments = 2;
+    static const int num_arguments = 3;
     __ PrepareCallCFunction(num_arguments);
 #ifdef _WIN64
-    // Microsoft passes parameters in rcx, rdx.
+    // Microsoft passes parameters in rcx, rdx, r8.
     // First argument, backtrack stackpointer, is already in rcx.
     __ lea(rdx, Operand(rbp, kStackHighEnd));  // Second argument
+    __ LoadAddress(r8, ExternalReference::isolate_address());
 #else
-    // AMD64 ABI passes parameters in rdi, rsi.
+    // AMD64 ABI passes parameters in rdi, rsi, rdx.
     __ movq(rdi, backtrack_stackpointer());   // First argument.
     __ lea(rsi, Operand(rbp, kStackHighEnd));  // Second argument.
+    __ LoadAddress(rdx, ExternalReference::isolate_address());
 #endif
     ExternalReference grow_stack =
         ExternalReference::re_grow_stack(masm_.isolate());
diff --git a/src/zone-inl.h b/src/zone-inl.h
index 516fc4a..17e83dc 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -97,6 +97,10 @@
   return ZONE->New(static_cast<int>(size));
 }
 
+void* ZoneObject::operator new(size_t size, Zone* zone) {
+  return zone->New(static_cast<int>(size));
+}
+
 
 inline void* ZoneListAllocationPolicy::New(int size) {
   return ZONE->New(size);
diff --git a/src/zone.h b/src/zone.h
index 13b55c4..9efe4f5 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -133,6 +133,7 @@
  public:
   // Allocate a new ZoneObject of 'size' bytes in the Zone.
   inline void* operator new(size_t size);
+  inline void* operator new(size_t size, Zone* zone);
 
   // Ideally, the delete operator should be private instead of
   // public, but unfortunately the compiler sometimes synthesizes
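
The new placement form above lets callers allocate into an explicitly supplied Zone instead of the implicit ZONE used by the original operator new. A minimal usage sketch with a hypothetical ZoneObject subclass:

    // Hypothetical ZoneObject subclass; only the allocation pattern matters.
    class SampleNode : public ZoneObject {
     public:
      explicit SampleNode(int value) : value_(value) {}
      int value() const { return value_; }
     private:
      int value_;
    };

    void AllocateInZones(Zone* zone) {
      SampleNode* a = new SampleNode(1);         // existing form: current ZONE
      SampleNode* b = new (zone) SampleNode(2);  // new form: explicit zone
      // Zone objects live until their zone is torn down; there is no delete.
      (void) a;
      (void) b;
    }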
diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc
index 3d8157d..0ccf4b8 100644
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -200,6 +200,7 @@
       size_t allocated = 0;
       void* base = Isolate::Current()->code_range()->
           AllocateRawMemory(requested, &allocated);
+      CHECK(base != NULL);
       blocks.Add(Block(base, static_cast<int>(allocated)));
       current_allocated += static_cast<int>(allocated);
       total_allocated += static_cast<int>(allocated);
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 33d505e..d5748ce 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -13629,3 +13629,102 @@
   context->DetachGlobal();
   define_property->Call(proxy, 0, NULL);
 }
+
+
+static void InstallContextId(v8::Handle<Context> context, int id) {
+  Context::Scope scope(context);
+  CompileRun("Object.prototype").As<Object>()->
+      Set(v8_str("context_id"), v8::Integer::New(id));
+}
+
+
+static void CheckContextId(v8::Handle<Object> object, int expected) {
+  CHECK_EQ(expected, object->Get(v8_str("context_id"))->Int32Value());
+}
+
+
+THREADED_TEST(CreationContext) {
+  HandleScope handle_scope;
+  Persistent<Context> context1 = Context::New();
+  InstallContextId(context1, 1);
+  Persistent<Context> context2 = Context::New();
+  InstallContextId(context2, 2);
+  Persistent<Context> context3 = Context::New();
+  InstallContextId(context3, 3);
+
+  Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New();
+
+  Local<Object> object1;
+  Local<Function> func1;
+  {
+    Context::Scope scope(context1);
+    object1 = Object::New();
+    func1 = tmpl->GetFunction();
+  }
+
+  Local<Object> object2;
+  Local<Function> func2;
+  {
+    Context::Scope scope(context2);
+    object2 = Object::New();
+    func2 = tmpl->GetFunction();
+  }
+
+  Local<Object> instance1;
+  Local<Object> instance2;
+
+  {
+    Context::Scope scope(context3);
+    instance1 = func1->NewInstance();
+    instance2 = func2->NewInstance();
+  }
+
+  CHECK(object1->CreationContext() == context1);
+  CheckContextId(object1, 1);
+  CHECK(func1->CreationContext() == context1);
+  CheckContextId(func1, 1);
+  CHECK(instance1->CreationContext() == context1);
+  CheckContextId(instance1, 1);
+  CHECK(object2->CreationContext() == context2);
+  CheckContextId(object2, 2);
+  CHECK(func2->CreationContext() == context2);
+  CheckContextId(func2, 2);
+  CHECK(instance2->CreationContext() == context2);
+  CheckContextId(instance2, 2);
+
+  {
+    Context::Scope scope(context1);
+    CHECK(object1->CreationContext() == context1);
+    CheckContextId(object1, 1);
+    CHECK(func1->CreationContext() == context1);
+    CheckContextId(func1, 1);
+    CHECK(instance1->CreationContext() == context1);
+    CheckContextId(instance1, 1);
+    CHECK(object2->CreationContext() == context2);
+    CheckContextId(object2, 2);
+    CHECK(func2->CreationContext() == context2);
+    CheckContextId(func2, 2);
+    CHECK(instance2->CreationContext() == context2);
+    CheckContextId(instance2, 2);
+  }
+
+  {
+    Context::Scope scope(context2);
+    CHECK(object1->CreationContext() == context1);
+    CheckContextId(object1, 1);
+    CHECK(func1->CreationContext() == context1);
+    CheckContextId(func1, 1);
+    CHECK(instance1->CreationContext() == context1);
+    CheckContextId(instance1, 1);
+    CHECK(object2->CreationContext() == context2);
+    CheckContextId(object2, 2);
+    CHECK(func2->CreationContext() == context2);
+    CheckContextId(func2, 2);
+    CHECK(instance2->CreationContext() == context2);
+    CheckContextId(instance2, 2);
+  }
+
+  context1.Dispose();
+  context2.Dispose();
+  context3.Dispose();
+}
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index a91886e..26f3ef9 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -58,7 +58,7 @@
   InitializeVM();
   v8::HandleScope scope;
 
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
 
   __ add(r0, r0, Operand(r1));
   __ mov(pc, Operand(lr));
@@ -84,7 +84,7 @@
   InitializeVM();
   v8::HandleScope scope;
 
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
   Label L, C;
 
   __ mov(r1, Operand(r0));
@@ -121,7 +121,7 @@
   InitializeVM();
   v8::HandleScope scope;
 
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
   Label L, C;
 
   __ mov(r1, Operand(r0));
@@ -174,7 +174,7 @@
   } T;
   T t;
 
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
   Label L, C;
 
   __ mov(ip, Operand(sp));
@@ -241,11 +241,11 @@
 
   // Create a function that accepts &t, and loads, manipulates, and stores
   // the doubles and floats.
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
   Label L, C;
 
 
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
 
     __ mov(ip, Operand(sp));
@@ -357,9 +357,9 @@
   InitializeVM();
   v8::HandleScope scope;
 
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
 
-  if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7)) {
     CpuFeatures::Scope scope(ARMv7);
     // On entry, r0 = 0xAAAAAAAA = 0b10..10101010.
     __ ubfx(r0, r0, 1, 12);  // 0b00..010101010101 = 0x555
@@ -393,9 +393,9 @@
   InitializeVM();
   v8::HandleScope scope;
 
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
 
-  if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7)) {
     CpuFeatures::Scope scope(ARMv7);
     __ usat(r1, 8, Operand(r0));           // Sat 0xFFFF to 0-255 = 0xFF.
     __ usat(r2, 12, Operand(r0, ASR, 9));  // Sat (0xFFFF>>9) to 0-4095 = 0x7F.
@@ -436,9 +436,9 @@
   InitializeVM();
   v8::HandleScope scope;
 
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
 
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
 
     Label wrong_exception;
diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc
index 694bd57..576739b 100644
--- a/test/cctest/test-assembler-ia32.cc
+++ b/test/cctest/test-assembler-ia32.cc
@@ -61,7 +61,7 @@
   v8::HandleScope scope;
 
   v8::internal::byte buffer[256];
-  Assembler assm(buffer, sizeof buffer);
+  Assembler assm(Isolate::Current(), buffer, sizeof buffer);
 
   __ mov(eax, Operand(esp, 4));
   __ add(eax, Operand(esp, 8));
@@ -89,7 +89,7 @@
   v8::HandleScope scope;
 
   v8::internal::byte buffer[256];
-  Assembler assm(buffer, sizeof buffer);
+  Assembler assm(Isolate::Current(), buffer, sizeof buffer);
   Label L, C;
 
   __ mov(edx, Operand(esp, 4));
@@ -127,7 +127,7 @@
   v8::HandleScope scope;
 
   v8::internal::byte buffer[256];
-  Assembler assm(buffer, sizeof buffer);
+  Assembler assm(Isolate::Current(), buffer, sizeof buffer);
   Label L, C;
 
   __ mov(edx, Operand(esp, 4));
@@ -167,15 +167,15 @@
 typedef int (*F3)(float x);
 
 TEST(AssemblerIa323) {
-  if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) return;
-
   InitializeVM();
+  if (!CpuFeatures::IsSupported(SSE2)) return;
+
   v8::HandleScope scope;
 
   v8::internal::byte buffer[256];
-  Assembler assm(buffer, sizeof buffer);
+  Assembler assm(Isolate::Current(), buffer, sizeof buffer);
 
-  CHECK(Isolate::Current()->cpu_features()->IsSupported(SSE2));
+  CHECK(CpuFeatures::IsSupported(SSE2));
   { CpuFeatures::Scope fscope(SSE2);
     __ cvttss2si(eax, Operand(esp, 4));
     __ ret(0);
@@ -202,15 +202,15 @@
 typedef int (*F4)(double x);
 
 TEST(AssemblerIa324) {
-  if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) return;
-
   InitializeVM();
+  if (!CpuFeatures::IsSupported(SSE2)) return;
+
   v8::HandleScope scope;
 
   v8::internal::byte buffer[256];
-  Assembler assm(buffer, sizeof buffer);
+  Assembler assm(Isolate::Current(), buffer, sizeof buffer);
 
-  CHECK(Isolate::Current()->cpu_features()->IsSupported(SSE2));
+  CHECK(CpuFeatures::IsSupported(SSE2));
   CpuFeatures::Scope fscope(SSE2);
   __ cvttsd2si(eax, Operand(esp, 4));
   __ ret(0);
@@ -239,7 +239,7 @@
   v8::HandleScope scope;
 
   v8::internal::byte buffer[256];
-  Assembler assm(buffer, sizeof buffer);
+  Assembler assm(Isolate::Current(), buffer, sizeof buffer);
 
   __ mov(eax, Operand(reinterpret_cast<intptr_t>(&baz), RelocInfo::NONE));
   __ ret(0);
@@ -259,14 +259,14 @@
 typedef double (*F5)(double x, double y);
 
 TEST(AssemblerIa326) {
-  if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) return;
-
   InitializeVM();
+  if (!CpuFeatures::IsSupported(SSE2)) return;
+
   v8::HandleScope scope;
-  CHECK(Isolate::Current()->cpu_features()->IsSupported(SSE2));
+  CHECK(CpuFeatures::IsSupported(SSE2));
   CpuFeatures::Scope fscope(SSE2);
   v8::internal::byte buffer[256];
-  Assembler assm(buffer, sizeof buffer);
+  Assembler assm(Isolate::Current(), buffer, sizeof buffer);
 
   __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
   __ movdbl(xmm1, Operand(esp, 3 * kPointerSize));
@@ -305,14 +305,14 @@
 typedef double (*F6)(int x);
 
 TEST(AssemblerIa328) {
-  if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) return;
-
   InitializeVM();
+  if (!CpuFeatures::IsSupported(SSE2)) return;
+
   v8::HandleScope scope;
-  CHECK(Isolate::Current()->cpu_features()->IsSupported(SSE2));
+  CHECK(CpuFeatures::IsSupported(SSE2));
   CpuFeatures::Scope fscope(SSE2);
   v8::internal::byte buffer[256];
-  Assembler assm(buffer, sizeof buffer);
+  Assembler assm(Isolate::Current(), buffer, sizeof buffer);
   __ mov(eax, Operand(esp, 4));
   __ cvtsi2sd(xmm0, Operand(eax));
   // Copy xmm0 to st(0) using eight bytes of stack.
@@ -345,7 +345,7 @@
   InitializeVM();
   v8::HandleScope scope;
   v8::internal::byte buffer[256];
-  MacroAssembler assm(buffer, sizeof buffer);
+  MacroAssembler assm(Isolate::Current(), buffer, sizeof buffer);
   enum { kEqual = 0, kGreater = 1, kLess = 2, kNaN = 3, kUndefined = 4 };
   Label equal_l, less_l, greater_l, nan_l;
   __ fld_d(Operand(esp, 3 * kPointerSize));
diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc
index 7e2115a..ea70f54 100644
--- a/test/cctest/test-assembler-x64.cc
+++ b/test/cctest/test-assembler-x64.cc
@@ -35,30 +35,30 @@
 #include "serialize.h"
 #include "cctest.h"
 
-using v8::internal::byte;
-using v8::internal::OS;
 using v8::internal::Assembler;
-using v8::internal::Operand;
-using v8::internal::Immediate;
-using v8::internal::Label;
-using v8::internal::rax;
-using v8::internal::rsi;
-using v8::internal::rdi;
-using v8::internal::rcx;
-using v8::internal::rdx;
-using v8::internal::rbp;
-using v8::internal::rsp;
-using v8::internal::r8;
-using v8::internal::r9;
-using v8::internal::r13;
-using v8::internal::r15;
-using v8::internal::times_1;
-
-using v8::internal::FUNCTION_CAST;
 using v8::internal::CodeDesc;
+using v8::internal::FUNCTION_CAST;
+using v8::internal::Immediate;
+using v8::internal::Isolate;
+using v8::internal::Label;
+using v8::internal::OS;
+using v8::internal::Operand;
+using v8::internal::byte;
+using v8::internal::greater;
 using v8::internal::less_equal;
 using v8::internal::not_equal;
-using v8::internal::greater;
+using v8::internal::r13;
+using v8::internal::r15;
+using v8::internal::r8;
+using v8::internal::r9;
+using v8::internal::rax;
+using v8::internal::rbp;
+using v8::internal::rcx;
+using v8::internal::rdi;
+using v8::internal::rdx;
+using v8::internal::rsi;
+using v8::internal::rsp;
+using v8::internal::times_1;
 
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
@@ -93,7 +93,7 @@
                                                  &actual_size,
                                                  true));
   CHECK(buffer);
-  Assembler assm(buffer, static_cast<int>(actual_size));
+  Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
 
   // Assemble a simple function that copies argument 2 and returns it.
   __ movq(rax, arg2);
@@ -115,7 +115,7 @@
                                                  &actual_size,
                                                  true));
   CHECK(buffer);
-  Assembler assm(buffer, static_cast<int>(actual_size));
+  Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
 
   // Assemble a simple function that copies argument 2 and returns it.
   // We compile without stack frame pointers, so the gdb debugger shows
@@ -147,7 +147,7 @@
                                                  &actual_size,
                                                  true));
   CHECK(buffer);
-  Assembler assm(buffer, static_cast<int>(actual_size));
+  Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
 
   // Assemble a simple function that adds arguments returning the sum.
   __ movq(rax, arg2);
@@ -169,7 +169,7 @@
                                                  &actual_size,
                                                  true));
   CHECK(buffer);
-  Assembler assm(buffer, static_cast<int>(actual_size));
+  Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
 
   // Assemble a simple function that multiplies arguments returning the high
   // word.
@@ -197,7 +197,7 @@
                                                  &actual_size,
                                                  true));
   CHECK(buffer);
-  Assembler assm(buffer, static_cast<int>(actual_size));
+  Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
 
   // Assemble a simple function that copies argument 2 and returns it.
   __ push(rbp);
@@ -231,7 +231,7 @@
                                                  &actual_size,
                                                  true));
   CHECK(buffer);
-  Assembler assm(buffer, static_cast<int>(actual_size));
+  Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
 
   // Assemble a simple function that copies argument 1 and returns it.
   __ push(rbp);
@@ -260,7 +260,7 @@
                                                  &actual_size,
                                                  true));
   CHECK(buffer);
-  Assembler assm(buffer, static_cast<int>(actual_size));
+  Assembler assm(Isolate::Current(), buffer, static_cast<int>(actual_size));
   // Assemble two loops using rax as counter, and verify the ending counts.
   Label Fail;
   __ movq(rax, Immediate(-3));
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 3221614..02daff8 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -72,11 +72,11 @@
 // Setup V8 to a state where we can at least run the assembler and
 // disassembler. Declare the variables and allocate the data structures used
 // in the rest of the macros.
-#define SETUP() \
-  InitializeVM(); \
-  v8::HandleScope scope; \
+#define SETUP()                                           \
+  InitializeVM();                                         \
+  v8::HandleScope scope;                                  \
   byte *buffer = reinterpret_cast<byte*>(malloc(4*1024)); \
-  Assembler assm(buffer, 4*1024); \
+  Assembler assm(Isolate::Current(), buffer, 4*1024);     \
   bool failure = false;
 
 
@@ -270,7 +270,7 @@
           "13a06000       movne r6, #0");
 
   // mov -> movw.
-  if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7)) {
     COMPARE(mov(r5, Operand(0x01234), LeaveCC, ne),
             "13015234       movwne r5, #4660");
     // We only disassemble one instruction so the eor instruction is not here.
@@ -360,7 +360,7 @@
 TEST(Type3) {
   SETUP();
 
-  if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7)) {
     COMPARE(ubfx(r0, r1, 5, 10),
             "e7e902d1       ubfx r0, r1, #5, #10");
     COMPARE(ubfx(r1, r0, 5, 10),
@@ -415,7 +415,7 @@
 TEST(Vfp) {
   SETUP();
 
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     COMPARE(vmov(d0, d1),
             "eeb00b41       vmov.f64 d0, d1");
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index 26da5c9..cb735c7 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -58,7 +58,7 @@
   InitializeVM();
   v8::HandleScope scope;
   v8::internal::byte buffer[2048];
-  Assembler assm(buffer, sizeof buffer);
+  Assembler assm(Isolate::Current(), buffer, sizeof buffer);
   DummyStaticFunction(NULL);  // just bloody use it (DELETE; debugging)
 
   // Short immediate instructions
@@ -107,12 +107,12 @@
   __ xor_(edx, 3);
   __ nop();
   {
-    CHECK(Isolate::Current()->cpu_features()->IsSupported(CPUID));
+    CHECK(CpuFeatures::IsSupported(CPUID));
     CpuFeatures::Scope fscope(CPUID);
     __ cpuid();
   }
   {
-    CHECK(Isolate::Current()->cpu_features()->IsSupported(RDTSC));
+    CHECK(CpuFeatures::IsSupported(RDTSC));
     CpuFeatures::Scope fscope(RDTSC);
     __ rdtsc();
   }
@@ -375,7 +375,7 @@
   __ fwait();
   __ nop();
   {
-    if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
+    if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatures::Scope fscope(SSE2);
       __ cvttss2si(edx, Operand(ebx, ecx, times_4, 10000));
       __ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
@@ -397,7 +397,7 @@
 
   // cmov.
   {
-    if (Isolate::Current()->cpu_features()->IsSupported(CMOV)) {
+    if (CpuFeatures::IsSupported(CMOV)) {
       CpuFeatures::Scope use_cmov(CMOV);
       __ cmov(overflow, eax, Operand(eax, 0));
       __ cmov(no_overflow, eax, Operand(eax, 1));
@@ -420,7 +420,7 @@
 
   // andpd, cmpltsd, movaps, psllq, psrlq, por.
   {
-    if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
+    if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatures::Scope fscope(SSE2);
       __ andpd(xmm0, xmm1);
       __ andpd(xmm1, xmm2);
@@ -449,7 +449,7 @@
   }
 
   {
-    if (Isolate::Current()->cpu_features()->IsSupported(SSE4_1)) {
+    if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope scope(SSE4_1);
       __ pextrd(Operand(eax), xmm0, 1);
       __ pinsrd(xmm1, Operand(eax), 0);
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 141b42f..bd08d4c 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -574,10 +574,10 @@
 
   // Find references to code.
   const v8::HeapGraphNode* compiled_code =
-      GetProperty(compiled, v8::HeapGraphEdge::kInternal, "code");
+      GetProperty(compiled, v8::HeapGraphEdge::kInternal, "shared");
   CHECK_NE(NULL, compiled_code);
   const v8::HeapGraphNode* lazy_code =
-      GetProperty(lazy, v8::HeapGraphEdge::kInternal, "code");
+      GetProperty(lazy, v8::HeapGraphEdge::kInternal, "shared");
   CHECK_NE(NULL, lazy_code);
 
   // Verify that non-compiled code doesn't contain references to "x"
@@ -1257,9 +1257,9 @@
       ccc, v8::HeapGraphNode::kString, "CCC");
   CHECK_NE(NULL, n_CCC);
 
-  CHECK_EQ(aaa, GetProperty(n_AAA, v8::HeapGraphEdge::kInternal, "Native"));
-  CHECK_EQ(aaa, GetProperty(n_BBB, v8::HeapGraphEdge::kInternal, "Native"));
-  CHECK_EQ(ccc, GetProperty(n_CCC, v8::HeapGraphEdge::kInternal, "Native"));
+  CHECK_EQ(aaa, GetProperty(n_AAA, v8::HeapGraphEdge::kInternal, "native"));
+  CHECK_EQ(aaa, GetProperty(n_BBB, v8::HeapGraphEdge::kInternal, "native"));
+  CHECK_EQ(ccc, GetProperty(n_CCC, v8::HeapGraphEdge::kInternal, "native"));
 }
 
 
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 86860ce..09aa613 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -70,7 +70,7 @@
   // Test FindCodeObject
 #define __ assm.
 
-  Assembler assm(NULL, 0);
+  Assembler assm(Isolate::Current(), NULL, 0);
 
   __ nop();  // supported on all architectures
 
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index c7c67b0..59eeed9 100755
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -35,48 +35,49 @@
 #include "serialize.h"
 #include "cctest.h"
 
-using v8::internal::byte;
-using v8::internal::OS;
 using v8::internal::Assembler;
+using v8::internal::CodeDesc;
 using v8::internal::Condition;
-using v8::internal::MacroAssembler;
+using v8::internal::FUNCTION_CAST;
 using v8::internal::HandleScope;
-using v8::internal::Operand;
 using v8::internal::Immediate;
-using v8::internal::SmiIndex;
+using v8::internal::Isolate;
 using v8::internal::Label;
+using v8::internal::MacroAssembler;
+using v8::internal::OS;
+using v8::internal::Operand;
 using v8::internal::RelocInfo;
-using v8::internal::rax;
-using v8::internal::rbx;
-using v8::internal::rsi;
-using v8::internal::rdi;
-using v8::internal::rcx;
-using v8::internal::rdx;
-using v8::internal::rbp;
-using v8::internal::rsp;
-using v8::internal::r8;
-using v8::internal::r9;
+using v8::internal::Smi;
+using v8::internal::SmiIndex;
+using v8::internal::byte;
+using v8::internal::carry;
+using v8::internal::greater;
+using v8::internal::greater_equal;
+using v8::internal::kIntSize;
+using v8::internal::kPointerSize;
+using v8::internal::kSmiTagMask;
+using v8::internal::kSmiValueSize;
+using v8::internal::less_equal;
+using v8::internal::negative;
+using v8::internal::not_carry;
+using v8::internal::not_equal;
+using v8::internal::not_zero;
+using v8::internal::positive;
 using v8::internal::r11;
 using v8::internal::r13;
 using v8::internal::r14;
 using v8::internal::r15;
+using v8::internal::r8;
+using v8::internal::r9;
+using v8::internal::rax;
+using v8::internal::rbp;
+using v8::internal::rbx;
+using v8::internal::rcx;
+using v8::internal::rdi;
+using v8::internal::rdx;
+using v8::internal::rsi;
+using v8::internal::rsp;
 using v8::internal::times_pointer_size;
-using v8::internal::FUNCTION_CAST;
-using v8::internal::CodeDesc;
-using v8::internal::less_equal;
-using v8::internal::not_equal;
-using v8::internal::not_zero;
-using v8::internal::greater;
-using v8::internal::greater_equal;
-using v8::internal::carry;
-using v8::internal::not_carry;
-using v8::internal::negative;
-using v8::internal::positive;
-using v8::internal::Smi;
-using v8::internal::kSmiTagMask;
-using v8::internal::kSmiValueSize;
-using v8::internal::kPointerSize;
-using v8::internal::kIntSize;
 
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
@@ -157,7 +158,9 @@
                                                    true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
   MacroAssembler* masm = &assembler;  // Create a pointer for the __ macro.
   masm->set_allow_stub_calls(false);
   EntryCode(masm);
@@ -245,7 +248,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -295,7 +300,9 @@
                                                  true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -423,7 +430,9 @@
                                                  true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -467,7 +476,9 @@
                                                    true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -715,7 +726,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -803,7 +816,9 @@
                                                  true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -993,7 +1008,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -1083,7 +1100,9 @@
                                                  true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -1189,7 +1208,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -1299,7 +1320,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -1395,7 +1418,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -1464,7 +1489,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);  // Avoid inline checks.
@@ -1543,7 +1570,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -1624,7 +1653,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -1707,7 +1738,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -1774,7 +1807,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -1870,7 +1905,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -1976,7 +2013,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -2045,7 +2084,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -2109,7 +2150,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
@@ -2152,7 +2195,9 @@
                                       true));
   CHECK(buffer);
   HandleScope handles;
-  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+  MacroAssembler assembler(Isolate::Current(),
+                           buffer,
+                           static_cast<int>(actual_size));
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
diff --git a/test/cctest/test-utils.cc b/test/cctest/test-utils.cc
index 018018a..ce53f8e 100644
--- a/test/cctest/test-utils.cc
+++ b/test/cctest/test-utils.cc
@@ -89,8 +89,8 @@
   memset(dst.start(), 0xFF, dst.length());
   byte* to = dst.start() + 32 + destination_alignment;
   byte* from = src.start() + source_alignment;
-  int length = kMinComplexMemCopy + length_alignment;
-  MemCopy(to, from, static_cast<size_t>(length));
+  int length = OS::kMinComplexMemCopy + length_alignment;
+  OS::MemCopy(to, from, static_cast<size_t>(length));
   printf("[%d,%d,%d]\n",
          source_alignment, destination_alignment, length_alignment);
   for (int i = 0; i < length; i++) {
@@ -103,8 +103,9 @@
 
 
 TEST(MemCopy) {
+  v8::V8::Initialize();
   OS::Setup();
-  const int N = kMinComplexMemCopy + 128;
+  const int N = OS::kMinComplexMemCopy + 128;
   Vector<byte> buffer1 = Vector<byte>::New(N);
   Vector<byte> buffer2 = Vector<byte>::New(N);
 
diff --git a/test/mjsunit/compiler/global-accessors.js b/test/mjsunit/compiler/global-accessors.js
new file mode 100644
index 0000000..bd031a8
--- /dev/null
+++ b/test/mjsunit/compiler/global-accessors.js
@@ -0,0 +1,47 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This test checks that no bailout points are missing, by verifying that no
+// asserts are hit in debug mode.
+
+test_count_operation()
+test_compound_assignment()
+
+function f() {}
+function test_count_operation()
+{
+  this.__defineSetter__('x', f);
+  this.__defineGetter__('x', f);
+  x = x++;
+}
+
+function test_compound_assignment()
+{
+  this.__defineSetter__('y', f);
+  this.__defineGetter__('y', f);
+  y += y;
+}
diff --git a/test/mjsunit/regress/regress-1229.js b/test/mjsunit/regress/regress-1229.js
new file mode 100644
index 0000000..4afb964
--- /dev/null
+++ b/test/mjsunit/regress/regress-1229.js
@@ -0,0 +1,79 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Check that %NewObjectFromBound works correctly when called from an
+// optimized frame.
+function foo(x, y, z) {
+  assertEquals(1, x);
+  assertEquals(2, y);
+  assertEquals(3, z);
+}
+
+var bound_arg = [1];
+
+function f(y, z) {
+  return %NewObjectFromBound(foo, bound_arg);
+}
+
+// Check that %NewObjectFromBound looks at correct frame for inlined function.
+function g(z, y) {
+  return f(y, z); /* f should be inlined into g, note rotated arguments */
+}
+
+// Check that %NewObjectFromBound looks at correct frame for inlined function.
+function ff(x) { }
+function h(z2, y2) {
+  var local_z = z2 >> 1;
+  ff(local_z);
+  var local_y = y2 >> 1;
+  ff(local_y);
+  return f(local_y, local_z); /* f should be inlined into h */
+}
+
+for (var i = 0; i < 100000; i++) f(2, 3);
+
+for (var i = 0; i < 100000; i++) g(3, 2);
+
+for (var i = 0; i < 100000; i++) h(6, 4);
+
+// Check that %_IsConstructCall returns correct value when inlined
+var NON_CONSTRUCT_MARKER = {};
+var CONSTRUCT_MARKER = {};
+function baz() {
+  return (!%_IsConstructCall()) ? NON_CONSTRUCT_MARKER : CONSTRUCT_MARKER;
+}
+
+function bar(x, y, z) {
+  var non_construct = baz(); /* baz should be inlined */
+  assertEquals(non_construct, NON_CONSTRUCT_MARKER);
+  var construct = new baz();
+  assertEquals(construct, CONSTRUCT_MARKER);
+}
+
+for (var i = 0; i < 100000; i++) new bar(1, 2, 3);
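The %NewObjectFromBound runtime call exercised above is, by its name and arguments, the helper behind constructing through a bound function. As a point of reference (not part of this patch, and purely illustrative names), the plain ES5 bind semantics it presumably has to preserve -- even when the calling frame has been optimized or inlined -- look like this:

// Minimal sketch, assuming ordinary ES5 Function.prototype.bind semantics:
// constructing through a bound function prepends the bound arguments and
// ignores the bound receiver.
function Point(x, y) {
  this.x = x;  // first argument comes from the bound argument list
  this.y = y;  // remaining arguments come from the `new` call site
}
var PointAtOne = Point.bind(null, 1);  // analogous to bound_arg = [1] above
var p = new PointAtOne(2);             // behaves like new Point(1, 2)
// p.x === 1, p.y === 2, and p instanceof Point
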
diff --git a/test/preparser/empty.js b/test/preparser/empty.js
new file mode 100644
index 0000000..70b88e2
--- /dev/null
+++ b/test/preparser/empty.js
@@ -0,0 +1,28 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file contains no JavaScript code.
diff --git a/test/preparser/functions-only.js b/test/preparser/functions-only.js
new file mode 100644
index 0000000..4dcde57
--- /dev/null
+++ b/test/preparser/functions-only.js
@@ -0,0 +1,38 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file contains no identifiers or string literals, but does contain
+// symbols.
+
+(function () {
+  if (this != null) {
+    return this;
+  }
+  while (true) {
+    if ([][2]) return false;
+  }
+})({}, function() { return [true]; } );
diff --git a/test/preparser/non-alphanum.js b/test/preparser/non-alphanum.js
new file mode 100644
index 0000000..83bd1f8
--- /dev/null
+++ b/test/preparser/non-alphanum.js
@@ -0,0 +1,34 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file contains no symbols or function declarations, and only
+// non-alphanumeric characters, but does contain valid code.
+
+// Created using http://discogscounter.getfreehosting.co.uk/js-noalnum_com.php
+// Probably only works in Firefox, but should parse fine.
+
+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]])([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(![]+[])[+!+[]]]((![]+[])[+!+[]])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][[]]+[])[+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((![]+[])[+!+[]]+(+[![]]+[])[+[]])[+[]]+(![]+[])[+!+[]]+(+[]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[!+[]+!+[]+!+[]+[+[]]]+(!![]+[])[!+[]+!+[]+!+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!
+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+([][[]]+[])[+!+[]]+(+[![]]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+!+[]]]+([]+([]+[])[([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][[]]+[])[+!+[]]+(![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[+!+[]]+([][[]]+[])[+[]]+([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]])[+!+[]+[!+[]+!+[]+!+[]+!+[]]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[!+[]+!+[]+!+[]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((![]+[])[+!+[]]+[+[]])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+([][[]]+[])[!+[]+!+[]]+[][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()[(![]+[])[+!+[]]+(!![]+[])[+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+([][(![]+[])[!+[]+!+[]+!+[]]+(!![]+[][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]])[+!+[]+[+[]]]+(!![]+[])[+!+[]]+(!![]+[])[+[]]][([][(![]+[])[+[]]+(![]+[]+[][[]])[+!+[]+[+[]]]+(![]+[])[!+[]+!+[]]+(!![]+[])[+[]]+(!![]+[])[!+[]+!+[]+!+[]]+(!![]+[])[+!+[]]]+[])[!+[]+!+[]+!+[]]+(![]+[])[+!+[]]+(![]+[])[!+[]+!+[]]+(![]+[])[!+[]+!+[]]]()+[])[!+[]+!+[]]]((+(+!+[]+(!+[]+[])[!+[]+!+[]+!+[]]+[+!+[]]+[+[]]+[+[]]+[+[]])+[])[+[]]+(![]+[])[+[]])[+[]])
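The payload above is produced by a generator that spells everything out of a handful of type-coercion tricks. The sketch below (not part of this patch; just well-known JavaScript coercion facts) shows the basic building blocks such generators rely on:

// Illustrative sketch of the coercion tricks behind the payload above.
+[]                  // 0       -- unary plus on an empty array
+!+[]                // 1       -- ![] is false, !false is true, +true is 1
![] + []             // "false" -- boolean + array coerces both to strings
(![] + [])[+[]]      // "f"     -- "false"[0]
(!![] + [])[+[]]     // "t"     -- "true"[0]
// Longer names such as "constructor" are typically spelled out letter by
// letter the same way, which is how such generators reach Function and
// run arbitrary code without using any alphanumeric characters directly.
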
diff --git a/test/preparser/symbols-only.js b/test/preparser/symbols-only.js
new file mode 100644
index 0000000..b652063
--- /dev/null
+++ b/test/preparser/symbols-only.js
@@ -0,0 +1,49 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file contains no function declarations.
+
+var x = 42;
+var y = "hello world";
+if (x == y) {
+  with ({ x: 10, y: "20", z: 42 }) {
+    print(z);
+  }
+}
+try {
+  x = 2;
+  throw y;
+  y = 4;
+} catch (e) {
+  y = e;
+} finally {
+  x = y;
+}
+for (var i = 0; i < 10; i++) {
+  x += x;
+}
+print(y);
diff --git a/test/preparser/testcfg.py b/test/preparser/testcfg.py
new file mode 100644
index 0000000..c78d03b
--- /dev/null
+++ b/test/preparser/testcfg.py
@@ -0,0 +1,90 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import test
+import os
+from os.path import join, dirname, exists
+import platform
+import utils
+
+
+class PreparserTestCase(test.TestCase):
+
+  def __init__(self, root, path, executable, mode, context):
+    super(PreparserTestCase, self).__init__(context, path, mode)
+    self.executable = executable
+    self.root = root
+
+  def GetLabel(self):
+    return "%s %s %s" % (self.mode, self.path[-2], self.path[-1])
+
+  def GetName(self):
+    return self.path[-1]
+
+  def BuildCommand(self, path):
+    testfile = join(self.root, self.GetName()) + ".js"
+    result = [self.executable, testfile]
+    return result
+
+  def GetCommand(self):
+    return self.BuildCommand(self.path)
+
+  def Run(self):
+    return test.TestCase.Run(self)
+
+
+class PreparserTestConfiguration(test.TestConfiguration):
+
+  def __init__(self, context, root):
+    super(PreparserTestConfiguration, self).__init__(context, root)
+
+  def GetBuildRequirements(self):
+    return ['preparser']
+
+  def ListTests(self, current_path, path, mode, variant_flags):
+    executable = join('obj', 'preparser', mode, 'preparser')
+    if utils.IsWindows():
+      executable += '.exe'
+    executable = join(self.context.buildspace, executable)
+    # Find all .js files in tests/preparser directory.
+    filenames = [f[:-3] for f in os.listdir(self.root) if f.endswith(".js")]
+    filenames.sort()
+    result = []
+    for file in filenames:
+      result.append(PreparserTestCase(self.root,
+                                      current_path + [file], executable,
+                                      mode, self.context))
+    return result
+
+  def GetTestStatus(self, sections, defs):
+    status_file = join(self.root, 'preparser.status')
+    if exists(status_file):
+      test.ReadConfigurationInto(status_file, sections, defs)
+
+
+def GetConfiguration(context, root):
+  return PreparserTestConfiguration(context, root)
diff --git a/tools/test.py b/tools/test.py
index 066a559..707e725 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -379,6 +379,7 @@
 
   def Run(self):
     self.BeforeRun()
+    result = "exception"
     try:
       result = self.RunCommand(self.GetCommand())
     finally:
@@ -583,7 +584,9 @@
 
 # Use this to run several variants of the tests, e.g.:
 # VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
-VARIANT_FLAGS = [[], ['--stress-opt', '--always-opt'], ['--nocrankshaft']]
+VARIANT_FLAGS = [[],
+                 ['--stress-opt', '--always-opt'],
+                 ['--nocrankshaft']]
 
 
 class TestRepository(TestSuite):
@@ -1316,7 +1319,7 @@
     return ExpandCommand
 
 
-BUILT_IN_TESTS = ['mjsunit', 'cctest', 'message']
+BUILT_IN_TESTS = ['mjsunit', 'cctest', 'message', 'preparser']
 
 
 def GetSuites(test_root):
@@ -1409,9 +1412,6 @@
   globally_unused_rules = None
   for path in paths:
     for mode in options.mode:
-      if not exists(context.GetVm(mode)):
-        print "Can't find shell executable: '%s'" % context.GetVm(mode)
-        continue
       env = {
         'mode': mode,
         'system': utils.GuessOS(),
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index 7a05ef1..9d6bfb6 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -345,7 +345,6 @@
   return this.stateFilter_ == null || this.stateFilter_ == vmState;
 };
 
-
 TickProcessor.prototype.processTick = function(pc,
                                                sp,
                                                is_external_callback,
@@ -361,8 +360,10 @@
   if (is_external_callback) {
     // Don't use PC when in external callback code, as it can point
     // inside callback's code, and we will erroneously report
-    // that a callback calls itself.
-    pc = 0;
+    // that a callback calls itself. Instead we use tos_or_external_callback,
+    // as simply resetting PC will produce unaccounted ticks.
+    pc = tos_or_external_callback;
+    tos_or_external_callback = 0;
   } else if (tos_or_external_callback) {
     // Find out, if top of stack was pointing inside a JS function
     // meaning that we have encountered a frameless invocation.