Merge V8 at r7668: Initial merge by Git.

Change-Id: I1703c8b4f5c63052451a22cf3fb878abc9a0ec75
diff --git a/src/SConscript b/src/SConscript
index a740584..417e283 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -31,6 +31,7 @@
 sys.path.append(join(root_dir, 'tools'))
 import js2c
 Import('context')
+Import('tools')
 
 
 SOURCES = {
@@ -86,7 +87,6 @@
     interpreter-irregexp.cc
     isolate.cc
     jsregexp.cc
-    jump-target.cc
     lithium-allocator.cc
     lithium.cc
     liveedit.cc
@@ -106,7 +106,6 @@
     regexp-macro-assembler-irregexp.cc
     regexp-macro-assembler.cc
     regexp-stack.cc
-    register-allocator.cc
     rewriter.cc
     runtime.cc
     runtime-profiler.cc
@@ -132,14 +131,11 @@
     v8threads.cc
     variables.cc
     version.cc
-    virtual-frame.cc
     zone.cc
     extensions/gc-extension.cc
     extensions/externalize-string-extension.cc
     """),
   'arch:arm': Split("""
-    jump-target-light.cc
-    virtual-frame-light.cc
     arm/builtins-arm.cc
     arm/code-stubs-arm.cc
     arm/codegen-arm.cc
@@ -151,20 +147,15 @@
     arm/frames-arm.cc
     arm/full-codegen-arm.cc
     arm/ic-arm.cc
-    arm/jump-target-arm.cc
     arm/lithium-arm.cc
     arm/lithium-codegen-arm.cc
     arm/lithium-gap-resolver-arm.cc
     arm/macro-assembler-arm.cc
     arm/regexp-macro-assembler-arm.cc
-    arm/register-allocator-arm.cc
     arm/stub-cache-arm.cc
-    arm/virtual-frame-arm.cc
     arm/assembler-arm.cc
     """),
   'arch:mips': Split("""
-    jump-target-light.cc
-    virtual-frame-light.cc
     mips/assembler-mips.cc
     mips/builtins-mips.cc
     mips/code-stubs-mips.cc
@@ -177,16 +168,11 @@
     mips/frames-mips.cc
     mips/full-codegen-mips.cc
     mips/ic-mips.cc
-    mips/jump-target-mips.cc
     mips/macro-assembler-mips.cc
     mips/regexp-macro-assembler-mips.cc
-    mips/register-allocator-mips.cc
     mips/stub-cache-mips.cc
-    mips/virtual-frame-mips.cc
     """),
   'arch:ia32': Split("""
-    jump-target-heavy.cc
-    virtual-frame-heavy.cc
     ia32/assembler-ia32.cc
     ia32/builtins-ia32.cc
     ia32/code-stubs-ia32.cc
@@ -198,19 +184,14 @@
     ia32/frames-ia32.cc
     ia32/full-codegen-ia32.cc
     ia32/ic-ia32.cc
-    ia32/jump-target-ia32.cc
     ia32/lithium-codegen-ia32.cc
     ia32/lithium-gap-resolver-ia32.cc
     ia32/lithium-ia32.cc
     ia32/macro-assembler-ia32.cc
     ia32/regexp-macro-assembler-ia32.cc
-    ia32/register-allocator-ia32.cc
     ia32/stub-cache-ia32.cc
-    ia32/virtual-frame-ia32.cc
     """),
   'arch:x64': Split("""
-    jump-target-heavy.cc
-    virtual-frame-heavy.cc
     x64/assembler-x64.cc
     x64/builtins-x64.cc
     x64/code-stubs-x64.cc
@@ -222,15 +203,12 @@
     x64/frames-x64.cc
     x64/full-codegen-x64.cc
     x64/ic-x64.cc
-    x64/jump-target-x64.cc
     x64/lithium-codegen-x64.cc
     x64/lithium-gap-resolver-x64.cc
     x64/lithium-x64.cc
     x64/macro-assembler-x64.cc
     x64/regexp-macro-assembler-x64.cc
-    x64/register-allocator-x64.cc
     x64/stub-cache-x64.cc
-    x64/virtual-frame-x64.cc
     """),
   'simulator:arm': ['arm/simulator-arm.cc'],
   'simulator:mips': ['mips/simulator-mips.cc'],
@@ -319,13 +297,18 @@
 '''.split()
 
 
+EXPERIMENTAL_LIBRARY_FILES = '''
+proxy.js
+'''.split()
+
+
 def Abort(message):
   print message
   sys.exit(1)
 
 
 def ConfigureObjectFiles():
-  env = Environment()
+  env = Environment(tools=tools)
   env.Replace(**context.flags['v8'])
   context.ApplyEnvOverrides(env)
   env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
@@ -346,9 +329,16 @@
   # compile it.
   library_files = [s for s in LIBRARY_FILES]
   library_files.append('macros.py')
-  libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files, TYPE='CORE')
+  libraries_src = env.JS2C(['libraries.cc'], library_files, TYPE='CORE')
   libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])
 
+  # Combine the experimental JavaScript library files into a C++ file
+  # and compile it.
+  experimental_library_files = [s for s in EXPERIMENTAL_LIBRARY_FILES]
+  experimental_library_files.append('macros.py')
+  experimental_libraries_src = env.JS2C(['experimental-libraries.cc'], experimental_library_files, TYPE='EXPERIMENTAL')
+  experimental_libraries_obj = context.ConfigureObject(env, experimental_libraries_src, CPPPATH=['.'])
+
   source_objs = context.ConfigureObject(env, source_files)
   non_snapshot_files = [source_objs]
 
@@ -365,7 +355,7 @@
   mksnapshot_env = env.Copy()
   mksnapshot_env.Replace(**context.flags['mksnapshot'])
   mksnapshot_src = 'mksnapshot.cc'
-  mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
+  mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, experimental_libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
   if context.use_snapshot:
     if context.build_snapshot:
       snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
@@ -374,7 +364,7 @@
     snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
   else:
     snapshot_obj = empty_snapshot_obj
-  library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
+  library_objs = [non_snapshot_files, libraries_obj, experimental_libraries_obj, snapshot_obj]
   return (library_objs, d8_objs, [mksnapshot], preparser_objs)
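
The build changes above add a second js2c pass: proxy.js is listed in EXPERIMENTAL_LIBRARY_FILES, compiled with TYPE='EXPERIMENTAL' into experimental-libraries.cc, and the resulting object is linked into both mksnapshot and the library proper. As a purely hypothetical sketch of what such a generated natives file contains (real js2c output uses different symbol names and framing), the JavaScript source simply becomes static data in the binary:

    // Hypothetical sketch of a js2c-generated natives file; the real
    // generated names and structure differ.
    namespace v8 {
    namespace internal {

    // proxy.js, after macros.py preprocessing, baked in as a literal.
    static const char kExperimentalNative0[] =
        "/* expanded contents of proxy.js */";

    }  // namespace internal
    }  // namespace v8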
 
 
diff --git a/src/accessors.cc b/src/accessors.cc
index e33b4d7..5f9bf74 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -568,172 +568,6 @@
 // Accessors::FunctionArguments
 //
 
-static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
-  if (slot_index >= 0) {
-    const int offset = JavaScriptFrameConstants::kLocal0Offset;
-    return frame->fp() + offset - (slot_index * kPointerSize);
-  } else {
-    const int offset = JavaScriptFrameConstants::kReceiverOffset;
-    return frame->caller_sp() + offset + (slot_index * kPointerSize);
-  }
-}
-
-
-// We can't intermix stack decoding and allocations because
-// deoptimization infrastructure is not GC safe.
-// Thus we build a temporary structure in malloced space.
-class SlotRef BASE_EMBEDDED {
- public:
-  enum SlotRepresentation {
-    UNKNOWN,
-    TAGGED,
-    INT32,
-    DOUBLE,
-    LITERAL
-  };
-
-  SlotRef()
-      : addr_(NULL), representation_(UNKNOWN) { }
-
-  SlotRef(Address addr, SlotRepresentation representation)
-      : addr_(addr), representation_(representation) { }
-
-  explicit SlotRef(Object* literal)
-      : literal_(literal), representation_(LITERAL) { }
-
-  Handle<Object> GetValue() {
-    switch (representation_) {
-      case TAGGED:
-        return Handle<Object>(Memory::Object_at(addr_));
-
-      case INT32: {
-        int value = Memory::int32_at(addr_);
-        if (Smi::IsValid(value)) {
-          return Handle<Object>(Smi::FromInt(value));
-        } else {
-          return Isolate::Current()->factory()->NewNumberFromInt(value);
-        }
-      }
-
-      case DOUBLE: {
-        double value = Memory::double_at(addr_);
-        return Isolate::Current()->factory()->NewNumber(value);
-      }
-
-      case LITERAL:
-        return literal_;
-
-      default:
-        UNREACHABLE();
-        return Handle<Object>::null();
-    }
-  }
-
- private:
-  Address addr_;
-  Handle<Object> literal_;
-  SlotRepresentation representation_;
-};
-
-
-static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
-                                          DeoptimizationInputData* data,
-                                          JavaScriptFrame* frame) {
-  Translation::Opcode opcode =
-      static_cast<Translation::Opcode>(iterator->Next());
-
-  switch (opcode) {
-    case Translation::BEGIN:
-    case Translation::FRAME:
-      // Peeled off before getting here.
-      break;
-
-    case Translation::ARGUMENTS_OBJECT:
-      // This can be only emitted for local slots not for argument slots.
-      break;
-
-    case Translation::REGISTER:
-    case Translation::INT32_REGISTER:
-    case Translation::DOUBLE_REGISTER:
-    case Translation::DUPLICATE:
-      // We are at safepoint which corresponds to call.  All registers are
-      // saved by caller so there would be no live registers at this
-      // point. Thus these translation commands should not be used.
-      break;
-
-    case Translation::STACK_SLOT: {
-      int slot_index = iterator->Next();
-      Address slot_addr = SlotAddress(frame, slot_index);
-      return SlotRef(slot_addr, SlotRef::TAGGED);
-    }
-
-    case Translation::INT32_STACK_SLOT: {
-      int slot_index = iterator->Next();
-      Address slot_addr = SlotAddress(frame, slot_index);
-      return SlotRef(slot_addr, SlotRef::INT32);
-    }
-
-    case Translation::DOUBLE_STACK_SLOT: {
-      int slot_index = iterator->Next();
-      Address slot_addr = SlotAddress(frame, slot_index);
-      return SlotRef(slot_addr, SlotRef::DOUBLE);
-    }
-
-    case Translation::LITERAL: {
-      int literal_index = iterator->Next();
-      return SlotRef(data->LiteralArray()->get(literal_index));
-    }
-  }
-
-  UNREACHABLE();
-  return SlotRef();
-}
-
-
-
-
-
-static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
-                                           int inlined_frame_index,
-                                           Vector<SlotRef>* args_slots) {
-  AssertNoAllocation no_gc;
-  int deopt_index = AstNode::kNoNumber;
-  DeoptimizationInputData* data =
-      static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
-  TranslationIterator it(data->TranslationByteArray(),
-                         data->TranslationIndex(deopt_index)->value());
-  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
-  ASSERT(opcode == Translation::BEGIN);
-  int frame_count = it.Next();
-  USE(frame_count);
-  ASSERT(frame_count > inlined_frame_index);
-  int frames_to_skip = inlined_frame_index;
-  while (true) {
-    opcode = static_cast<Translation::Opcode>(it.Next());
-    // Skip over operands to advance to the next opcode.
-    it.Skip(Translation::NumberOfOperandsFor(opcode));
-    if (opcode == Translation::FRAME) {
-      if (frames_to_skip == 0) {
-        // We reached the frame corresponding to the inlined function
-        // in question.  Process the translation commands for the
-        // arguments.
-        //
-        // Skip the translation command for the receiver.
-        it.Skip(Translation::NumberOfOperandsFor(
-            static_cast<Translation::Opcode>(it.Next())));
-        // Compute slots for arguments.
-        for (int i = 0; i < args_slots->length(); ++i) {
-          (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
-        }
-        return;
-      }
-      frames_to_skip--;
-    }
-  }
-
-  UNREACHABLE();
-}
-
 
 static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
     JavaScriptFrame* frame,
@@ -742,7 +576,9 @@
   Factory* factory = Isolate::Current()->factory();
   int args_count = inlined_function->shared()->formal_parameter_count();
   ScopedVector<SlotRef> args_slots(args_count);
-  ComputeSlotMappingForArguments(frame, inlined_frame_index, &args_slots);
+  SlotRef::ComputeSlotMappingForArguments(frame,
+                                          inlined_frame_index,
+                                          &args_slots);
   Handle<JSObject> arguments =
       factory->NewArgumentsObject(inlined_function, args_count);
   Handle<FixedArray> array = factory->NewFixedArray(args_count);
@@ -767,7 +603,7 @@
 
   // Find the top invocation of the function by traversing frames.
   List<JSFunction*> functions(2);
-  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+  for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
     JavaScriptFrame* frame = it.frame();
     frame->GetFunctions(&functions);
     for (int i = functions.length() - 1; i >= 0; i--) {
@@ -856,7 +692,7 @@
   Handle<JSFunction> function(holder, isolate);
 
   List<JSFunction*> functions(2);
-  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+  for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
     JavaScriptFrame* frame = it.frame();
     frame->GetFunctions(&functions);
     for (int i = functions.length() - 1; i >= 0; i--) {
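
The SlotRef machinery deleted above leaves this file; the visible diff shows only the removal and the new qualified call to SlotRef::ComputeSlotMappingForArguments. The slot-index convention it decodes is worth keeping in mind: a non-negative index addresses a local below fp, a negative one an incoming argument above the caller's sp. A standalone sketch of the SlotAddress arithmetic, with hypothetical stand-ins for the JavaScriptFrameConstants offsets:

    // Standalone sketch of the SlotAddress arithmetic; the offset
    // constants are illustrative, not real target-specific values.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t kPointerSize = 4;      // 32-bit target, for illustration
      const intptr_t kLocal0Offset = -8;    // hypothetical: first local below fp
      const intptr_t kReceiverOffset = -4;  // hypothetical
      const intptr_t fp = 0x1000, caller_sp = 0x1020;

      // slot_index >= 0: locals, counting down from the frame pointer.
      intptr_t local2 = fp + kLocal0Offset - 2 * kPointerSize;           // 0xff0
      // slot_index < 0: arguments, above the caller's stack pointer.
      intptr_t arg = caller_sp + kReceiverOffset + (-1) * kPointerSize;  // 0x1018
      std::printf("local2=%#lx arg=%#lx\n",
                  static_cast<long>(local2), static_cast<long>(arg));
      return 0;
    }
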
diff --git a/src/api.cc b/src/api.cc
index 2bfa598..c3684f7 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1303,7 +1303,7 @@
   }
   // Copy the data to align it.
   unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
-  i::MemCopy(deserialized_data, data, length);
+  i::OS::MemCopy(deserialized_data, data, length);
 
   return new i::ScriptDataImpl(
       i::Vector<unsigned>(deserialized_data, deserialized_data_length));
@@ -2581,6 +2581,9 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+  // We do not allow exceptions thrown while setting the prototype
+  // to propagate outside.
+  TryCatch try_catch;
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result = i::SetPrototype(self, value_obj);
   has_pending_exception = result.is_null();
@@ -2793,6 +2796,26 @@
 }
 
 
+static Local<Value> GetPropertyByLookup(i::Isolate* isolate,
+                                        i::Handle<i::JSObject> receiver,
+                                        i::Handle<i::String> name,
+                                        i::LookupResult* lookup) {
+  if (!lookup->IsProperty()) {
+    // No real property was found.
+    return Local<Value>();
+  }
+
+  // If the property being looked up is a callback, it can throw
+  // an exception.
+  EXCEPTION_PREAMBLE(isolate);
+  i::Handle<i::Object> result = i::GetProperty(receiver, name, lookup);
+  has_pending_exception = result.is_null();
+  EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+
+  return Utils::ToLocal(result);
+}
+
+
 Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
       Handle<String> key) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -2804,17 +2827,7 @@
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
   i::LookupResult lookup;
   self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
-  if (lookup.IsProperty()) {
-    PropertyAttributes attributes;
-    i::Object* property =
-        self_obj->GetProperty(*self_obj,
-                              &lookup,
-                              *key_obj,
-                              &attributes)->ToObjectUnchecked();
-    i::Handle<i::Object> result(property);
-    return Utils::ToLocal(result);
-  }
-  return Local<Value>();  // No real property was found in prototype chain.
+  return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
 }
 
 
@@ -2827,17 +2840,7 @@
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
   i::LookupResult lookup;
   self_obj->LookupRealNamedProperty(*key_obj, &lookup);
-  if (lookup.IsProperty()) {
-    PropertyAttributes attributes;
-    i::Object* property =
-        self_obj->GetProperty(*self_obj,
-                              &lookup,
-                              *key_obj,
-                              &attributes)->ToObjectUnchecked();
-    i::Handle<i::Object> result(property);
-    return Utils::ToLocal(result);
-  }
-  return Local<Value>();  // No real property was found in prototype chain.
+  return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
 }
 
 
@@ -2880,6 +2883,33 @@
 }
 
 
+static i::Context* GetCreationContext(i::JSObject* object) {
+  i::Object* constructor = object->map()->constructor();
+  i::JSFunction* function;
+  if (!constructor->IsJSFunction()) {
+    // API functions have null as a constructor,
+    // but any JSFunction knows its context immediately.
+    ASSERT(object->IsJSFunction() &&
+           i::JSFunction::cast(object)->shared()->IsApiFunction());
+    function = i::JSFunction::cast(object);
+  } else {
+    function = i::JSFunction::cast(constructor);
+  }
+  return function->context()->global_context();
+}
+
+
+Local<v8::Context> v8::Object::CreationContext() {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate,
+             "v8::Object::CreationContext()", return Local<v8::Context>());
+  ENTER_V8(isolate);
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Context* context = GetCreationContext(*self);
+  return Utils::ToLocal(i::Handle<i::Context>(context));
+}
+
+
 int v8::Object::GetIdentityHash() {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::GetIdentityHash()", return 0);
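
CreationContext() above is new public API: it follows the object's map to its constructor and returns that function's global context, treating API functions (whose maps carry no JSFunction constructor) as their own constructor. A minimal embedder-side usage sketch against the public API of this era; V8 is assumed to be initialized on the calling thread:

    #include <v8.h>

    void Demo() {
      v8::HandleScope handle_scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);

      v8::Local<v8::Object> obj = v8::Object::New();
      // New in this patch: recover the context the object was created
      // in, which need not be the context currently entered.
      v8::Local<v8::Context> creation = obj->CreationContext();
      (void)creation;  // for an object created here, it matches 'context'
      context.Dispose();
    }
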
@@ -3679,6 +3709,7 @@
 
     // Create the environment.
     env = isolate->bootstrapper()->CreateEnvironment(
+        isolate,
         Utils::OpenHandle(*global_object),
         proxy_template,
         extensions);
@@ -4177,9 +4208,11 @@
 
     // Call ResetDateCache(), but expect no exceptions:
     bool caught_exception = false;
-    i::Handle<i::Object> result =
-        i::Execution::TryCall(func, isolate->js_builtins_object(), 0, NULL,
-        &caught_exception);
+    i::Execution::TryCall(func,
+                          isolate->js_builtins_object(),
+                          0,
+                          NULL,
+                          &caught_exception);
   }
 }
 
@@ -4248,7 +4281,9 @@
   ENTER_V8(isolate);
   int real_length = length > 0 ? length : 0;
   i::Handle<i::JSArray> obj = isolate->factory()->NewJSArray(real_length);
-  obj->set_length(*isolate->factory()->NewNumberFromInt(real_length));
+  i::Handle<i::Object> length_obj =
+      isolate->factory()->NewNumberFromInt(real_length);
+  obj->set_length(*length_obj);
   return Utils::ToLocal(obj);
 }
 
@@ -4444,7 +4479,7 @@
   if (IsDeadCheck(isolate, "v8::V8::AddImplicitReferences()")) return;
   STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
   isolate->global_handles()->AddImplicitReferences(
-      *Utils::OpenHandle(*parent),
+      i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*parent)).location(),
       reinterpret_cast<i::Object***>(children), length);
 }
 
@@ -4593,7 +4628,7 @@
 int V8::GetCurrentThreadId() {
   i::Isolate* isolate = i::Isolate::Current();
   EnsureInitializedForIsolate(isolate, "V8::GetCurrentThreadId()");
-  return isolate->thread_id();
+  return isolate->thread_id().ToInteger();
 }
 
 
@@ -4604,10 +4639,11 @@
   // If the thread_id identifies the current thread just terminate
   // execution right away.  Otherwise, ask the thread manager to
   // terminate the thread with the given id if any.
-  if (thread_id == isolate->thread_id()) {
+  i::ThreadId internal_tid = i::ThreadId::FromInteger(thread_id);
+  if (isolate->thread_id().Equals(internal_tid)) {
     isolate->stack_guard()->TerminateExecution();
   } else {
-    isolate->thread_manager()->TerminateExecution(thread_id);
+    isolate->thread_manager()->TerminateExecution(internal_tid);
   }
 }
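
The two hunks above stop passing raw ints for thread ids: internally they become an opaque i::ThreadId, converted at the API boundary with ToInteger()/FromInteger() and compared with Equals(). A minimal sketch of that value-type pattern, mirroring only the surface this diff exercises (the real class lives in V8's isolate code):

    class ThreadId {
     public:
      static ThreadId FromInteger(int id) { return ThreadId(id); }
      int ToInteger() const { return id_; }
      bool Equals(const ThreadId& other) const { return id_ == other.id_; }

     private:
      explicit ThreadId(int id) : id_(id) {}
      int id_;
    };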
 
diff --git a/src/api.h b/src/api.h
index 6d46713..7423d28 100644
--- a/src/api.h
+++ b/src/api.h
@@ -53,8 +53,8 @@
 class NeanderObject {
  public:
   explicit NeanderObject(int size);
-  inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
-  inline NeanderObject(v8::internal::Object* obj);
+  explicit inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
+  explicit inline NeanderObject(v8::internal::Object* obj);
   inline v8::internal::Object* get(int index);
   inline void set(int index, v8::internal::Object* value);
   inline v8::internal::Handle<v8::internal::JSObject> value() { return value_; }
@@ -69,7 +69,7 @@
 class NeanderArray {
  public:
   NeanderArray();
-  inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
+  explicit inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
   inline v8::internal::Handle<v8::internal::JSObject> value() {
     return obj_.value();
   }
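
Adding explicit to the single-argument Neander constructors above disables implicit conversions from handles at call sites. A self-contained illustration of the difference, using stand-in types rather than the real V8 classes:

    struct Handle { void* raw; };

    struct Implicit { Implicit(Handle) {} };           // converts silently
    struct Explicit { explicit Explicit(Handle) {} };  // must be spelled out

    void TakesImplicit(Implicit) {}
    void TakesExplicit(Explicit) {}

    int main() {
      Handle h = { 0 };
      TakesImplicit(h);            // compiles via an implicit conversion
      // TakesExplicit(h);         // error: no implicit conversion
      TakesExplicit(Explicit(h));  // OK: the conversion is now intentional
      return 0;
    }
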
diff --git a/src/arguments.h b/src/arguments.h
index c80548f..a7a30e2 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -99,8 +99,17 @@
   Object* values_[3];
 };
 
-#define RUNTIME_CALLING_CONVENTION Arguments args, Isolate* isolate
-#define RUNTIME_GET_ISOLATE ASSERT(isolate == Isolate::Current())
+
+#define DECLARE_RUNTIME_FUNCTION(Type, Name)    \
+Type Name(Arguments args, Isolate* isolate)
+
+
+#define RUNTIME_FUNCTION(Type, Name)            \
+Type Name(Arguments args, Isolate* isolate)
+
+
+#define RUNTIME_ARGUMENTS(isolate, args) args, isolate
+
 
 } }  // namespace v8::internal
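
With these macros, a runtime entry point spells out its (Arguments, Isolate*) signature at both declaration and definition, and RUNTIME_ARGUMENTS keeps forwarded calls in the same order. A sketch of how such a function reads; Runtime_Example and its body are hypothetical, but the expansion is exactly the macros defined above:

    DECLARE_RUNTIME_FUNCTION(MaybeObject*, Runtime_Example);

    RUNTIME_FUNCTION(MaybeObject*, Runtime_Example) {
      ASSERT(args.length() == 1);
      // The isolate now arrives as an explicit parameter; no more
      // Isolate::Current() plus the old RUNTIME_GET_ISOLATE assertion.
      return isolate->heap()->undefined_value();
    }

    // Forwarding keeps the argument order consistent:
    //   return Runtime_Example(RUNTIME_ARGUMENTS(isolate, args));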
 
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index bd76d9a..3e19a45 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -223,9 +223,9 @@
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     StaticVisitor::VisitPointer(heap, target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
-    StaticVisitor::VisitCodeTarget(this);
+    StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
-    StaticVisitor::VisitGlobalPropertyCell(this);
+    StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -234,7 +234,7 @@
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
-    StaticVisitor::VisitDebugTarget(this);
+    StaticVisitor::VisitDebugTarget(heap, this);
 #endif
   } else if (mode == RelocInfo::RUNTIME_ENTRY) {
     StaticVisitor::VisitRuntimeEntry(this);
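
RelocInfo::Visit above now threads the Heap* through to the code-target, property-cell and debug-target callbacks instead of letting each visitor fetch it from the current isolate. A sketch of the StaticVisitor shape these call sites now expect, listing only the methods this hunk touches, with placeholder bodies:

    class ExampleVisitor : public AllStatic {
     public:
      static void VisitPointer(Heap* heap, Object** p) {}
      static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {}
      static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {}
      static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {}
      static void VisitExternalReference(Address* p) {}
      static void VisitRuntimeEntry(RelocInfo* rinfo) {}
    };
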
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index be34df9..fd8e8b5 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -32,7 +32,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -44,11 +44,12 @@
 namespace v8 {
 namespace internal {
 
-CpuFeatures::CpuFeatures()
-    : supported_(0),
-      enabled_(0),
-      found_by_runtime_probing_(0) {
-}
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+
 
 #ifdef __arm__
 static uint64_t CpuFeaturesImpliedByCompiler() {
@@ -58,48 +59,52 @@
 #endif  // def CAN_USE_ARMV7_INSTRUCTIONS
   // If the compiler is allowed to use VFP then we can use VFP too in our code
   // generation even when generating snapshots.  This won't work for cross
-  // compilation.
+  // compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
 #if defined(__VFP_FP__) && !defined(__SOFTFP__)
-  answer |= 1u << VFP3;
+  answer |= 1u << VFP3 | 1u << ARMv7;
 #endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
 #ifdef CAN_USE_VFP_INSTRUCTIONS
-  answer |= 1u << VFP3;
+  answer |= 1u << VFP3 | 1u << ARMv7;
 #endif  // def CAN_USE_VFP_INSTRUCTIONS
   return answer;
 }
 #endif  // def __arm__
 
 
-void CpuFeatures::Probe(bool portable) {
+void CpuFeatures::Probe() {
+  ASSERT(!initialized_);
+#ifdef DEBUG
+  initialized_ = true;
+#endif
 #ifndef __arm__
-  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
+  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
+  // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
   if (FLAG_enable_vfp3) {
-    supported_ |= 1u << VFP3;
+    supported_ |= 1u << VFP3 | 1u << ARMv7;
   }
   // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
   if (FLAG_enable_armv7) {
     supported_ |= 1u << ARMv7;
   }
 #else  // def __arm__
-  if (portable && Serializer::enabled()) {
+  if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     supported_ |= CpuFeaturesImpliedByCompiler();
     return;  // No features if we might serialize.
   }
 
   if (OS::ArmCpuHasFeature(VFP3)) {
-    // This implementation also sets the VFP flags if
-    // runtime detection of VFP returns true.
-    supported_ |= 1u << VFP3;
-    found_by_runtime_probing_ |= 1u << VFP3;
+    // This implementation also sets the VFP flags if runtime
+    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
+    // 0406B, page A1-6.
+    supported_ |= 1u << VFP3 | 1u << ARMv7;
+    found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
   }
 
   if (OS::ArmCpuHasFeature(ARMv7)) {
     supported_ |= 1u << ARMv7;
     found_by_runtime_probing_ |= 1u << ARMv7;
   }
-
-  if (!portable) found_by_runtime_probing_ = 0;
 #endif
 }
 
@@ -268,8 +273,8 @@
 static const int kMinimalBufferSize = 4*KB;
 
 
-Assembler::Assembler(void* buffer, int buffer_size)
-    : AssemblerBase(Isolate::Current()),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+    : AssemblerBase(arg_isolate),
       positions_recorder_(this),
       allow_peephole_optimization_(false),
       emit_debug_code_(FLAG_debug_code) {
@@ -715,7 +720,7 @@
         *instr ^= kMovMvnFlip;
         return true;
       } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
-        if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+        if (CpuFeatures::IsSupported(ARMv7)) {
           if (imm32 < 0x10000) {
             *instr ^= kMovwLeaveCCFlip;
             *instr |= EncodeMovwImmediate(imm32);
@@ -779,7 +784,7 @@
     // condition code additional instruction conventions can be used.
     if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
       if (must_use_constant_pool() ||
-          !Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+          !CpuFeatures::IsSupported(ARMv7)) {
         // mov instruction will be an ldr from constant pool (one instruction).
         return true;
       } else {
@@ -822,7 +827,7 @@
       Condition cond = Instruction::ConditionField(instr);
       if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
         if (x.must_use_constant_pool() ||
-            !isolate()->cpu_features()->IsSupported(ARMv7)) {
+            !CpuFeatures::IsSupported(ARMv7)) {
           RecordRelocInfo(x.rmode_, x.imm32_);
           ldr(rd, MemOperand(pc, 0), cond);
         } else {
@@ -1265,7 +1270,7 @@
                      const Operand& src,
                      Condition cond) {
   // v6 and above.
-  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+  ASSERT(CpuFeatures::IsSupported(ARMv7));
   ASSERT(!dst.is(pc) && !src.rm_.is(pc));
   ASSERT((satpos >= 0) && (satpos <= 31));
   ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
@@ -1293,7 +1298,7 @@
                      int width,
                      Condition cond) {
   // v7 and above.
-  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+  ASSERT(CpuFeatures::IsSupported(ARMv7));
   ASSERT(!dst.is(pc) && !src.is(pc));
   ASSERT((lsb >= 0) && (lsb <= 31));
   ASSERT((width >= 1) && (width <= (32 - lsb)));
@@ -1313,7 +1318,7 @@
                      int width,
                      Condition cond) {
   // v7 and above.
-  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+  ASSERT(CpuFeatures::IsSupported(ARMv7));
   ASSERT(!dst.is(pc) && !src.is(pc));
   ASSERT((lsb >= 0) && (lsb <= 31));
   ASSERT((width >= 1) && (width <= (32 - lsb)));
@@ -1328,7 +1333,7 @@
 //   bfc dst, #lsb, #width
 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
   // v7 and above.
-  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+  ASSERT(CpuFeatures::IsSupported(ARMv7));
   ASSERT(!dst.is(pc));
   ASSERT((lsb >= 0) && (lsb <= 31));
   ASSERT((width >= 1) && (width <= (32 - lsb)));
@@ -1347,7 +1352,7 @@
                     int width,
                     Condition cond) {
   // v7 and above.
-  ASSERT(isolate()->cpu_features()->IsSupported(ARMv7));
+  ASSERT(CpuFeatures::IsSupported(ARMv7));
   ASSERT(!dst.is(pc) && !src.is(pc));
   ASSERT((lsb >= 0) && (lsb <= 31));
   ASSERT((width >= 1) && (width <= (32 - lsb)));
@@ -1619,7 +1624,7 @@
 
 void Assembler::ldrd(Register dst1, Register dst2,
                      const MemOperand& src, Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(ARMv7));
+  ASSERT(CpuFeatures::IsEnabled(ARMv7));
   ASSERT(src.rm().is(no_reg));
   ASSERT(!dst1.is(lr));  // r14.
   ASSERT_EQ(0, dst1.code() % 2);
@@ -1634,7 +1639,7 @@
   ASSERT(!src1.is(lr));  // r14.
   ASSERT_EQ(0, src1.code() % 2);
   ASSERT_EQ(src1.code() + 1, src2.code());
-  ASSERT(isolate()->cpu_features()->IsEnabled(ARMv7));
+  ASSERT(CpuFeatures::IsEnabled(ARMv7));
   addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
 }
 
@@ -1821,45 +1826,6 @@
 }
 
 
-void Assembler::stc(Coprocessor coproc,
-                    CRegister crd,
-                    const MemOperand& dst,
-                    LFlag l,
-                    Condition cond) {
-  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
-}
-
-
-void Assembler::stc(Coprocessor coproc,
-                    CRegister crd,
-                    Register rn,
-                    int option,
-                    LFlag l,
-                    Condition cond) {
-  // Unindexed addressing.
-  ASSERT(is_uint8(option));
-  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
-       coproc*B8 | (option & 255));
-}
-
-
-void Assembler::stc2(Coprocessor
-                     coproc, CRegister crd,
-                     const MemOperand& dst,
-                     LFlag l) {  // v5 and above
-  stc(coproc, crd, dst, l, kSpecialCondition);
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
-                     CRegister crd,
-                     Register rn,
-                     int option,
-                     LFlag l) {  // v5 and above
-  stc(coproc, crd, rn, option, l, kSpecialCondition);
-}
-
-
 // Support for VFP.
 
 void Assembler::vldr(const DwVfpRegister dst,
@@ -1870,7 +1836,7 @@
   // Instruction details available in ARM DDI 0406A, A8-628.
   // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1011(11-8) | offset
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int u = 1;
   if (offset < 0) {
     offset = -offset;
@@ -1912,7 +1878,7 @@
   // Instruction details available in ARM DDI 0406A, A8-628.
   // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1010(11-8) | offset
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int u = 1;
   if (offset < 0) {
     offset = -offset;
@@ -1956,7 +1922,7 @@
   // Instruction details available in ARM DDI 0406A, A8-786.
   // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
   // Vsrc(15-12) | 1011(11-8) | (offset/4)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int u = 1;
   if (offset < 0) {
     offset = -offset;
@@ -1997,7 +1963,7 @@
   // Instruction details available in ARM DDI 0406A, A8-786.
   // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
   // Vdst(15-12) | 1010(11-8) | (offset/4)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int u = 1;
   if (offset < 0) {
     offset = -offset;
@@ -2032,6 +1998,88 @@
 }
 
 
+void Assembler::vldm(BlockAddrMode am,
+                     Register base,
+                     DwVfpRegister first,
+                     DwVfpRegister last,
+                     Condition cond) {
+  // Instruction details available in ARM DDI 0406A, A8-626.
+  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
+  // first(15-12) | 1010(11-8) | (count * 2)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT_LE(first.code(), last.code());
+  ASSERT(am == ia || am == ia_w || am == db_w);
+  ASSERT(!base.is(pc));
+
+  int sd, d;
+  first.split_code(&sd, &d);
+  int count = last.code() - first.code() + 1;
+  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
+       0xB*B8 | count*2);
+}
+
+
+void Assembler::vstm(BlockAddrMode am,
+                     Register base,
+                     DwVfpRegister first,
+                     DwVfpRegister last,
+                     Condition cond) {
+  // Instruction details available in ARM DDI 0406A, A8-784.
+  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
+  // first(15-12) | 1011(11-8) | (count * 2)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT_LE(first.code(), last.code());
+  ASSERT(am == ia || am == ia_w || am == db_w);
+  ASSERT(!base.is(pc));
+
+  int sd, d;
+  first.split_code(&sd, &d);
+  int count = last.code() - first.code() + 1;
+  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
+       0xB*B8 | count*2);
+}
+
+void Assembler::vldm(BlockAddrMode am,
+                     Register base,
+                     SwVfpRegister first,
+                     SwVfpRegister last,
+                     Condition cond) {
+  // Instruction details available in ARM DDI 0406A, A8-626.
+  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
+  // first(15-12) | 1010(11-8) | (count/2)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT_LE(first.code(), last.code());
+  ASSERT(am == ia || am == ia_w || am == db_w);
+  ASSERT(!base.is(pc));
+
+  int sd, d;
+  first.split_code(&sd, &d);
+  int count = last.code() - first.code() + 1;
+  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
+       0xA*B8 | count);
+}
+
+
+void Assembler::vstm(BlockAddrMode am,
+                     Register base,
+                     SwVfpRegister first,
+                     SwVfpRegister last,
+                     Condition cond) {
+  // Instruction details available in ARM DDI 0406A, A8-784.
+  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
+  // first(15-12) | 1011(11-8) | (count/2)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT_LE(first.code(), last.code());
+  ASSERT(am == ia || am == ia_w || am == db_w);
+  ASSERT(!base.is(pc));
+
+  int sd, d;
+  first.split_code(&sd, &d);
+  int count = last.code() - first.code() + 1;
+  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
+       0xA*B8 | count);
+}
+
 static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
   uint64_t i;
   memcpy(&i, &d, 8);
@@ -2043,7 +2091,7 @@
 // Only works for little endian floating point formats.
 // We don't support VFP on the mixed endian floating point platform.
 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
-  ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
 
   // VMOV can accept an immediate of the form:
   //
@@ -2096,7 +2144,7 @@
                      const Condition cond) {
   // Dd = immediate
   // Instruction details available in ARM DDI 0406B, A8-640.
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
 
   uint32_t enc;
   if (FitsVMOVDoubleImmediate(imm, &enc)) {
@@ -2133,7 +2181,7 @@
                      const Condition cond) {
   // Sd = Sm
   // Instruction details available in ARM DDI 0406B, A8-642.
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   int sd, d, sm, m;
   dst.split_code(&sd, &d);
   src.split_code(&sm, &m);
@@ -2146,7 +2194,7 @@
                      const Condition cond) {
   // Dd = Dm
   // Instruction details available in ARM DDI 0406B, A8-642.
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0xB*B20 |
        dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
 }
@@ -2160,7 +2208,7 @@
   // Instruction details available in ARM DDI 0406A, A8-646.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(!src1.is(pc) && !src2.is(pc));
   emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
        src1.code()*B12 | 0xB*B8 | B4 | dst.code());
@@ -2175,7 +2223,7 @@
   // Instruction details available in ARM DDI 0406A, A8-646.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(!dst1.is(pc) && !dst2.is(pc));
   emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
        dst1.code()*B12 | 0xB*B8 | B4 | src.code());
@@ -2189,7 +2237,7 @@
   // Instruction details available in ARM DDI 0406A, A8-642.
   // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(!src.is(pc));
   int sn, n;
   dst.split_code(&sn, &n);
@@ -2204,7 +2252,7 @@
   // Instruction details available in ARM DDI 0406A, A8-642.
   // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(!dst.is(pc));
   int sn, n;
   src.split_code(&sn, &n);
@@ -2329,7 +2377,7 @@
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
 }
 
@@ -2338,7 +2386,7 @@
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
 }
 
@@ -2347,7 +2395,7 @@
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
 }
 
@@ -2356,7 +2404,7 @@
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
 }
 
@@ -2365,7 +2413,7 @@
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
 }
 
@@ -2374,7 +2422,7 @@
                              const SwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
 }
 
@@ -2383,7 +2431,7 @@
                              const DwVfpRegister src,
                              VFPConversionMode mode,
                              const Condition cond) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
 }
 
@@ -2413,7 +2461,7 @@
   // Instruction details available in ARM DDI 0406A, A8-536.
   // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
        dst.code()*B12 | 0x5*B9 | B8 | src2.code());
 }
@@ -2428,7 +2476,7 @@
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
        dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
 }
@@ -2443,7 +2491,7 @@
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
        dst.code()*B12 | 0x5*B9 | B8 | src2.code());
 }
@@ -2458,7 +2506,7 @@
   // Instruction details available in ARM DDI 0406A, A8-584.
   // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
        dst.code()*B12 | 0x5*B9 | B8 | src2.code());
 }
@@ -2471,7 +2519,7 @@
   // Instruction details available in ARM DDI 0406A, A8-570.
   // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
        src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
 }
@@ -2484,7 +2532,7 @@
   // Instruction details available in ARM DDI 0406A, A8-570.
   // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   ASSERT(src2 == 0.0);
   emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
        src1.code()*B12 | 0x5*B9 | B8 | B6);
@@ -2495,7 +2543,7 @@
   // Instruction details available in ARM DDI 0406A, A8-652.
   // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
   // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0xE*B20 |  B16 |
        dst.code()*B12 | 0xA*B8 | B4);
 }
@@ -2505,7 +2553,7 @@
   // Instruction details available in ARM DDI 0406A, A8-652.
   // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
   // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | 0xF*B20 |  B16 |
        dst.code()*B12 | 0xA*B8 | B4);
 }
@@ -2516,7 +2564,7 @@
                       const Condition cond) {
   // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
   // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
-  ASSERT(isolate()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
   emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
        dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
 }
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 91e6244..9050c2c 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -32,7 +32,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 // A light-weight ARM Assembler
 // Generates user mode instructions for the ARM architecture up to version 5
@@ -468,58 +468,97 @@
 
 // CpuFeatures keeps track of which features are supported by the target CPU.
 // Supported features must be enabled by a Scope before use.
-class CpuFeatures {
+class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  void Probe(bool portable);
+  static void Probe();
 
   // Check whether a feature is supported by the target CPU.
-  bool IsSupported(CpuFeature f) const {
+  static bool IsSupported(CpuFeature f) {
+    ASSERT(initialized_);
     if (f == VFP3 && !FLAG_enable_vfp3) return false;
     return (supported_ & (1u << f)) != 0;
   }
 
+#ifdef DEBUG
   // Check whether a feature is currently enabled.
-  bool IsEnabled(CpuFeature f) const {
-    return (enabled_ & (1u << f)) != 0;
+  static bool IsEnabled(CpuFeature f) {
+    ASSERT(initialized_);
+    Isolate* isolate = Isolate::UncheckedCurrent();
+    if (isolate == NULL) {
+      // When no isolate is available, work as if we're running in
+      // release mode.
+      return IsSupported(f);
+    }
+    unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
+    return (enabled & (1u << f)) != 0;
   }
+#endif
 
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
    public:
-    explicit Scope(CpuFeature f)
-        : cpu_features_(Isolate::Current()->cpu_features()),
-          isolate_(Isolate::Current()) {
-      ASSERT(cpu_features_->IsSupported(f));
+    explicit Scope(CpuFeature f) {
+      unsigned mask = 1u << f;
+      ASSERT(CpuFeatures::IsSupported(f));
       ASSERT(!Serializer::enabled() ||
-             (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
-      old_enabled_ = cpu_features_->enabled_;
-      cpu_features_->enabled_ |= 1u << f;
+             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+      isolate_ = Isolate::UncheckedCurrent();
+      old_enabled_ = 0;
+      if (isolate_ != NULL) {
+        old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
+        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+      }
     }
     ~Scope() {
-      ASSERT_EQ(Isolate::Current(), isolate_);
-      cpu_features_->enabled_ = old_enabled_;
+      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+      if (isolate_ != NULL) {
+        isolate_->set_enabled_cpu_features(old_enabled_);
+      }
     }
    private:
-    unsigned old_enabled_;
-    CpuFeatures* cpu_features_;
     Isolate* isolate_;
+    unsigned old_enabled_;
 #else
    public:
     explicit Scope(CpuFeature f) {}
 #endif
   };
 
+  class TryForceFeatureScope BASE_EMBEDDED {
+   public:
+    explicit TryForceFeatureScope(CpuFeature f)
+        : old_supported_(CpuFeatures::supported_) {
+      if (CanForce()) {
+        CpuFeatures::supported_ |= (1u << f);
+      }
+    }
+
+    ~TryForceFeatureScope() {
+      if (CanForce()) {
+        CpuFeatures::supported_ = old_supported_;
+      }
+    }
+
+   private:
+    static bool CanForce() {
+      // It's only safe to temporarily force support of CPU features
+      // when there's only a single isolate, which is guaranteed when
+      // the serializer is enabled.
+      return Serializer::enabled();
+    }
+
+    const unsigned old_supported_;
+  };
+
  private:
-  CpuFeatures();
-
-  unsigned supported_;
-  unsigned enabled_;
-  unsigned found_by_runtime_probing_;
-
-  friend class Isolate;
+#ifdef DEBUG
+  static bool initialized_;
+#endif
+  static unsigned supported_;
+  static unsigned found_by_runtime_probing_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
 };
@@ -564,7 +603,7 @@
   // for code generation and assumes its size to be buffer_size. If the buffer
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
-  Assembler(void* buffer, int buffer_size);
+  Assembler(Isolate* isolate, void* buffer, int buffer_size);
   ~Assembler();
 
   // Overrides the default provided by FLAG_debug_code.
@@ -908,16 +947,6 @@
   void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
             LFlag l = Short);  // v5 and above
 
-  void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
-           LFlag l = Short, Condition cond = al);
-  void stc(Coprocessor coproc, CRegister crd, Register base, int option,
-           LFlag l = Short, Condition cond = al);
-
-  void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
-            LFlag l = Short);  // v5 and above
-  void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
-            LFlag l = Short);  // v5 and above
-
   // Support for VFP.
   // All these APIs support S0 to S31 and D0 to D15.
   // Currently these APIs do not support extended D registers, i.e, D16 to D31.
@@ -956,6 +985,30 @@
             const MemOperand& dst,
             const Condition cond = al);
 
+  void vldm(BlockAddrMode am,
+            Register base,
+            DwVfpRegister first,
+            DwVfpRegister last,
+            Condition cond = al);
+
+  void vstm(BlockAddrMode am,
+            Register base,
+            DwVfpRegister first,
+            DwVfpRegister last,
+            Condition cond = al);
+
+  void vldm(BlockAddrMode am,
+            Register base,
+            SwVfpRegister first,
+            SwVfpRegister last,
+            Condition cond = al);
+
+  void vstm(BlockAddrMode am,
+            Register base,
+            SwVfpRegister first,
+            SwVfpRegister last,
+            Condition cond = al);
+
   void vmov(const DwVfpRegister dst,
             double imm,
             const Condition cond = al);
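
Two signature changes above ripple through every caller: CpuFeatures is now process-wide (Probe() once, static queries afterwards), and the Assembler takes its Isolate explicitly instead of reaching for Isolate::Current(). A sketch of construction under the new shape; the surrounding setup is hypothetical and buffer handling is simplified:

    void EmitSomething(Isolate* isolate, byte* buffer, int buffer_size) {
      Assembler masm(isolate, buffer, buffer_size);  // was Assembler(buffer, size)
      if (CpuFeatures::IsSupported(VFP3)) {          // static, no isolate needed
        CpuFeatures::Scope scope(VFP3);
        // ... emit VFP instructions ...
      }
    }
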
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index f401cfd..5235dd3 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_ARM)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "full-codegen.h"
@@ -1173,9 +1173,11 @@
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  // Probe the CPU to set the supported features, because this builtin
-  // may be called before the initialization performs CPU setup.
-  masm->isolate()->cpu_features()->Probe(false);
+  CpuFeatures::TryForceFeatureScope scope(VFP3);
+  if (!CpuFeatures::IsSupported(VFP3)) {
+    __ Abort("Unreachable code: Cannot optimize without VFP3 support.");
+    return;
+  }
 
   // Lookup the function in the JavaScript frame and push it as an
   // argument to the on-stack replacement function.
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 441adfe..d66daea 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -308,13 +308,9 @@
 
 
 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
   Register exponent = result1_;
   Register mantissa = result2_;
-#else
-  Register exponent = result2_;
-  Register mantissa = result1_;
-#endif
+
   Label not_special;
   // Convert from Smi to integer.
   __ mov(source_, Operand(source_, ASR, kSmiTagSize));
@@ -502,7 +498,7 @@
                                    FloatingPointHelper::Destination destination,
                                    Register scratch1,
                                    Register scratch2) {
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
     __ vmov(d7.high(), scratch1);
@@ -521,7 +517,7 @@
     ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
     __ push(lr);
     __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
-    // Write Smi from r1 to r1 and r0 in double format.  r9 is scratch.
+    // Write Smi from r1 to r1 and r0 in double format.
     __ mov(scratch1, Operand(r1));
     ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
     __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
@@ -570,7 +566,7 @@
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
 
   // Handle loading a double from a heap number.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+  if (CpuFeatures::IsSupported(VFP3) &&
       destination == kVFPRegisters) {
     CpuFeatures::Scope scope(VFP3);
     // Load the double from tagged HeapNumber to double register.
@@ -585,7 +581,7 @@
 
   // Handle loading a double from a smi.
   __ bind(&is_smi);
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // Convert smi to double using VFP instructions.
     __ SmiUntag(scratch1, object);
@@ -676,7 +672,7 @@
 
   __ JumpIfNotSmi(object, &obj_is_not_smi);
   __ SmiUntag(scratch1, object);
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     __ vmov(single_scratch, scratch1);
     __ vcvt_f64_s32(double_dst, single_scratch);
@@ -686,51 +682,51 @@
   } else {
     Label fewer_than_20_useful_bits;
     // Expected output:
-    // |         dst1            |         dst2            |
+    // |         dst2            |         dst1            |
     // | s |   exp   |              mantissa               |
 
     // Check for zero.
     __ cmp(scratch1, Operand(0));
-    __ mov(dst1, scratch1);
     __ mov(dst2, scratch1);
+    __ mov(dst1, scratch1);
     __ b(eq, &done);
 
     // Preload the sign of the value.
-    __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC);
+    __ and_(dst2, scratch1, Operand(HeapNumber::kSignMask), SetCC);
     // Get the absolute value of the object (as an unsigned integer).
     __ rsb(scratch1, scratch1, Operand(0), SetCC, mi);
 
     // Get mantissa[51:20].
 
     // Get the position of the first set bit.
-    __ CountLeadingZeros(dst2, scratch1, scratch2);
-    __ rsb(dst2, dst2, Operand(31));
+    __ CountLeadingZeros(dst1, scratch1, scratch2);
+    __ rsb(dst1, dst1, Operand(31));
 
     // Set the exponent.
-    __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias));
-    __ Bfi(dst1, scratch2, scratch2,
+    __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+    __ Bfi(dst2, scratch2, scratch2,
         HeapNumber::kExponentShift, HeapNumber::kExponentBits);
 
     // Clear the first non-null bit.
     __ mov(scratch2, Operand(1));
-    __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2));
+    __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst1));
 
-    __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
+    __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
     // Get the number of bits to set in the lower part of the mantissa.
-    __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+    __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
     __ b(mi, &fewer_than_20_useful_bits);
     // Set the higher 20 bits of the mantissa.
-    __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2));
+    __ orr(dst2, dst2, Operand(scratch1, LSR, scratch2));
     __ rsb(scratch2, scratch2, Operand(32));
-    __ mov(dst2, Operand(scratch1, LSL, scratch2));
+    __ mov(dst1, Operand(scratch1, LSL, scratch2));
     __ b(&done);
 
     __ bind(&fewer_than_20_useful_bits);
-    __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
+    __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
     __ mov(scratch2, Operand(scratch1, LSL, scratch2));
-    __ orr(dst1, dst1, scratch2);
-    // Set dst2 to 0.
-    __ mov(dst2, Operand(0));
+    __ orr(dst2, dst2, scratch2);
+    // Set dst1 to 0.
+    __ mov(dst1, Operand(0));
   }
 
   __ b(&done);
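
The dst1/dst2 swap above aligns the manual double-building code with little-endian word order: the low mantissa word belongs in the first register of the pair and the sign/exponent word in the second. The same reasoning removes the BIG_ENDIAN_FLOATING_POINT branch from ConvertToDoubleStub earlier in this file and replaces the coprocessor stc store path below with a plain Strd of r0/r1. A standalone check of that layout:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double d = 1.0;  // sign 0, exponent 0x3ff, mantissa 0
      uint32_t lo, hi;
      std::memcpy(&lo, &d, 4);
      std::memcpy(&hi, reinterpret_cast<const char*>(&d) + 4, 4);
      // Little-endian: lo == 0x00000000 (mantissa low bits),
      //                hi == 0x3ff00000 (sign/exponent/mantissa high).
      std::printf("lo=%08x hi=%08x\n", lo, hi);
      return 0;
    }
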
@@ -744,7 +740,7 @@
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
 
   // Load the number.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // Load the double value.
     __ sub(scratch1, object, Operand(kHeapObjectTag));
@@ -818,7 +814,7 @@
 
   // Object is a heap number.
   // Convert the floating point value to a 32-bit integer.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     SwVfpRegister single_scratch = double_scratch.low();
     // Load the double value.
@@ -951,18 +947,10 @@
   // Call C routine that may not cause GC or other trouble.
   __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
                    4);
-  // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
-  // Double returned in fp coprocessor registers 0 and 1, encoded as
-  // register cr8.  Offsets must be divisible by 4 for the coprocessor, so
-  // we need to subtract the tag from heap_number_result.
-  __ sub(scratch, heap_number_result, Operand(kHeapObjectTag));
-  __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset));
-#else
-  // Double returned in registers 0 and 1.
+  // Store answer in the overwritable heap number. Double returned in
+  // registers r0 and r1.
   __ Strd(r0, r1, FieldMemOperand(heap_number_result,
                                   HeapNumber::kValueOffset));
-#endif
   // Place heap_number_result in r0 and return to the pushed return address.
   __ mov(r0, Operand(heap_number_result));
   __ pop(pc);
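
The retained store works because, under the soft-float ARM EABI convention
this code now assumes, a C function returning double leaves the raw 64-bit
IEEE-754 image in r0 (low word) and r1 (high word). A host-side equivalent of
the Strd (sketch, helper name mine):

  #include <cstdint>
  #include <cstring>

  void StoreDoubleLikeStrd(double result, void* heap_number_value) {
    uint64_t image;
    std::memcpy(&image, &result, sizeof image);  // r1:r0 on little-endian ARM.
    std::memcpy(heap_number_value, &image, sizeof image);  // Strd r0, r1.
  }
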
@@ -1153,7 +1141,7 @@
   }
 
   // Lhs is a smi, rhs is a number.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     // Convert lhs to a double in d7.
     CpuFeatures::Scope scope(VFP3);
     __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
@@ -1193,7 +1181,7 @@
   }
 
   // Rhs is a smi, lhs is a heap number.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // Load the double from lhs, tagged HeapNumber r1, to d7.
     __ sub(r7, lhs, Operand(kHeapObjectTag));
@@ -1373,7 +1361,7 @@
 
   // Both are heap numbers.  Load them up then jump to the code we have
   // for that.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     __ sub(r7, rhs, Operand(kHeapObjectTag));
     __ vldr(d6, r7, HeapNumber::kValueOffset);
@@ -1463,7 +1451,7 @@
   Label load_result_from_cache;
   if (!object_is_smi) {
     __ JumpIfSmi(object, &is_smi);
-    if (isolate->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       __ CheckMap(object,
                   scratch1,
@@ -1597,7 +1585,7 @@
   // The arguments have been converted to doubles and stored in d6 and d7, if
   // VFP3 is supported, or in r0, r1, r2, and r3.
   Isolate* isolate = masm->isolate();
-  if (isolate->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     __ bind(&lhs_not_nan);
     CpuFeatures::Scope scope(VFP3);
     Label no_nan;
@@ -1707,7 +1695,7 @@
 // The stub returns zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
   // This stub uses VFP3 instructions.
-  ASSERT(Isolate::Current()->cpu_features()->IsEnabled(VFP3));
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
 
   Label false_result;
   Label not_heap_number;
@@ -1780,1064 +1768,6 @@
 }
 
 
-// We fall into this code if the operands were Smis, but the result was
-// not (e.g. overflow).  We branch into this code (to the not_smi label) if
-// the operands were not both Smi.  The operands are in r0 and r1.  In order
-// to call the C-implemented binary fp operation routines we need to end up
-// with the double precision floating point operands in r0 and r1 (for the
-// value in r1) and r2 and r3 (for the value in r0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(
-    MacroAssembler* masm,
-    Label* not_smi,
-    Register lhs,
-    Register rhs,
-    const Builtins::JavaScript& builtin) {
-  Label slow, slow_reverse, do_the_call;
-  bool use_fp_registers =
-      Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
-      Token::MOD != op_;
-
-  ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
-  Register heap_number_map = r6;
-
-  if (ShouldGenerateSmiCode()) {
-    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-    // Smi-smi case (overflow).
-    // Since both are Smis there is no heap number to overwrite, so allocate.
-    // The new heap number is in r5.  r3 and r7 are scratch.
-    __ AllocateHeapNumber(
-        r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
-
-    // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
-    // using registers d7 and d6 for the double values.
-    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-      CpuFeatures::Scope scope(VFP3);
-      __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
-      __ vmov(s15, r7);
-      __ vcvt_f64_s32(d7, s15);
-      __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
-      __ vmov(s13, r7);
-      __ vcvt_f64_s32(d6, s13);
-      if (!use_fp_registers) {
-        __ vmov(r2, r3, d7);
-        __ vmov(r0, r1, d6);
-      }
-    } else {
-      // Write Smi from rhs to r3 and r2 in double format.  r9 is scratch.
-      __ mov(r7, Operand(rhs));
-      ConvertToDoubleStub stub1(r3, r2, r7, r9);
-      __ push(lr);
-      __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
-      // Write Smi from lhs to r1 and r0 in double format.  r9 is scratch.
-      __ mov(r7, Operand(lhs));
-      ConvertToDoubleStub stub2(r1, r0, r7, r9);
-      __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
-      __ pop(lr);
-    }
-    __ jmp(&do_the_call);  // Tail call.  No return.
-  }
-
-  // We branch here if at least one of r0 and r1 is not a Smi.
-  __ bind(not_smi);
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  // After this point we have the left hand side in r1 and the right hand side
-  // in r0.
-  if (lhs.is(r0)) {
-    __ Swap(r0, r1, ip);
-  }
-
-  // The type transition also calculates the answer.
-  bool generate_code_to_calculate_answer = true;
-
-  if (ShouldGenerateFPCode()) {
-    // DIV has neither SmiSmi fast code nor specialized slow code.
-    // So don't try to patch a DIV Stub.
-    if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
-      switch (op_) {
-        case Token::ADD:
-        case Token::SUB:
-        case Token::MUL:
-          GenerateTypeTransition(masm);  // Tail call.
-          generate_code_to_calculate_answer = false;
-          break;
-
-        case Token::DIV:
-          // DIV has neither SmiSmi fast code nor specialized slow code.
-          // So don't try to patch a DIV Stub.
-          break;
-
-        default:
-          break;
-      }
-    }
-
-    if (generate_code_to_calculate_answer) {
-      Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
-      if (mode_ == NO_OVERWRITE) {
-        // In the case where there is no chance of an overwritable float we may
-        // as well do the allocation immediately while r0 and r1 are untouched.
-        __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
-      }
-
-      // Move r0 to a double in r2-r3.
-      __ tst(r0, Operand(kSmiTagMask));
-      __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
-      __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
-      __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-      __ cmp(r4, heap_number_map);
-      __ b(ne, &slow);
-      if (mode_ == OVERWRITE_RIGHT) {
-        __ mov(r5, Operand(r0));  // Overwrite this heap number.
-      }
-      if (use_fp_registers) {
-        CpuFeatures::Scope scope(VFP3);
-        // Load the double from tagged HeapNumber r0 to d7.
-        __ sub(r7, r0, Operand(kHeapObjectTag));
-        __ vldr(d7, r7, HeapNumber::kValueOffset);
-      } else {
-        // Calling convention says that second double is in r2 and r3.
-        __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
-      }
-      __ jmp(&finished_loading_r0);
-      __ bind(&r0_is_smi);
-      if (mode_ == OVERWRITE_RIGHT) {
-        // We can't overwrite a Smi so get address of new heap number into r5.
-        __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
-      }
-
-      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-        CpuFeatures::Scope scope(VFP3);
-        // Convert smi in r0 to double in d7.
-        __ mov(r7, Operand(r0, ASR, kSmiTagSize));
-        __ vmov(s15, r7);
-        __ vcvt_f64_s32(d7, s15);
-        if (!use_fp_registers) {
-          __ vmov(r2, r3, d7);
-        }
-      } else {
-        // Write Smi from r0 to r3 and r2 in double format.
-        __ mov(r7, Operand(r0));
-        ConvertToDoubleStub stub3(r3, r2, r7, r4);
-        __ push(lr);
-        __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
-        __ pop(lr);
-      }
-
-      // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
-      // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
-      Label r1_is_not_smi;
-      if ((runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) &&
-          HasSmiSmiFastPath()) {
-        __ tst(r1, Operand(kSmiTagMask));
-        __ b(ne, &r1_is_not_smi);
-        GenerateTypeTransition(masm);  // Tail call.
-      }
-
-      __ bind(&finished_loading_r0);
-
-      // Move r1 to a double in r0-r1.
-      __ tst(r1, Operand(kSmiTagMask));
-      __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
-      __ bind(&r1_is_not_smi);
-      __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
-      __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-      __ cmp(r4, heap_number_map);
-      __ b(ne, &slow);
-      if (mode_ == OVERWRITE_LEFT) {
-        __ mov(r5, Operand(r1));  // Overwrite this heap number.
-      }
-      if (use_fp_registers) {
-        CpuFeatures::Scope scope(VFP3);
-        // Load the double from tagged HeapNumber r1 to d6.
-        __ sub(r7, r1, Operand(kHeapObjectTag));
-        __ vldr(d6, r7, HeapNumber::kValueOffset);
-      } else {
-        // Calling convention says that first double is in r0 and r1.
-        __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
-      }
-      __ jmp(&finished_loading_r1);
-      __ bind(&r1_is_smi);
-      if (mode_ == OVERWRITE_LEFT) {
-        // We can't overwrite a Smi so get address of new heap number into r5.
-        __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
-      }
-
-      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-        CpuFeatures::Scope scope(VFP3);
-        // Convert smi in r1 to double in d6.
-        __ mov(r7, Operand(r1, ASR, kSmiTagSize));
-        __ vmov(s13, r7);
-        __ vcvt_f64_s32(d6, s13);
-        if (!use_fp_registers) {
-          __ vmov(r0, r1, d6);
-        }
-      } else {
-        // Write Smi from r1 to r1 and r0 in double format.
-        __ mov(r7, Operand(r1));
-        ConvertToDoubleStub stub4(r1, r0, r7, r9);
-        __ push(lr);
-        __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
-        __ pop(lr);
-      }
-
-      __ bind(&finished_loading_r1);
-    }
-
-    if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
-      __ bind(&do_the_call);
-      // If we are inlining the operation using VFP3 instructions for
-      // add, subtract, multiply, or divide, the arguments are in d6 and d7.
-      if (use_fp_registers) {
-        CpuFeatures::Scope scope(VFP3);
-        // ARMv7 VFP3 instructions to implement
-        // double precision, add, subtract, multiply, divide.
-
-        if (Token::MUL == op_) {
-          __ vmul(d5, d6, d7);
-        } else if (Token::DIV == op_) {
-          __ vdiv(d5, d6, d7);
-        } else if (Token::ADD == op_) {
-          __ vadd(d5, d6, d7);
-        } else if (Token::SUB == op_) {
-          __ vsub(d5, d6, d7);
-        } else {
-          UNREACHABLE();
-        }
-        __ sub(r0, r5, Operand(kHeapObjectTag));
-        __ vstr(d5, r0, HeapNumber::kValueOffset);
-        __ add(r0, r0, Operand(kHeapObjectTag));
-        __ Ret();
-      } else {
-        // If we did not inline the operation, then the arguments are in:
-        // r0: Left value (least significant part of mantissa).
-        // r1: Left value (sign, exponent, top of mantissa).
-        // r2: Right value (least significant part of mantissa).
-        // r3: Right value (sign, exponent, top of mantissa).
-        // r5: Address of heap number for result.
-
-        __ push(lr);   // For later.
-        __ PrepareCallCFunction(4, r4);  // Two doubles count as 4 arguments.
-        // Call C routine that may not cause GC or other trouble. r5 is callee
-        // save.
-        __ CallCFunction(
-            ExternalReference::double_fp_operation(op_, masm->isolate()), 4);
-        // Store answer in the overwritable heap number.
-    #if !defined(USE_ARM_EABI)
-        // Double returned in fp coprocessor registers 0 and 1, encoded as
-        // register cr8.  Offsets must be divisible by 4 for the coprocessor,
-        // so we need to subtract the tag from r5.
-        __ sub(r4, r5, Operand(kHeapObjectTag));
-        __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
-    #else
-        // Double returned in registers 0 and 1.
-        __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
-    #endif
-        __ mov(r0, Operand(r5));
-        // And we are done.
-        __ pop(pc);
-      }
-    }
-  }
-
-  if (!generate_code_to_calculate_answer &&
-      !slow_reverse.is_linked() &&
-      !slow.is_linked()) {
-    return;
-  }
-
-  if (lhs.is(r0)) {
-    __ b(&slow);
-    __ bind(&slow_reverse);
-    __ Swap(r0, r1, ip);
-  }
-
-  heap_number_map = no_reg;  // Don't use this any more from here on.
-
-  // We jump to here if something goes wrong (one param is not a number of any
-  // sort or new-space allocation fails).
-  __ bind(&slow);
-
-  // Push arguments to the stack
-  __ Push(r1, r0);
-
-  if (Token::ADD == op_) {
-    // Test for string arguments before calling runtime.
-    // r1 : first argument
-    // r0 : second argument
-    // sp[0] : second argument
-    // sp[4] : first argument
-
-    Label not_strings, not_string1, string1, string1_smi2;
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(eq, &not_string1);
-    __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
-    __ b(ge, &not_string1);
-
-    // First argument is a string, test second.
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &string1_smi2);
-    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
-    __ b(ge, &string1);
-
-    // First and second argument are strings.
-    StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
-    __ TailCallStub(&string_add_stub);
-
-    __ bind(&string1_smi2);
-    // First argument is a string, second is a smi. Try to lookup the number
-    // string for the smi in the number string cache.
-    NumberToStringStub::GenerateLookupNumberStringCache(
-        masm, r0, r2, r4, r5, r6, true, &string1);
-
-    // Replace second argument on stack and tailcall string add stub to make
-    // the result.
-    __ str(r2, MemOperand(sp, 0));
-    __ TailCallStub(&string_add_stub);
-
-    // Only first argument is a string.
-    __ bind(&string1);
-    __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
-
-    // First argument was not a string, test second.
-    __ bind(&not_string1);
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &not_strings);
-    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
-    __ b(ge, &not_strings);
-
-    // Only second argument is a string.
-    __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
-
-    __ bind(&not_strings);
-  }
-
-  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
-}
-
-
-// For bitwise ops where the inputs are not both Smis, we try to determine
-// whether both inputs are either Smis or at least heap numbers that can be
-// represented by a 32 bit signed value.  We truncate towards zero as required
-// by the ES spec.  If this is the case we do the bitwise op and see if the
-// result is a Smi.  If so, great, otherwise we try to find a heap number to
-// write the answer into (either by allocating or by overwriting).
-// On entry the operands are in lhs and rhs.  On exit the answer is in r0.
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
-                                                Register lhs,
-                                                Register rhs) {
-  Label slow, result_not_a_smi;
-  Label rhs_is_smi, lhs_is_smi;
-  Label done_checking_rhs, done_checking_lhs;
-
-  Register heap_number_map = r6;
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  __ tst(lhs, Operand(kSmiTagMask));
-  __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
-  __ cmp(r4, heap_number_map);
-  __ b(ne, &slow);
-  __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
-  __ jmp(&done_checking_lhs);
-  __ bind(&lhs_is_smi);
-  __ mov(r3, Operand(lhs, ASR, 1));
-  __ bind(&done_checking_lhs);
-
-  __ tst(rhs, Operand(kSmiTagMask));
-  __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
-  __ cmp(r4, heap_number_map);
-  __ b(ne, &slow);
-  __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
-  __ jmp(&done_checking_rhs);
-  __ bind(&rhs_is_smi);
-  __ mov(r2, Operand(rhs, ASR, 1));
-  __ bind(&done_checking_rhs);
-
-  ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
-
-  // r0 and r1: Original operands (Smi or heap numbers).
-  // r2 and r3: Signed int32 operands.
-  switch (op_) {
-    case Token::BIT_OR:  __ orr(r2, r2, Operand(r3)); break;
-    case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
-    case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
-    case Token::SAR:
-      // Use only the 5 least significant bits of the shift count.
-      __ and_(r2, r2, Operand(0x1f));
-      __ mov(r2, Operand(r3, ASR, r2));
-      break;
-    case Token::SHR:
-      // Use only the 5 least significant bits of the shift count.
-      __ and_(r2, r2, Operand(0x1f));
-      __ mov(r2, Operand(r3, LSR, r2), SetCC);
-      // SHR is special because it is required to produce a positive answer.
-      // The code below for writing into heap numbers isn't capable of writing
-      // the register as an unsigned int so we go to slow case if we hit this
-      // case.
-      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-        __ b(mi, &result_not_a_smi);
-      } else {
-        __ b(mi, &slow);
-      }
-      break;
-    case Token::SHL:
-      // Use only the 5 least significant bits of the shift count.
-      __ and_(r2, r2, Operand(0x1f));
-      __ mov(r2, Operand(r3, LSL, r2));
-      break;
-    default: UNREACHABLE();
-  }
-  // Check that the *signed* result fits in a smi.
-  __ add(r3, r2, Operand(0x40000000), SetCC);
-  __ b(mi, &result_not_a_smi);
-  __ mov(r0, Operand(r2, LSL, kSmiTagSize));
-  __ Ret();
-
-  Label have_to_allocate, got_a_heap_number;
-  __ bind(&result_not_a_smi);
-  switch (mode_) {
-    case OVERWRITE_RIGHT: {
-      __ tst(rhs, Operand(kSmiTagMask));
-      __ b(eq, &have_to_allocate);
-      __ mov(r5, Operand(rhs));
-      break;
-    }
-    case OVERWRITE_LEFT: {
-      __ tst(lhs, Operand(kSmiTagMask));
-      __ b(eq, &have_to_allocate);
-      __ mov(r5, Operand(lhs));
-      break;
-    }
-    case NO_OVERWRITE: {
-      // Get a new heap number in r5.  r4 and r7 are scratch.
-      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
-    }
-    default: break;
-  }
-  __ bind(&got_a_heap_number);
-  // r2: Answer as signed int32.
-  // r5: Heap number to write answer into.
-
-  // Nothing can go wrong now, so move the heap number to r0, which is the
-  // result.
-  __ mov(r0, Operand(r5));
-
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-    // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
-    CpuFeatures::Scope scope(VFP3);
-    __ vmov(s0, r2);
-    if (op_ == Token::SHR) {
-      __ vcvt_f64_u32(d0, s0);
-    } else {
-      __ vcvt_f64_s32(d0, s0);
-    }
-    __ sub(r3, r0, Operand(kHeapObjectTag));
-    __ vstr(d0, r3, HeapNumber::kValueOffset);
-    __ Ret();
-  } else {
-    // Tail call that writes the int32 in r2 to the heap number in r0, using
-    // r3 as scratch.  r0 is preserved and returned.
-    WriteInt32ToHeapNumberStub stub(r2, r0, r3);
-    __ TailCallStub(&stub);
-  }
-
-  if (mode_ != NO_OVERWRITE) {
-    __ bind(&have_to_allocate);
-    // Get a new heap number in r5.  r4 and r7 are scratch.
-    __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
-    __ jmp(&got_a_heap_number);
-  }
-
-  // If all else failed then we go to the runtime system.
-  __ bind(&slow);
-  __ Push(lhs, rhs);  // Restore stack.
-  switch (op_) {
-    case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
-      break;
-    case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
-      break;
-    case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
-      break;
-    case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
-      break;
-    case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
-      break;
-    case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
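
The add(..., Operand(0x40000000), SetCC) / b(mi, ...) pair above (and in the
shift cases elsewhere in this file) is the standard smi range check: a 32-bit
value fits in a smi iff it fits in 31 signed bits, and adding 2^30 turns "out
of range" into a set sign bit. A sketch, helper name mine, assuming the
two's-complement wrap every ARM target provides:

  #include <cstdint>

  bool FitsInSmi(int32_t v) {
    return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
  }
  // FitsInSmi(0x3fffffff) and FitsInSmi(-0x40000000) are true;
  // FitsInSmi(0x40000000) and FitsInSmi(-0x40000001) are false.
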
-
-
-
-
-// This function multiplies by a known int, taking the int in a register
-// for the cases where there is no good shift-add trick, and may deliver
-// a result that still needs shifting.
-static void MultiplyByKnownIntInStub(
-    MacroAssembler* masm,
-    Register result,
-    Register source,
-    Register known_int_register,   // Smi tagged.
-    int known_int,
-    int* required_shift) {  // Including Smi tag shift
-  switch (known_int) {
-    case 3:
-      __ add(result, source, Operand(source, LSL, 1));
-      *required_shift = 1;
-      break;
-    case 5:
-      __ add(result, source, Operand(source, LSL, 2));
-      *required_shift = 1;
-      break;
-    case 6:
-      __ add(result, source, Operand(source, LSL, 1));
-      *required_shift = 2;
-      break;
-    case 7:
-      __ rsb(result, source, Operand(source, LSL, 3));
-      *required_shift = 1;
-      break;
-    case 9:
-      __ add(result, source, Operand(source, LSL, 3));
-      *required_shift = 1;
-      break;
-    case 10:
-      __ add(result, source, Operand(source, LSL, 2));
-      *required_shift = 2;
-      break;
-    default:
-      ASSERT(!IsPowerOf2(known_int));  // That would be very inefficient.
-      __ mul(result, source, known_int_register);
-      *required_shift = 0;
-  }
-}
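
In ordinary arithmetic the decompositions above read as follows (sketch, names
mine): a single add or reverse-subtract builds the odd factor, and the
remaining power-of-two factor plus the smi tag are deferred to *required_shift.

  #include <cstdint>

  uint32_t Times3(uint32_t x) { return x + (x << 1); }  // The add pattern.
  uint32_t Times7(uint32_t x) { return (x << 3) - x; }  // The rsb pattern.
  // 6 * x is Times3(x) with *required_shift == 2: one shift for the even
  // factor and one for the smi tag.
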
-
-
-// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
-// trick.  See http://en.wikipedia.org/wiki/Divisibility_rule
-// Takes the sum of the digits base (mask + 1) repeatedly until we have a
-// number from 0 to mask.  On exit the 'eq' condition flags are set if the
-// answer is exactly the mask.
-void IntegerModStub::DigitSum(MacroAssembler* masm,
-                              Register lhs,
-                              int mask,
-                              int shift,
-                              Label* entry) {
-  ASSERT(mask > 0);
-  ASSERT(mask <= 0xff);  // This ensures we don't need ip to materialize it.
-  Label loop;
-  __ bind(&loop);
-  __ and_(ip, lhs, Operand(mask));
-  __ add(lhs, ip, Operand(lhs, LSR, shift));
-  __ bind(entry);
-  __ cmp(lhs, Operand(mask));
-  __ b(gt, &loop);
-}
-
-
-void IntegerModStub::DigitSum(MacroAssembler* masm,
-                              Register lhs,
-                              Register scratch,
-                              int mask,
-                              int shift1,
-                              int shift2,
-                              Label* entry) {
-  ASSERT(mask > 0);
-  ASSERT(mask <= 0xff);  // This ensures we don't need ip to materialize it.
-  Label loop;
-  __ bind(&loop);
-  __ bic(scratch, lhs, Operand(mask));
-  __ and_(ip, lhs, Operand(mask));
-  __ add(lhs, ip, Operand(lhs, LSR, shift1));
-  __ add(lhs, lhs, Operand(scratch, LSR, shift2));
-  __ bind(entry);
-  __ cmp(lhs, Operand(mask));
-  __ b(gt, &loop);
-}
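
The digit-sum routines above exploit that, when a divisor d divides
mask == (1 << shift) - 1, the base-(mask + 1) digit sum of a value is
congruent to the value mod d. A minimal host-side sketch (helper name and
types are mine, not part of the stub):

  #include <cstdint>

  uint32_t DigitSumSketch(uint32_t n, uint32_t mask, int shift) {
    while (n > mask) {
      n = (n & mask) + (n >> shift);  // Fold one base-(mask + 1) digit.
    }
    return n;  // In [0, mask] and congruent to the input mod d.
  }

For example, DigitSumSketch(100, 3, 2) == 1, matching 100 % 3 == 1; a nonzero
multiple of 3 ends at exactly 3 (the 'eq' case), which the mod-3/7/15 users
below fix up with a conditional subtract.
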
-
-
-// Splits the number into two halves (bottom half has shift bits).  The top
-// half is subtracted from the bottom half.  If the result is negative then
-// rhs is added.
-void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
-                                                Register lhs,
-                                                int shift,
-                                                int rhs) {
-  int mask = (1 << shift) - 1;
-  __ and_(ip, lhs, Operand(mask));
-  __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
-  __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
-}
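
The subtraction trick works because the callers pick shift so that
b == 1 << shift satisfies b ≡ -1 (mod rhs); writing v == b*hi + lo then gives
v ≡ lo - hi (mod rhs), and for the value ranges the callers produce one
subtract plus a conditional add of rhs lands in [0, rhs - 1]. A sketch
(helper name mine):

  int ModBySubtractionSketch(int v, int shift, int rhs) {
    int lo = v & ((1 << shift) - 1);
    int hi = v >> shift;
    int r = lo - hi;               // Congruent to v mod rhs.
    return (r < 0) ? r + rhs : r;  // The conditional add on 'mi'.
  }
  // Mod-5 path (shift == 2, since 4 ≡ -1 mod 5): for v == 13,
  // 1 - 3 + 5 == 3 == 13 % 5.
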
-
-
-void IntegerModStub::ModReduce(MacroAssembler* masm,
-                               Register lhs,
-                               int max,
-                               int denominator) {
-  int limit = denominator;
-  while (limit * 2 <= max) limit *= 2;
-  while (limit >= denominator) {
-    __ cmp(lhs, Operand(limit));
-    __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
-    limit >>= 1;
-  }
-}
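
ModReduce is the remainder half of a restoring binary division: it
conditionally subtracts shrinking power-of-two multiples of the denominator.
In plain C++ (sketch, name mine):

  int ModReduceSketch(int v, int max, int d) {
    int limit = d;
    while (limit * 2 <= max) limit *= 2;
    while (limit >= d) {
      if (v >= limit) v -= limit;  // cmp + conditional sub (ge) above.
      limit >>= 1;
    }
    return v;                      // v % d, valid for 0 <= v <= max.
  }
  // E.g. ModReduceSketch(63, 0x3f, 11) peels 44, then 11, leaving 8.
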
-
-
-void IntegerModStub::ModAnswer(MacroAssembler* masm,
-                               Register result,
-                               Register shift_distance,
-                               Register mask_bits,
-                               Register sum_of_digits) {
-  __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
-  __ Ret();
-}
-
-
-// See comment for class.
-void IntegerModStub::Generate(MacroAssembler* masm) {
-  __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
-  __ bic(odd_number_, odd_number_, Operand(1));
-  __ mov(odd_number_, Operand(odd_number_, LSL, 1));
-  // We now have (odd_number_ - 1) * 2 in the register.
-  // Build a switch out of branches instead of data because it avoids
-  // having to teach the assembler about intra-code-object pointers
-  // that are not in relative branch instructions.
-  Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
-  Label mod21, mod23, mod25;
-  { Assembler::BlockConstPoolScope block_const_pool(masm);
-    __ add(pc, pc, Operand(odd_number_));
-    // When you read pc it is always 8 ahead, but when you write it you always
-    // write the actual value.  So we put in two nops to take up the slack.
-    __ nop();
-    __ nop();
-    __ b(&mod3);
-    __ b(&mod5);
-    __ b(&mod7);
-    __ b(&mod9);
-    __ b(&mod11);
-    __ b(&mod13);
-    __ b(&mod15);
-    __ b(&mod17);
-    __ b(&mod19);
-    __ b(&mod21);
-    __ b(&mod23);
-    __ b(&mod25);
-  }
-
-  // For each denominator we find a multiple that is almost only ones
-  // when expressed in binary.  Then we do the sum-of-digits trick for
-  // that number.  If the multiple is not 1 then we have to do a little
-  // more work afterwards to get the answer into the 0 to denominator-1
-  // range.
-  DigitSum(masm, lhs_, 3, 2, &mod3);  // 3 = b11.
-  __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0xf, 4, &mod5);  // 5 * 3 = b1111.
-  ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 7, 3, &mod7);  // 7 = b111.
-  __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0x3f, 6, &mod9);  // 7 * 9 = b111111.
-  ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11);  // 5 * 11 = b110111.
-  ModReduce(masm, lhs_, 0x3f, 11);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13);  // 19 * 13 = b11110111.
-  ModReduce(masm, lhs_, 0xff, 13);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0xf, 4, &mod15);  // 15 = b1111.
-  __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0xff, 8, &mod17);  // 15 * 17 = b11111111.
-  ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19);  // 13 * 19 = b11110111.
-  ModReduce(masm, lhs_, 0xff, 19);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0x3f, 6, &mod21);  // 3 * 21 = b111111.
-  ModReduce(masm, lhs_, 0x3f, 21);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23);  // 11 * 23 = b11111101.
-  ModReduce(masm, lhs_, 0xff, 23);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25);  // 5 * 25 = b1111101.
-  ModReduce(masm, lhs_, 0x7f, 25);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-}
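
As a worked example of the computed branch at the top of Generate (my
arithmetic, assuming 4-byte ARM instructions): reading pc yields the address
of the add plus 8, the two nops fill the next two slots, and odd_number_
holds (odd - 1) * 2, so:

  int BranchSlotOffset(int odd) {  // Offset from the add instruction.
    return 8 + (odd - 1) * 2;
  }
  // BranchSlotOffset(3) == 12 lands on b(&mod3), the first slot after the
  // nops; BranchSlotOffset(5) == 16 on b(&mod5); ... up to 25 -> 56.
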
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
-  // lhs_ : x
-  // rhs_ : y
-  // r0   : result
-
-  Register result = r0;
-  Register lhs = lhs_;
-  Register rhs = rhs_;
-
-  // This code can't cope with other register allocations yet.
-  ASSERT(result.is(r0) &&
-         ((lhs.is(r0) && rhs.is(r1)) ||
-          (lhs.is(r1) && rhs.is(r0))));
-
-  Register smi_test_reg = r7;
-  Register scratch = r9;
-
-  // All ops need to know whether we are dealing with two Smis.  Set up
-  // smi_test_reg to tell us that.
-  if (ShouldGenerateSmiCode()) {
-    __ orr(smi_test_reg, lhs, Operand(rhs));
-  }
-
-  switch (op_) {
-    case Token::ADD: {
-      Label not_smi;
-      // Fast path.
-      if (ShouldGenerateSmiCode()) {
-        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
-        __ tst(smi_test_reg, Operand(kSmiTagMask));
-        __ b(ne, &not_smi);
-        __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
-        // Return if no overflow.
-        __ Ret(vc);
-        __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
-      break;
-    }
-
-    case Token::SUB: {
-      Label not_smi;
-      // Fast path.
-      if (ShouldGenerateSmiCode()) {
-        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
-        __ tst(smi_test_reg, Operand(kSmiTagMask));
-        __ b(ne, &not_smi);
-        if (lhs.is(r1)) {
-          __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
-          // Return if no overflow.
-          __ Ret(vc);
-          __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
-        } else {
-          __ sub(r0, r0, Operand(r1), SetCC);  // Subtract y optimistically.
-          // Return if no overflow.
-          __ Ret(vc);
-          __ add(r0, r0, Operand(r1));  // Revert optimistic subtract.
-        }
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
-      break;
-    }
-
-    case Token::MUL: {
-      Label not_smi, slow;
-      if (ShouldGenerateSmiCode()) {
-        STATIC_ASSERT(kSmiTag == 0);  // adjust code below
-        __ tst(smi_test_reg, Operand(kSmiTagMask));
-        Register scratch2 = smi_test_reg;
-        smi_test_reg = no_reg;
-        __ b(ne, &not_smi);
-        // Remove tag from one operand (but keep sign), so that result is Smi.
-        __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
-        // Do multiplication
-        // scratch = lower 32 bits of ip * lhs.
-        __ smull(scratch, scratch2, lhs, ip);
-        // Go slow on overflows (overflow bit is not set).
-        __ mov(ip, Operand(scratch, ASR, 31));
-        // No overflow if higher 33 bits are identical.
-        __ cmp(ip, Operand(scratch2));
-        __ b(ne, &slow);
-        // Go slow on zero result to handle -0.
-        __ tst(scratch, Operand(scratch));
-        __ mov(result, Operand(scratch), LeaveCC, ne);
-        __ Ret(ne);
-        // We need -0 if we were multiplying a negative number with 0 to get 0.
-        // We know one of them was zero.
-        __ add(scratch2, rhs, Operand(lhs), SetCC);
-        __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
-        __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
-        // Slow case.  We fall through here if we multiplied a negative number
-        // with 0, because that would mean we should produce -0.
-        __ bind(&slow);
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
-      break;
-    }
-
-    case Token::DIV:
-    case Token::MOD: {
-      Label not_smi;
-      if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
-        Label lhs_is_unsuitable;
-        __ JumpIfNotSmi(lhs, &not_smi);
-        if (IsPowerOf2(constant_rhs_)) {
-          if (op_ == Token::MOD) {
-            __ and_(rhs,
-                    lhs,
-                    Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
-                    SetCC);
-            // We now have the answer, but if the input was negative we also
-            // have the sign bit.  Our work is done if the result is
-            // positive or zero:
-            if (!rhs.is(r0)) {
-              __ mov(r0, rhs, LeaveCC, pl);
-            }
-            __ Ret(pl);
-            // A mod of a negative left hand side must return a negative number.
-            // Unfortunately if the answer is 0 then we must return -0.  And we
-            // already optimistically trashed rhs so we may need to restore it.
-            __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
-            // Next two instructions are conditional on the answer being -0.
-            __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
-            __ b(eq, &lhs_is_unsuitable);
-            // We need to subtract the divisor.  E.g. -3 % 4 == -3.
-            __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
-          } else {
-            ASSERT(op_ == Token::DIV);
-            __ tst(lhs,
-                   Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
-            __ b(ne, &lhs_is_unsuitable);  // Go slow on negative or remainder.
-            int shift = 0;
-            int d = constant_rhs_;
-            while ((d & 1) == 0) {
-              d >>= 1;
-              shift++;
-            }
-            __ mov(r0, Operand(lhs, LSR, shift));
-            __ bic(r0, r0, Operand(kSmiTagMask));
-          }
-        } else {
-          // Not a power of 2.
-          __ tst(lhs, Operand(0x80000000u));
-          __ b(ne, &lhs_is_unsuitable);
-          // Find a fixed point reciprocal of the divisor so we can divide by
-          // multiplying.
-          double divisor = 1.0 / constant_rhs_;
-          int shift = 32;
-          double scale = 4294967296.0;  // 1 << 32.
-          uint32_t mul;
-          // Maximise the precision of the fixed point reciprocal.
-          while (true) {
-            mul = static_cast<uint32_t>(scale * divisor);
-            if (mul >= 0x7fffffff) break;
-            scale *= 2.0;
-            shift++;
-          }
-          mul++;
-          Register scratch2 = smi_test_reg;
-          smi_test_reg = no_reg;
-          __ mov(scratch2, Operand(mul));
-          __ umull(scratch, scratch2, scratch2, lhs);
-          __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
-          // scratch2 is lhs / rhs.  scratch2 is not Smi tagged.
-          // rhs is still the known rhs.  rhs is Smi tagged.
-          // lhs is still the unknown lhs.  lhs is Smi tagged.
-          int required_scratch_shift = 0;  // Including the Smi tag shift of 1.
-          // scratch = scratch2 * rhs.
-          MultiplyByKnownIntInStub(masm,
-                                   scratch,
-                                   scratch2,
-                                   rhs,
-                                   constant_rhs_,
-                                   &required_scratch_shift);
-          // scratch << required_scratch_shift is now the Smi tagged rhs *
-          // (lhs / rhs) where / indicates integer division.
-          if (op_ == Token::DIV) {
-            __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
-            __ b(ne, &lhs_is_unsuitable);  // There was a remainder.
-            __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
-          } else {
-            ASSERT(op_ == Token::MOD);
-            __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
-          }
-        }
-        __ Ret();
-        __ bind(&lhs_is_unsuitable);
-      } else if (op_ == Token::MOD &&
-                 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-                 runtime_operands_type_ != BinaryOpIC::STRINGS) {
-        // Do generate a bit of smi code for modulus even though the default for
-        // modulus is not to do it, but since the ARM processor has no coprocessor
-        // support for modulus, checking for smis makes sense.  We can handle
-        // 1 to 25 times any power of 2.  This covers over half the numbers from
-        // 1 to 100 including all of the first 25.  (Actually the constants < 10
-        // are handled above by reciprocal multiplication.  We only get here for
-        // those cases if the right hand side is not a constant or for cases
-        // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod
-        // stub.)
-        Label slow;
-        Label not_power_of_2;
-        ASSERT(!ShouldGenerateSmiCode());
-        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
-        // Check for two positive smis.
-        __ orr(smi_test_reg, lhs, Operand(rhs));
-        __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
-        __ b(ne, &slow);
-        // Check that rhs is a power of two and not zero.
-        Register mask_bits = r3;
-        __ sub(scratch, rhs, Operand(1), SetCC);
-        __ b(mi, &slow);
-        __ and_(mask_bits, rhs, Operand(scratch), SetCC);
-        __ b(ne, &not_power_of_2);
-        // Calculate power of two modulus.
-        __ and_(result, lhs, Operand(scratch));
-        __ Ret();
-
-        __ bind(&not_power_of_2);
-        __ eor(scratch, scratch, Operand(mask_bits));
-        // At least two bits are set in the modulus.  The high one(s) are in
-        // mask_bits and the low one is scratch + 1.
-        __ and_(mask_bits, scratch, Operand(lhs));
-        Register shift_distance = scratch;
-        scratch = no_reg;
-
-        // The rhs consists of a power of 2 multiplied by some odd number.
-        // The power-of-2 part we handle by putting the corresponding bits
-        // from the lhs in the mask_bits register, and the power in the
-        // shift_distance register.  Shift distance is never 0 due to Smi
-        // tagging.
-        __ CountLeadingZeros(r4, shift_distance, shift_distance);
-        __ rsb(shift_distance, r4, Operand(32));
-
-        // Now we need to find out what the odd number is. The last bit is
-        // always 1.
-        Register odd_number = r4;
-        __ mov(odd_number, Operand(rhs, LSR, shift_distance));
-        __ cmp(odd_number, Operand(25));
-        __ b(gt, &slow);
-
-        IntegerModStub stub(
-            result, shift_distance, odd_number, mask_bits, lhs, r5);
-        __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);  // Tail call.
-
-        __ bind(&slow);
-      }
-      HandleBinaryOpSlowCases(
-          masm,
-          &not_smi,
-          lhs,
-          rhs,
-          op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
-      break;
-    }
-
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SAR:
-    case Token::SHR:
-    case Token::SHL: {
-      Label slow;
-      STATIC_ASSERT(kSmiTag == 0);  // adjust code below
-      __ tst(smi_test_reg, Operand(kSmiTagMask));
-      __ b(ne, &slow);
-      Register scratch2 = smi_test_reg;
-      smi_test_reg = no_reg;
-      switch (op_) {
-        case Token::BIT_OR:  __ orr(result, rhs, Operand(lhs)); break;
-        case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
-        case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
-        case Token::SAR:
-          // Remove tags from right operand.
-          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
-          __ mov(result, Operand(lhs, ASR, scratch2));
-          // Smi tag result.
-          __ bic(result, result, Operand(kSmiTagMask));
-          break;
-        case Token::SHR:
-          // Remove tags from operands.  We can't do this on a 31 bit number
-          // because then the 0s get shifted into bit 30 instead of bit 31.
-          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
-          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
-          __ mov(scratch, Operand(scratch, LSR, scratch2));
-          // Unsigned shift is not allowed to produce a negative number, so
-          // check the sign bit and the sign bit after Smi tagging.
-          __ tst(scratch, Operand(0xc0000000));
-          __ b(ne, &slow);
-          // Smi tag result.
-          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
-          break;
-        case Token::SHL:
-          // Remove tags from operands.
-          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
-          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
-          __ mov(scratch, Operand(scratch, LSL, scratch2));
-          // Check that the signed result fits in a Smi.
-          __ add(scratch2, scratch, Operand(0x40000000), SetCC);
-          __ b(mi, &slow);
-          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
-          break;
-        default: UNREACHABLE();
-      }
-      __ Ret();
-      __ bind(&slow);
-      HandleNonSmiBitwiseOp(masm, lhs, rhs);
-      break;
-    }
-
-    default: UNREACHABLE();
-  }
-  // This code should be unreachable.
-  __ stop("Unreachable");
-
-  // Generate an unreachable reference to the DEFAULT stub so that it can be
-  // found at the end of this stub when clearing ICs at GC.
-  // TODO(kaznacheev): Check performance impact and get rid of this.
-  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
-    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
-    __ CallStub(&uninit);
-  }
-}
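
For reference, the reciprocal-selection loop in the non-power-of-2 DIV path
above, as a standalone host function (sketch, name mine): it picks
mul ≈ 2^shift / d, rounded up with a full 31 significant bits, so that
(x * mul) >> shift == x / d over the 31-bit inputs the stub accepts.

  #include <cstdint>

  void PickFixedPointReciprocal(int d, uint32_t* mul, int* shift) {
    double divisor = 1.0 / d;
    double scale = 4294967296.0;   // 2^32.
    *shift = 32;
    uint32_t m;
    while (true) {
      m = static_cast<uint32_t>(scale * divisor);
      if (m >= 0x7fffffff) break;  // Stop at 31 significant bits.
      scale *= 2.0;
      (*shift)++;
    }
    *mul = m + 1;                  // Round up so truncation never undershoots.
  }
  // d == 10 gives mul == 0xCCCCCCCD, shift == 35 (the classic /10 constant).
  // The stub takes umull's high word (a shift of 32) and then shifts right
  // by shift - 31; the smi-tagged (doubled) input supplies the missing bit.
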
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  Label get_result;
-
-  __ Push(r1, r0);
-
-  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
-  __ mov(r1, Operand(Smi::FromInt(op_)));
-  __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
-  __ Push(r2, r1, r0);
-
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
-      5,
-      1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
-  return stub.GetCode();
-}
-
-
 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
     TRBinaryOpIC::TypeInfo type_info,
     TRBinaryOpIC::TypeInfo result_type_info) {
@@ -2887,6 +1817,9 @@
     case TRBinaryOpIC::ODDBALL:
       GenerateOddballStub(masm);
       break;
+    case TRBinaryOpIC::BOTH_STRING:
+      GenerateBothStringStub(masm);
+      break;
     case TRBinaryOpIC::STRING:
       GenerateStringStub(masm);
       break;
@@ -3077,7 +2010,7 @@
       // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
       // depending on whether VFP3 is available or not.
       FloatingPointHelper::Destination destination =
-          Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+          CpuFeatures::IsSupported(VFP3) &&
           op_ != Token::MOD ?
           FloatingPointHelper::kVFPRegisters :
           FloatingPointHelper::kCoreRegisters;
@@ -3132,6 +2065,9 @@
                                                          op_,
                                                          result,
                                                          scratch1);
+        if (FLAG_debug_code) {
+          __ stop("Unreachable code.");
+        }
       }
       break;
     }
@@ -3190,7 +2126,7 @@
           // The code below for writing into heap numbers isn't capable of
           // writing the register as an unsigned int so we go to slow case if we
           // hit this case.
-          if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+          if (CpuFeatures::IsSupported(VFP3)) {
             __ b(mi, &result_not_a_smi);
           } else {
             __ b(mi, not_numbers);
@@ -3229,7 +2165,7 @@
       // result.
       __ mov(r0, Operand(r5));
 
-      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+      if (CpuFeatures::IsSupported(VFP3)) {
         // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
         // mentioned above SHR needs to always produce a positive result.
         CpuFeatures::Scope scope(VFP3);
@@ -3261,6 +2197,7 @@
 // requested the code falls through. If number allocation is requested but a
 // heap number cannot be allocated, the code jumps to the label gc_required.
 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+    Label* use_runtime,
     Label* gc_required,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
   Label not_smis;
@@ -3282,7 +2219,7 @@
   // If heap number results are possible generate the result in an allocated
   // heap number.
   if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
-    GenerateFPOperation(masm, true, NULL, gc_required);
+    GenerateFPOperation(masm, true, use_runtime, gc_required);
   }
   __ bind(&not_smis);
 }
@@ -3294,11 +2231,14 @@
   if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
       result_type_ == TRBinaryOpIC::SMI) {
     // Only allow smi results.
-    GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+    GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
   } else {
     // Allow heap number result and don't make a transition if a heap number
     // cannot be allocated.
-    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+    GenerateSmiCode(masm,
+                    &call_runtime,
+                    &call_runtime,
+                    ALLOW_HEAPNUMBER_RESULTS);
   }
 
   // Code falls through if the result is not returned as either a smi or heap
@@ -3320,6 +2260,36 @@
 }
 
 
+void TypeRecordingBinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::BOTH_STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = r1;
+  Register right = r0;
+
+  // Test if left operand is a string.
+  __ JumpIfSmi(left, &call_runtime);
+  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &call_runtime);
+
+  // Test if right operand is a string.
+  __ JumpIfSmi(right, &call_runtime);
+  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &call_runtime);
+
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
+
+
 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
   ASSERT(operands_type_ == TRBinaryOpIC::INT32);
 
@@ -3358,7 +2328,7 @@
     // Jump to type transition if they are not. The registers r0 and r1 (right
     // and left) are preserved for the runtime call.
     FloatingPointHelper::Destination destination =
-        Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
+        CpuFeatures::IsSupported(VFP3) &&
         op_ != Token::MOD ?
         FloatingPointHelper::kVFPRegisters :
         FloatingPointHelper::kCoreRegisters;
@@ -3485,6 +2455,9 @@
         // Call the C function to handle the double operation.
         FloatingPointHelper::CallCCodeForDoubleOperation(
             masm, op_, heap_number_result, scratch1);
+        if (FLAG_debug_code) {
+          __ stop("Unreachable code.");
+        }
       }
 
       break;
@@ -3545,7 +2518,7 @@
           // to return a heap number if we can.
           // The non vfp3 code does not support this special case, so jump to
           // runtime if we don't support it.
-          if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+          if (CpuFeatures::IsSupported(VFP3)) {
             __ b(mi,
                  (result_type_ <= TRBinaryOpIC::INT32) ? &transition
                                                        : &return_heap_number);
@@ -3571,16 +2544,16 @@
       __ Ret();
 
       __ bind(&return_heap_number);
-      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-        CpuFeatures::Scope scope(VFP3);
-        heap_number_result = r5;
-        GenerateHeapResultAllocation(masm,
-                                     heap_number_result,
-                                     heap_number_map,
-                                     scratch1,
-                                     scratch2,
-                                     &call_runtime);
+      heap_number_result = r5;
+      GenerateHeapResultAllocation(masm,
+                                   heap_number_result,
+                                   heap_number_map,
+                                   scratch1,
+                                   scratch2,
+                                   &call_runtime);
 
+      if (CpuFeatures::IsSupported(VFP3)) {
+        CpuFeatures::Scope scope(VFP3);
         if (op_ != Token::SHR) {
           // Convert the result to a floating point value.
           __ vmov(double_scratch.low(), r2);
@@ -3599,6 +2572,7 @@
       } else {
         // Tail call that writes the int32 in r2 to the heap number in r0, using
         // r3 as scratch. r0 is preserved and returned.
+        __ mov(r0, r5);
         WriteInt32ToHeapNumberStub stub(r2, r0, r3);
         __ TailCallStub(&stub);
       }
@@ -3665,7 +2639,7 @@
 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
   Label call_runtime, call_string_add_or_runtime;
 
-  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+  GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
 
   GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
 
@@ -3806,7 +2780,7 @@
   const Register cache_entry = r0;
   const bool tagged = (argument_type_ == TAGGED);
 
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     if (tagged) {
       // Argument is a number and is on stack and in r0.
@@ -3894,7 +2868,7 @@
        __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
     }
     __ Ret();
-  }  // if (Isolate::Current()->cpu_features()->IsSupported(VFP3))
+  }  // if (CpuFeatures::IsSupported(VFP3))
 
   __ bind(&calculate);
   if (tagged) {
@@ -3903,7 +2877,7 @@
         ExternalReference(RuntimeFunction(), masm->isolate());
     __ TailCallExternalReference(runtime_function, 1, 1);
   } else {
-    if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) UNREACHABLE();
+    if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
     CpuFeatures::Scope scope(VFP3);
 
     Label no_update;
@@ -4102,7 +3076,7 @@
       __ mov(r0, Operand(r2));
     }
 
-    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
       CpuFeatures::Scope scope(VFP3);
       __ vmov(s0, r1);
@@ -4143,7 +3117,7 @@
 void MathPowStub::Generate(MacroAssembler* masm) {
   Label call_runtime;
 
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
 
     Label base_not_smi;
@@ -4737,7 +3711,7 @@
   __ b(ne, &slow);
 
   // Null is not instance of anything.
-  __ cmp(scratch, Operand(FACTORY->null_value()));
+  __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
   __ b(ne, &object_not_null);
   __ mov(r0, Operand(Smi::FromInt(1)));
   __ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -5235,7 +4209,7 @@
 
   __ bind(&failure);
   // For failure and exception return null.
-  __ mov(r0, Operand(FACTORY->null_value()));
+  __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
   __ add(sp, sp, Operand(4 * kPointerSize));
   __ Ret();
 
@@ -5306,6 +4280,8 @@
   const int kMaxInlineLength = 100;
   Label slowcase;
   Label done;
+  Factory* factory = masm->isolate()->factory();
+
   __ ldr(r1, MemOperand(sp, kPointerSize * 2));
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize == 1);
@@ -5340,7 +4316,7 @@
   // Interleave operations for better latency.
   __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
   __ add(r3, r0, Operand(JSRegExpResult::kSize));
-  __ mov(r4, Operand(FACTORY->empty_fixed_array()));
+  __ mov(r4, Operand(factory->empty_fixed_array()));
   __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
   __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
   __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
@@ -5361,13 +4337,13 @@
   // r5: Number of elements in array, untagged.
 
   // Set map.
-  __ mov(r2, Operand(FACTORY->fixed_array_map()));
+  __ mov(r2, Operand(factory->fixed_array_map()));
   __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
   // Set FixedArray length.
   __ mov(r6, Operand(r5, LSL, kSmiTagSize));
   __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
   // Fill contents of fixed-array with the-hole.
-  __ mov(r2, Operand(FACTORY->the_hole_value()));
+  __ mov(r2, Operand(factory->the_hole_value()));
   __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   // Fill fixed array elements with hole.
   // r0: JSArray, tagged.
@@ -6807,7 +5783,7 @@
 
   // Inlining the double comparison and falling back to the general compare
   // stub if NaN is involved or VFP3 is unsupported.
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
 
     // Load left and right operand
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 1dde255..0bb0025 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -71,162 +71,6 @@
 };
 
 
-class GenericBinaryOpStub : public CodeStub {
- public:
-  static const int kUnknownIntValue = -1;
-
-  GenericBinaryOpStub(Token::Value op,
-                      OverwriteMode mode,
-                      Register lhs,
-                      Register rhs,
-                      int constant_rhs = kUnknownIntValue)
-      : op_(op),
-        mode_(mode),
-        lhs_(lhs),
-        rhs_(rhs),
-        constant_rhs_(constant_rhs),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
-        runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
-        name_(NULL) { }
-
-  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
-      : op_(OpBits::decode(key)),
-        mode_(ModeBits::decode(key)),
-        lhs_(LhsRegister(RegisterBits::decode(key))),
-        rhs_(RhsRegister(RegisterBits::decode(key))),
-        constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
-        runtime_operands_type_(type_info),
-        name_(NULL) { }
-
- private:
-  Token::Value op_;
-  OverwriteMode mode_;
-  Register lhs_;
-  Register rhs_;
-  int constant_rhs_;
-  bool specialized_on_rhs_;
-  BinaryOpIC::TypeInfo runtime_operands_type_;
-  char* name_;
-
-  static const int kMaxKnownRhs = 0x40000000;
-  static const int kKnownRhsKeyBits = 6;
-
-  // Minor key encoding in 18 bits.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 6> {};
-  class TypeInfoBits: public BitField<int, 8, 3> {};
-  class RegisterBits: public BitField<bool, 11, 1> {};
-  class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
-
-  Major MajorKey() { return GenericBinaryOp; }
-  int MinorKey() {
-    ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
-           (lhs_.is(r1) && rhs_.is(r0)));
-    // Encode the parameters in a unique 18 bit value.
-    return OpBits::encode(op_)
-           | ModeBits::encode(mode_)
-           | KnownIntBits::encode(MinorKeyForKnownInt())
-           | TypeInfoBits::encode(runtime_operands_type_)
-           | RegisterBits::encode(lhs_.is(r0));
-  }
-
-  void Generate(MacroAssembler* masm);
-  void HandleNonSmiBitwiseOp(MacroAssembler* masm,
-                             Register lhs,
-                             Register rhs);
-  void HandleBinaryOpSlowCases(MacroAssembler* masm,
-                               Label* not_smi,
-                               Register lhs,
-                               Register rhs,
-                               const Builtins::JavaScript& builtin);
-  void GenerateTypeTransition(MacroAssembler* masm);
-
-  static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
-    if (constant_rhs == kUnknownIntValue) return false;
-    if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
-    if (op == Token::MOD) {
-      if (constant_rhs <= 1) return false;
-      if (constant_rhs <= 10) return true;
-      if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
-      return false;
-    }
-    return false;
-  }
-
-  int MinorKeyForKnownInt() {
-    if (!specialized_on_rhs_) return 0;
-    if (constant_rhs_ <= 10) return constant_rhs_ + 1;
-    ASSERT(IsPowerOf2(constant_rhs_));
-    int key = 12;
-    int d = constant_rhs_;
-    while ((d & 1) == 0) {
-      key++;
-      d >>= 1;
-    }
-    ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
-    return key;
-  }
-
-  int KnownBitsForMinorKey(int key) {
-    if (!key) return 0;
-    if (key <= 11) return key - 1;
-    int d = 1;
-    while (key != 12) {
-      key--;
-      d <<= 1;
-    }
-    return d;
-  }
-
-  Register LhsRegister(bool lhs_is_r0) {
-    return lhs_is_r0 ? r0 : r1;
-  }
-
-  Register RhsRegister(bool lhs_is_r0) {
-    return lhs_is_r0 ? r1 : r0;
-  }
-
-  bool HasSmiSmiFastPath() {
-    return op_ != Token::DIV;
-  }
-
-  bool ShouldGenerateSmiCode() {
-    return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
-        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-        runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  bool ShouldGenerateFPCode() {
-    return runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
-  virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(runtime_operands_type_);
-  }
-
-  const char* GetName();
-
-  virtual void FinishCode(Code* code) {
-    code->set_binary_op_type(runtime_operands_type_);
-  }
-
-#ifdef DEBUG
-  void Print() {
-    if (!specialized_on_rhs_) {
-      PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
-    } else {
-      PrintF("GenericBinaryOpStub (%s by %d)\n",
-             Token::String(op_),
-             constant_rhs_);
-    }
-  }
-#endif
-};
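
The MinorKeyForKnownInt()/KnownBitsForMinorKey() pair above is an encode/decode scheme over the divisors the stub specializes on: constants 2..10 map to key constant+1, and powers of two up to kMaxKnownRhs map to 12 + log2(constant). A minimal standalone sketch of the round trip (plain C++; EncodeKnownInt/DecodeKnownInt are illustrative names, not V8 API):

#include <cassert>

// Mirrors MinorKeyForKnownInt(); only valid for the divisors the stub
// actually specializes on (2..10 and powers of two).
static int EncodeKnownInt(int d) {
  if (d <= 10) return d + 1;
  int key = 12;  // powers of two map to 12 + log2(d)
  while ((d & 1) == 0) { key++; d >>= 1; }
  return key;
}

// Mirrors KnownBitsForMinorKey().
static int DecodeKnownInt(int key) {
  if (key == 0) return 0;
  if (key <= 11) return key - 1;
  int d = 1;
  while (key != 12) { key--; d <<= 1; }
  return d;
}

int main() {
  for (int d = 2; d <= 10; d++)
    assert(DecodeKnownInt(EncodeKnownInt(d)) == d);
  for (int s = 4; s <= 30; s++) {  // power-of-two divisors up to 2^30
    int d = 1 << s;
    assert(DecodeKnownInt(EncodeKnownInt(d)) == d);
  }
  return 0;
}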
-
-
 class TypeRecordingBinaryOpStub: public CodeStub {
  public:
   TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
@@ -235,7 +79,7 @@
         operands_type_(TRBinaryOpIC::UNINITIALIZED),
         result_type_(TRBinaryOpIC::UNINITIALIZED),
         name_(NULL) {
-    use_vfp3_ = Isolate::Current()->cpu_features()->IsSupported(VFP3);
+    use_vfp3_ = CpuFeatures::IsSupported(VFP3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
@@ -303,6 +147,7 @@
                            Label* not_numbers,
                            Label* gc_required);
   void GenerateSmiCode(MacroAssembler* masm,
+                       Label* use_runtime,
                        Label* gc_required,
                        SmiCodeGenerateHeapNumberResults heapnumber_results);
   void GenerateLoadArguments(MacroAssembler* masm);
@@ -313,6 +158,7 @@
   void GenerateHeapNumberStub(MacroAssembler* masm);
   void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
+  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
   void GenerateAddStrings(MacroAssembler* masm);
   void GenerateCallRuntime(MacroAssembler* masm);
@@ -413,102 +259,6 @@
 };
 
 
-// This stub can do a fast mod operation without using fp.
-// It is tail-called from the GenericBinaryOpStub and always returns an
-// answer.  It never causes GC, so it doesn't need a real frame.
-//
-// The inputs are always positive Smis.  This is never called
-// where the denominator is a power of 2.  We handle that separately.
-//
-// If we consider the denominator as an odd number multiplied by a power of 2,
-// then:
-// * The exponent (power of 2) is in the shift_distance register.
-// * The odd number is in the odd_number register.  It is always in the range
-//   of 3 to 25.
-// * The bits from the numerator that are to be copied to the answer (there are
-//   shift_distance of them) are in the mask_bits register.
-// * The other bits of the numerator have been shifted down and are in the lhs
-//   register.
-class IntegerModStub : public CodeStub {
- public:
-  IntegerModStub(Register result,
-                 Register shift_distance,
-                 Register odd_number,
-                 Register mask_bits,
-                 Register lhs,
-                 Register scratch)
-      : result_(result),
-        shift_distance_(shift_distance),
-        odd_number_(odd_number),
-        mask_bits_(mask_bits),
-        lhs_(lhs),
-        scratch_(scratch) {
-    // We don't code these in the minor key, so they should always be the same.
-    // We don't really want to fix that since this stub is rather large and we
-    // don't want many copies of it.
-    ASSERT(shift_distance_.is(r9));
-    ASSERT(odd_number_.is(r4));
-    ASSERT(mask_bits_.is(r3));
-    ASSERT(scratch_.is(r5));
-  }
-
- private:
-  Register result_;
-  Register shift_distance_;
-  Register odd_number_;
-  Register mask_bits_;
-  Register lhs_;
-  Register scratch_;
-
-  // Minor key encoding in 8 bits.
-  class ResultRegisterBits: public BitField<int, 0, 4> {};
-  class LhsRegisterBits: public BitField<int, 4, 4> {};
-
-  Major MajorKey() { return IntegerMod; }
-  int MinorKey() {
-    // Encode the parameters in a unique 8-bit value.
-    return ResultRegisterBits::encode(result_.code())
-           | LhsRegisterBits::encode(lhs_.code());
-  }
-
-  void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "IntegerModStub"; }
-
-  // Utility functions.
-  void DigitSum(MacroAssembler* masm,
-                Register lhs,
-                int mask,
-                int shift,
-                Label* entry);
-  void DigitSum(MacroAssembler* masm,
-                Register lhs,
-                Register scratch,
-                int mask,
-                int shift1,
-                int shift2,
-                Label* entry);
-  void ModGetInRangeBySubtraction(MacroAssembler* masm,
-                                  Register lhs,
-                                  int shift,
-                                  int rhs);
-  void ModReduce(MacroAssembler* masm,
-                 Register lhs,
-                 int max,
-                 int denominator);
-  void ModAnswer(MacroAssembler* masm,
-                 Register result,
-                 Register shift_distance,
-                 Register mask_bits,
-                 Register sum_of_digits);
-
-
-#ifdef DEBUG
-  void Print() { PrintF("IntegerModStub\n"); }
-#endif
-};
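
The decomposition the comment above describes is compact in scalar code: for a denominator d = odd * 2^k, the low k bits of the numerator (mask_bits) go straight into the answer, and only (numerator >> k) mod odd remains to be computed -- the stub reduces that remainder with digit sums (DigitSum/ModReduce) rather than a hardware modulus. A sketch under those assumptions, using plain % for the odd part (DecomposedMod is an illustrative name, not V8 code):

#include <cassert>

static unsigned DecomposedMod(unsigned x, unsigned d) {
  unsigned k = 0;
  while ((d & 1) == 0) { d >>= 1; k++; }      // d is now the odd factor
  unsigned mask_bits = x & ((1u << k) - 1);   // bits copied to the answer
  unsigned lhs = x >> k;                      // shifted-down numerator
  return ((lhs % d) << k) | mask_bits;
}

int main() {
  // The stub's odd factors are in the range 3..25.
  for (unsigned x = 0; x < 1000; x++)
    for (unsigned odd = 3; odd <= 25; odd += 2)
      assert(DecomposedMod(x, odd * 4) == x % (odd * 4));
  return 0;
}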
-
-
 // This stub can convert a signed int32 to a heap number (double).  It does
 // not work for int32s that are in Smi range!  No GC occurs during this stub
 // so you don't have to set up the frame.
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
deleted file mode 100644
index 81ed2d0..0000000
--- a/src/arm/codegen-arm-inl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_ARM_CODEGEN_ARM_INL_H_
-#define V8_ARM_CODEGEN_ARM_INL_H_
-
-#include "virtual-frame-arm.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_ARM_CODEGEN_ARM_INL_H_
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 91c4747..bf748a9 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,56 +29,14 @@
 
 #if defined(V8_TARGET_ARCH_ARM)
 
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "jsregexp.h"
-#include "jump-target-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "register-allocator-inl.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-#include "virtual-frame-arm-inl.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
 
-
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
-  // On ARM you either have a completely spilled frame or you
-  // handle it yourself, but at the moment there's no automation
-  // of registers and deferred code.
-}
-
-
-void DeferredCode::RestoreRegisters() {
-}
-
-
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  frame_state_->frame()->AssertIsSpilled();
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-}
-
-
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
@@ -89,7348 +47,6 @@
 }
 
 
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
-    : owner_(owner),
-      previous_(owner->state()) {
-  owner->set_state(this);
-}
-
-
-ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
-                                             JumpTarget* true_target,
-                                             JumpTarget* false_target)
-    : CodeGenState(owner),
-      true_target_(true_target),
-      false_target_(false_target) {
-  owner->set_state(this);
-}
-
-
-TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
-                                           Slot* slot,
-                                           TypeInfo type_info)
-    : CodeGenState(owner),
-      slot_(slot) {
-  owner->set_state(this);
-  old_type_info_ = owner->set_type_info(slot, type_info);
-}
-
-
-CodeGenState::~CodeGenState() {
-  ASSERT(owner_->state() == this);
-  owner_->set_state(previous_);
-}
-
-
-TypeInfoCodeGenState::~TypeInfoCodeGenState() {
-  owner()->set_type_info(slot_, old_type_info_);
-}
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
-    : deferred_(8),
-      masm_(masm),
-      info_(NULL),
-      frame_(NULL),
-      allocator_(NULL),
-      cc_reg_(al),
-      state_(NULL),
-      loop_nesting_(0),
-      type_info_(NULL),
-      function_return_(JumpTarget::BIDIRECTIONAL),
-      function_return_is_shadowed_(false) {
-}
-
-
-// Calling conventions:
-// fp: caller's frame pointer
-// sp: stack pointer
-// r1: called JS function
-// cp: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
-  // Record the position for debugging purposes.
-  CodeForFunctionPosition(info->function());
-  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
-  // Initialize state.
-  info_ = info;
-
-  int slots = scope()->num_parameters() + scope()->num_stack_slots();
-  ScopedVector<TypeInfo> type_info_array(slots);
-  for (int i = 0; i < slots; i++) {
-    type_info_array[i] = TypeInfo::Unknown();
-  }
-  type_info_ = &type_info_array;
-
-  ASSERT(allocator_ == NULL);
-  RegisterAllocator register_allocator(this);
-  allocator_ = &register_allocator;
-  ASSERT(frame_ == NULL);
-  frame_ = new VirtualFrame();
-  cc_reg_ = al;
-
-  // Adjust for function-level loop nesting.
-  ASSERT_EQ(0, loop_nesting_);
-  loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
-  {
-    CodeGenState state(this);
-
-    // Entry:
-    // Stack: receiver, arguments
-    // lr: return address
-    // fp: caller's frame pointer
-    // sp: stack pointer
-    // r1: called JS function
-    // cp: callee's context
-    allocator_->Initialize();
-
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-      frame_->SpillAll();
-      __ stop("stop-at");
-    }
-#endif
-
-    frame_->Enter();
-    // tos: code slot
-
-    // Allocate space for locals and initialize them.  This also checks
-    // for stack overflow.
-    frame_->AllocateStackSlots();
-
-    frame_->AssertIsSpilled();
-    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-    if (heap_slots > 0) {
-      // Allocate local context.
-      // Get outer context and create a new context based on it.
-      __ ldr(r0, frame_->Function());
-      frame_->EmitPush(r0);
-      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-        FastNewContextStub stub(heap_slots);
-        frame_->CallStub(&stub, 1);
-      } else {
-        frame_->CallRuntime(Runtime::kNewContext, 1);
-      }
-
-#ifdef DEBUG
-      JumpTarget verified_true;
-      __ cmp(r0, cp);
-      verified_true.Branch(eq);
-      __ stop("NewContext: r0 is expected to be the same as cp");
-      verified_true.Bind();
-#endif
-      // Update context local.
-      __ str(cp, frame_->Context());
-    }
-
-    // TODO(1241774): Improve this code:
-    // 1) only needed if we have a context
-    // 2) no need to recompute context ptr every single time
-    // 3) don't copy parameter operand code from SlotOperand!
-    {
-      Comment cmnt2(masm_, "[ copy context parameters into .context");
-      // Note that iteration order is relevant here! If we have the same
-      // parameter twice (e.g., function (x, y, x)), and that parameter
-      // needs to be copied into the context, it is the value of the last
-      // argument passed under that name that must be copied. This is a
-      // rare case, so we don't check for it; instead we rely on the
-      // copying order: such a parameter is copied repeatedly into the
-      // same context location, and thus the last value is what is seen
-      // inside the function.
-      frame_->AssertIsSpilled();
-      for (int i = 0; i < scope()->num_parameters(); i++) {
-        Variable* par = scope()->parameter(i);
-        Slot* slot = par->AsSlot();
-        if (slot != NULL && slot->type() == Slot::CONTEXT) {
-          ASSERT(!scope()->is_global_scope());  // No params in global scope.
-          __ ldr(r1, frame_->ParameterAt(i));
-          // Loads r2 with context; used below in RecordWrite.
-          __ str(r1, SlotOperand(slot, r2));
-          // Load the offset into r3.
-          int slot_offset =
-              FixedArray::kHeaderSize + slot->index() * kPointerSize;
-          __ RecordWrite(r2, Operand(slot_offset), r3, r1);
-        }
-      }
-    }
-
-    // Store the arguments object.  This must happen after context
-    // initialization because the arguments object may be stored in
-    // the context.
-    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
-      StoreArgumentsObject(true);
-    }
-
-    // Initialize ThisFunction reference if present.
-    if (scope()->is_function_scope() && scope()->function() != NULL) {
-      frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
-      StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
-    }
-
-    // Initialize the function return target after the locals are set
-    // up, because it needs the expected frame height from the frame.
-    function_return_.SetExpectedHeight();
-    function_return_is_shadowed_ = false;
-
-    // Generate code to 'execute' declarations and initialize functions
-    // (source elements). In case of an illegal redeclaration we need to
-    // handle that instead of processing the declarations.
-    if (scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ illegal redeclarations");
-      scope()->VisitIllegalRedeclaration(this);
-    } else {
-      Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope()->declarations());
-      // Bail out if a stack-overflow exception occurred when processing
-      // declarations.
-      if (HasStackOverflow()) return;
-    }
-
-    if (FLAG_trace) {
-      frame_->CallRuntime(Runtime::kTraceEnter, 0);
-      // Ignore the return value.
-    }
-
-    // Compile the body of the function in a vanilla state. Don't
-    // bother compiling all the code if the scope has an illegal
-    // redeclaration.
-    if (!scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
-      bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
-      bool should_trace =
-          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
-      if (should_trace) {
-        frame_->CallRuntime(Runtime::kDebugTrace, 0);
-        // Ignore the return value.
-      }
-#endif
-      VisitStatements(info->function()->body());
-    }
-  }
-
-  // Handle the return from the function.
-  if (has_valid_frame()) {
-    // If there is a valid frame, control flow can fall off the end of
-    // the body.  In that case there is an implicit return statement.
-    ASSERT(!function_return_is_shadowed_);
-    frame_->PrepareForReturn();
-    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-    if (function_return_.is_bound()) {
-      function_return_.Jump();
-    } else {
-      function_return_.Bind();
-      GenerateReturnSequence();
-    }
-  } else if (function_return_.is_linked()) {
-    // If the return target has dangling jumps to it, then we have not
-    // yet generated the return sequence.  This can happen when (a)
-    // control does not flow off the end of the body so we did not
-    // compile an artificial return statement just above, and (b) there
-    // are return statements in the body but (c) they are all shadowed.
-    function_return_.Bind();
-    GenerateReturnSequence();
-  }
-
-  // Adjust for function-level loop nesting.
-  ASSERT(loop_nesting_ == (info->is_in_loop() ? 1 : 0));
-  loop_nesting_ = 0;
-
-  // Code generation state must be reset.
-  ASSERT(!has_cc());
-  ASSERT(state_ == NULL);
-  ASSERT(loop_nesting() == 0);
-  ASSERT(!function_return_is_shadowed_);
-  function_return_.Unuse();
-  DeleteFrame();
-
-  // Process any deferred code using the register allocator.
-  if (!HasStackOverflow()) {
-    ProcessDeferred();
-  }
-
-  allocator_ = NULL;
-  type_info_ = NULL;
-}
-
-
-int CodeGenerator::NumberOfSlot(Slot* slot) {
-  if (slot == NULL) return kInvalidSlotNumber;
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-      return slot->index();
-    case Slot::LOCAL:
-      return slot->index() + scope()->num_parameters();
-    default:
-      break;
-  }
-  return kInvalidSlotNumber;
-}
-
-
-MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
-  // Currently, this assertion will fail if we try to assign to
-  // a constant variable that is constant because it is read-only
-  // (such as the variable referring to a named function expression).
-  // We need to implement assignments to read-only variables.
-  // Ideally, we should do this during AST generation (by converting
-  // such assignments into expression statements); however, in general
-  // we may not be able to make the decision until past AST generation,
-  // that is when the entire program is known.
-  ASSERT(slot != NULL);
-  int index = slot->index();
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-      return frame_->ParameterAt(index);
-
-    case Slot::LOCAL:
-      return frame_->LocalAt(index);
-
-    case Slot::CONTEXT: {
-      // Follow the context chain if necessary.
-      ASSERT(!tmp.is(cp));  // do not overwrite context register
-      Register context = cp;
-      int chain_length = scope()->ContextChainLength(slot->var()->scope());
-      for (int i = 0; i < chain_length; i++) {
-        // Load the closure.
-        // (All contexts, even 'with' contexts, have a closure,
-        // and it is the same for all contexts inside a function.
-        // There is no need to go to the function context first.)
-        __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
-        // Load the function context (which is the incoming, outer context).
-        __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
-        context = tmp;
-      }
-      // We may have a 'with' context now. Get the function context.
-      // (In fact this mov may never be needed, since the scope analysis
-      // may not permit a direct context access in this case and thus we are
-      // always at a function context. However it is safe to dereference be-
-      // cause the function context of a function context is itself. Before
-      // deleting this mov we should try to create a counter-example first,
-      // though...)
-      __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
-      return ContextOperand(tmp, index);
-    }
-
-    default:
-      UNREACHABLE();
-      return MemOperand(r0, 0);
-  }
-}
-
-
-MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
-    Slot* slot,
-    Register tmp,
-    Register tmp2,
-    JumpTarget* slow) {
-  ASSERT(slot->type() == Slot::CONTEXT);
-  Register context = cp;
-
-  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
-        __ tst(tmp2, tmp2);
-        slow->Branch(ne);
-      }
-      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
-      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
-      context = tmp;
-    }
-  }
-  // Check that last extension is NULL.
-  __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
-  __ tst(tmp2, tmp2);
-  slow->Branch(ne);
-  __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
-  return ContextOperand(tmp, slot->index());
-}
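
The walk above hops from context to context (through the closure's context slot) and bails to the slow path whenever an intervening scope that calls eval might have introduced an extension object. A toy version of that control flow (illustrative structs; real contexts live on the V8 heap and are reached through CLOSURE_INDEX and JSFunction::kContextOffset):

#include <cassert>
#include <cstddef>

struct Context {
  Context* outer;   // the closure's context
  void* extension;  // non-NULL => dynamically introduced bindings
};

static Context* WalkToSlotContext(Context* ctx, int hops, bool* slow) {
  for (int i = 0; i < hops; i++) {
    if (ctx->extension != NULL) { *slow = true; return NULL; }
    ctx = ctx->outer;
  }
  if (ctx->extension != NULL) { *slow = true; return NULL; }  // last check
  return ctx;
}

int main() {
  Context global = { NULL, NULL };
  Context fn = { &global, NULL };
  bool slow = false;
  assert(WalkToSlotContext(&fn, 1, &slow) == &global && !slow);
  fn.extension = &fn;  // pretend this scope calls eval
  assert(WalkToSlotContext(&fn, 1, &slow) == NULL && slow);
  return 0;
}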
-
-
-// Loads a value on TOS. If it is a boolean value, the result may have been
-// (partially) translated into branches, or it may have set the condition
-// code register. If force_cc is set, the value is forced to set the
-// condition code register and no value is pushed. If the condition code
-// register was set, has_cc() is true and cc_reg_ contains the condition to
-// test for 'true'.
-void CodeGenerator::LoadCondition(Expression* x,
-                                  JumpTarget* true_target,
-                                  JumpTarget* false_target,
-                                  bool force_cc) {
-  ASSERT(!has_cc());
-  int original_height = frame_->height();
-
-  { ConditionCodeGenState new_state(this, true_target, false_target);
-    Visit(x);
-
-    // If we hit a stack overflow, we may not have actually visited
-    // the expression.  In that case, we ensure that we have a
-    // valid-looking frame state because we will continue to generate
-    // code as we unwind the C++ stack.
-    //
-    // It's possible to have both a stack overflow and a valid frame
-    // state (eg, a subexpression overflowed, visiting it returned
-    // with a dummied frame state, and visiting this expression
-    // returned with a normal-looking state).
-    if (HasStackOverflow() &&
-        has_valid_frame() &&
-        !has_cc() &&
-        frame_->height() == original_height) {
-      true_target->Jump();
-    }
-  }
-  if (force_cc && frame_ != NULL && !has_cc()) {
-    // Convert the TOS value to a boolean in the condition code register.
-    ToBoolean(true_target, false_target);
-  }
-  ASSERT(!force_cc || !has_valid_frame() || has_cc());
-  ASSERT(!has_valid_frame() ||
-         (has_cc() && frame_->height() == original_height) ||
-         (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
-  // We generally assume that we are not in a spilled scope for most
-  // of the code generator.  A failure to ensure this caused issue 815
-  // and this assert is designed to catch similar issues.
-  frame_->AssertIsNotSpilled();
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  JumpTarget true_target;
-  JumpTarget false_target;
-  LoadCondition(expr, &true_target, &false_target, false);
-
-  if (has_cc()) {
-    // Convert cc_reg_ into a boolean value.
-    JumpTarget loaded;
-    JumpTarget materialize_true;
-    materialize_true.Branch(cc_reg_);
-    frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
-    loaded.Jump();
-    materialize_true.Bind();
-    frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
-    loaded.Bind();
-    cc_reg_ = al;
-  }
-
-  if (true_target.is_linked() || false_target.is_linked()) {
-    // We have at least one condition value that has been "translated"
-    // into a branch, thus it needs to be loaded explicitly.
-    JumpTarget loaded;
-    if (frame_ != NULL) {
-      loaded.Jump();  // Don't lose the current TOS.
-    }
-    bool both = true_target.is_linked() && false_target.is_linked();
-    // Load "true" if necessary.
-    if (true_target.is_linked()) {
-      true_target.Bind();
-      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
-    }
-    // If both "true" and "false" need to be loaded jump across the code for
-    // "false".
-    if (both) {
-      loaded.Jump();
-    }
-    // Load "false" if necessary.
-    if (false_target.is_linked()) {
-      false_target.Bind();
-      frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
-    }
-    // A value is loaded on all paths reaching this point.
-    loaded.Bind();
-  }
-  ASSERT(has_valid_frame());
-  ASSERT(!has_cc());
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::LoadGlobal() {
-  Register reg = frame_->GetTOSRegister();
-  __ ldr(reg, GlobalObjectOperand());
-  frame_->EmitPush(reg);
-}
-
-
-void CodeGenerator::LoadGlobalReceiver(Register scratch) {
-  Register reg = frame_->GetTOSRegister();
-  __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
-  __ ldr(reg,
-         FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
-  frame_->EmitPush(reg);
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
-  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-
-  // In strict mode there is no need for shadow arguments.
-  ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
-  // We don't want to do lazy arguments allocation for functions that
-  // have heap-allocated contexts, because it interferes with the
-  // uninitialized const tracking in the context objects.
-  return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
-      ? EAGER_ARGUMENTS_ALLOCATION
-      : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-void CodeGenerator::StoreArgumentsObject(bool initial) {
-  ArgumentsAllocationMode mode = ArgumentsMode();
-  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
-  Comment cmnt(masm_, "[ store arguments object");
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
-    // When using lazy arguments allocation, we store the hole value
-    // as a sentinel indicating that the arguments object hasn't been
-    // allocated yet.
-    frame_->EmitPushRoot(Heap::kArgumentsMarkerRootIndex);
-  } else {
-    frame_->SpillAll();
-    ArgumentsAccessStub stub(is_strict_mode()
-        ? ArgumentsAccessStub::NEW_STRICT
-        : ArgumentsAccessStub::NEW_NON_STRICT);
-    __ ldr(r2, frame_->Function());
-    // The receiver is below the arguments, the return address, and the
-    // frame pointer on the stack.
-    const int kReceiverDisplacement = 2 + scope()->num_parameters();
-    __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
-    __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
-    frame_->Adjust(3);
-    __ Push(r2, r1, r0);
-    frame_->CallStub(&stub, 3);
-    frame_->EmitPush(r0);
-  }
-
-  Variable* arguments = scope()->arguments();
-  Variable* shadow = scope()->arguments_shadow();
-  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
-  ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
-         scope()->is_strict_mode());
-
-  JumpTarget done;
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
-    // We have to skip storing into the arguments slot if it has
-    // already been written to. This can happen if a function
-    // has a local variable named 'arguments'.
-    LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-    Register arguments = frame_->PopToRegister();
-    __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
-    __ cmp(arguments, ip);
-    done.Branch(ne);
-  }
-  StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
-  if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
-  if (shadow != NULL) {
-    StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
-  }
-}
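
The lazy path above is a sentinel pattern: the arguments slot is pre-filled with the arguments marker, and a later store materializes the real object only if the marker is still there (i.e. user code has not already assigned to 'arguments'). A sketch of that shape (illustrative types; the real slot lives in the virtual frame or context):

#include <cassert>
#include <cstddef>

struct Slot { const void* value; };
static const int kArgumentsMarkerObject = 0;  // stand-in for the marker root
static const void* const kMarker = &kArgumentsMarkerObject;

static void StoreArguments(Slot* slot, bool initial, const void* real) {
  if (initial) { slot->value = kMarker; return; }  // lazy: sentinel only
  if (slot->value != kMarker) return;  // user code already wrote: skip
  slot->value = real;                  // materialize on first real use
}

int main() {
  Slot s = { NULL };
  int args_object;
  StoreArguments(&s, true, NULL);
  assert(s.value == kMarker);
  StoreArguments(&s, false, &args_object);
  assert(s.value == &args_object);
  return 0;
}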
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
-  // Special handling of identifiers as subexpressions of typeof.
-  Variable* variable = expr->AsVariableProxy()->AsVariable();
-  if (variable != NULL && !variable->is_this() && variable->is_global()) {
-    // For a global variable we build the property reference
-    // <global>.<variable> and perform a (regular non-contextual) property
-    // load to make sure we do not get reference errors.
-    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
-    Literal key(variable->name());
-    Property property(&global, &key, RelocInfo::kNoPosition);
-    Reference ref(this, &property);
-    ref.GetValue();
-  } else if (variable != NULL && variable->AsSlot() != NULL) {
-    // For a variable that rewrites to a slot, we signal it is the immediate
-    // subexpression of a typeof.
-    LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
-  } else {
-    // Anything else can be handled normally.
-    Load(expr);
-  }
-}
-
-
-Reference::Reference(CodeGenerator* cgen,
-                     Expression* expression,
-                     bool persist_after_get)
-    : cgen_(cgen),
-      expression_(expression),
-      type_(ILLEGAL),
-      persist_after_get_(persist_after_get) {
-  // We generally assume that we are not in a spilled scope for most
-  // of the code generator.  A failure to ensure this caused issue 815
-  // and this assert is designed to catch similar issues.
-  cgen->frame()->AssertIsNotSpilled();
-  cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
-  ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
-  Comment cmnt(masm_, "[ LoadReference");
-  Expression* e = ref->expression();
-  Property* property = e->AsProperty();
-  Variable* var = e->AsVariableProxy()->AsVariable();
-
-  if (property != NULL) {
-    // The expression is either a property or a variable proxy that rewrites
-    // to a property.
-    Load(property->obj());
-    if (property->key()->IsPropertyName()) {
-      ref->set_type(Reference::NAMED);
-    } else {
-      Load(property->key());
-      ref->set_type(Reference::KEYED);
-    }
-  } else if (var != NULL) {
-    // The expression is a variable proxy that does not rewrite to a
-    // property.  Global variables are treated as named property references.
-    if (var->is_global()) {
-      LoadGlobal();
-      ref->set_type(Reference::NAMED);
-    } else {
-      ASSERT(var->AsSlot() != NULL);
-      ref->set_type(Reference::SLOT);
-    }
-  } else {
-    // Anything else is a runtime error.
-    Load(e);
-    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
-  }
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
-  int size = ref->size();
-  ref->set_unloaded();
-  if (size == 0) return;
-
-  // Pop a reference from the stack while preserving TOS.
-  VirtualFrame::RegisterAllocationScope scope(this);
-  Comment cmnt(masm_, "[ UnloadReference");
-  if (size > 0) {
-    Register tos = frame_->PopToRegister();
-    frame_->Drop(size);
-    frame_->EmitPush(tos);
-  }
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
-// register to a boolean in the condition code register. The code
-// may jump to 'false_target' in case the register converts to 'false'.
-void CodeGenerator::ToBoolean(JumpTarget* true_target,
-                              JumpTarget* false_target) {
-  // Note: The generated code snippet does not change stack variables.
-  //       Only the condition code should be set.
-  bool known_smi = frame_->KnownSmiAt(0);
-  Register tos = frame_->PopToRegister();
-
-  // Fast case checks
-
-  // Check if the value is 'false'.
-  if (!known_smi) {
-    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-    __ cmp(tos, ip);
-    false_target->Branch(eq);
-
-    // Check if the value is 'true'.
-    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-    __ cmp(tos, ip);
-    true_target->Branch(eq);
-
-    // Check if the value is 'undefined'.
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(tos, ip);
-    false_target->Branch(eq);
-  }
-
-  // Check if the value is a smi.
-  __ cmp(tos, Operand(Smi::FromInt(0)));
-
-  if (!known_smi) {
-    false_target->Branch(eq);
-    __ tst(tos, Operand(kSmiTagMask));
-    true_target->Branch(eq);
-
-    // Slow case.
-    if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-      CpuFeatures::Scope scope(VFP3);
-      // Implements the slow case by using ToBooleanStub.
-      // The ToBooleanStub takes a single argument, and
-      // returns a non-zero value for true, or zero for false.
-      // Both the argument value and the return value use the
-      // register assigned to tos_
-      ToBooleanStub stub(tos);
-      frame_->CallStub(&stub, 0);
-      // Convert the result in "tos" to a condition code.
-      __ cmp(tos, Operand(0, RelocInfo::NONE));
-    } else {
-      // Implements slow case by calling the runtime.
-      frame_->EmitPush(tos);
-      frame_->CallRuntime(Runtime::kToBool, 1);
-      // Convert the result (r0) to a condition code.
-      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-      __ cmp(r0, ip);
-    }
-  }
-
-  cc_reg_ = ne;
-}
-
-
-void CodeGenerator::GenericBinaryOperation(Token::Value op,
-                                           OverwriteMode overwrite_mode,
-                                           GenerateInlineSmi inline_smi,
-                                           int constant_rhs) {
-  // top of virtual frame: y
-  // 2nd elt. on virtual frame : x
-  // result : top of virtual frame
-
-  // Stub is entered with a call: 'return address' is in lr.
-  switch (op) {
-    case Token::ADD:
-    case Token::SUB:
-      if (inline_smi) {
-        JumpTarget done;
-        Register rhs = frame_->PopToRegister();
-        Register lhs = frame_->PopToRegister(rhs);
-        Register scratch = VirtualFrame::scratch0();
-        __ orr(scratch, rhs, Operand(lhs));
-        // Check they are both small and positive.
-        __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
-        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
-        STATIC_ASSERT(kSmiTag == 0);
-        if (op == Token::ADD) {
-          __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
-        } else {
-          __ sub(r0, lhs, Operand(rhs), LeaveCC, eq);
-        }
-        done.Branch(eq);
-        GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
-        frame_->SpillAll();
-        frame_->CallStub(&stub, 0);
-        done.Bind();
-        frame_->EmitPush(r0);
-        break;
-      } else {
-        // Fall through!
-      }
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-      if (inline_smi) {
-        bool rhs_is_smi = frame_->KnownSmiAt(0);
-        bool lhs_is_smi = frame_->KnownSmiAt(1);
-        Register rhs = frame_->PopToRegister();
-        Register lhs = frame_->PopToRegister(rhs);
-        Register smi_test_reg;
-        Condition cond;
-        if (!rhs_is_smi || !lhs_is_smi) {
-          if (rhs_is_smi) {
-            smi_test_reg = lhs;
-          } else if (lhs_is_smi) {
-            smi_test_reg = rhs;
-          } else {
-            smi_test_reg = VirtualFrame::scratch0();
-            __ orr(smi_test_reg, rhs, Operand(lhs));
-          }
-          // Check they are both Smis.
-          __ tst(smi_test_reg, Operand(kSmiTagMask));
-          cond = eq;
-        } else {
-          cond = al;
-        }
-        ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
-        if (op == Token::BIT_OR) {
-          __ orr(r0, lhs, Operand(rhs), LeaveCC, cond);
-        } else if (op == Token::BIT_AND) {
-          __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
-        } else {
-          ASSERT(op == Token::BIT_XOR);
-          STATIC_ASSERT(kSmiTag == 0);
-          __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
-        }
-        if (cond != al) {
-          JumpTarget done;
-          done.Branch(cond);
-          GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
-          frame_->SpillAll();
-          frame_->CallStub(&stub, 0);
-          done.Bind();
-        }
-        frame_->EmitPush(r0);
-        break;
-      } else {
-        // Fall through!
-      }
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR: {
-      Register rhs = frame_->PopToRegister();
-      Register lhs = frame_->PopToRegister(rhs);  // Don't pop to rhs register.
-      GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
-      frame_->SpillAll();
-      frame_->CallStub(&stub, 0);
-      frame_->EmitPush(r0);
-      break;
-    }
-
-    case Token::COMMA: {
-      Register scratch = frame_->PopToRegister();
-      // Simply discard left value.
-      frame_->Drop();
-      frame_->EmitPush(scratch);
-      break;
-    }
-
-    default:
-      // Other cases should have been handled before this point.
-      UNREACHABLE();
-      break;
-  }
-}
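
The ADD/SUB fast path above leans on V8's smi representation on ARM (kSmiTag == 0, one tag bit): a smi stores value v as v << 1, so tagged addition is ordinary addition, and the single test (lhs | rhs) & (kSmiTagMask | 0xc0000000) proves both operands are smis that are small and non-negative, so the sum cannot overflow. A sketch of that invariant in plain C++:

#include <cassert>
#include <cstdint>

static const uint32_t kSmiTagMask = 1;              // low bit is the tag
static uint32_t Tag(uint32_t v) { return v << 1; }  // kSmiTag == 0

static bool FastPathApplies(uint32_t lhs, uint32_t rhs) {
  return ((lhs | rhs) & (kSmiTagMask | 0xc0000000)) == 0;
}

int main() {
  uint32_t a = Tag(12345), b = Tag(67890);
  assert(FastPathApplies(a, b));
  assert(a + b == Tag(12345 + 67890));            // tagged add == plain add
  assert(!FastPathApplies(Tag(0x30000000), b));   // too big: take the stub
  return 0;
}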
-
-
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
-  DeferredInlineSmiOperation(Token::Value op,
-                             int value,
-                             bool reversed,
-                             OverwriteMode overwrite_mode,
-                             Register tos)
-      : op_(op),
-        value_(value),
-        reversed_(reversed),
-        overwrite_mode_(overwrite_mode),
-        tos_register_(tos) {
-    set_comment("[ DeferredInlinedSmiOperation");
-  }
-
-  virtual void Generate();
-  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
-  // Exit(). Currently on ARM SaveRegisters() and RestoreRegisters() are empty
-  // methods, it is the responsibility of the deferred code to save and restore
-  // registers.
-  virtual bool AutoSaveAndRestore() { return false; }
-
-  void JumpToNonSmiInput(Condition cond);
-  void JumpToAnswerOutOfRange(Condition cond);
-
- private:
-  void GenerateNonSmiInput();
-  void GenerateAnswerOutOfRange();
-  void WriteNonSmiAnswer(Register answer,
-                         Register heap_number,
-                         Register scratch);
-
-  Token::Value op_;
-  int value_;
-  bool reversed_;
-  OverwriteMode overwrite_mode_;
-  Register tos_register_;
-  Label non_smi_input_;
-  Label answer_out_of_range_;
-};
-
-
-// For bit operations we try harder and handle the case where the input is not
-// a Smi but a 32-bit integer without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) {
-  ASSERT(Token::IsBitOp(op_));
-
-  __ b(cond, &non_smi_input_);
-}
-
-
-// For bit operations the result is always 32 bits, so we handle the case where
-// the result does not fit in a Smi without calling the generic stub.
-void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
-  ASSERT(Token::IsBitOp(op_));
-
-  if ((op_ == Token::SHR) &&
-      !Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-    // >>> requires an unsigned-to-double conversion, and the non-VFP code
-    // does not support this conversion.
-    __ b(cond, entry_label());
-  } else {
-    __ b(cond, &answer_out_of_range_);
-  }
-}
-
-
-// On entry the non-constant side of the binary operation is in tos_register_
-// and the constant smi side is not held in any register.  The tos_register_
-// is not used by the virtual frame.  On exit the answer is in tos_register_
-// and the virtual frame is unchanged.
-void DeferredInlineSmiOperation::Generate() {
-  VirtualFrame copied_frame(*frame_state()->frame());
-  copied_frame.SpillAll();
-
-  Register lhs = r1;
-  Register rhs = r0;
-  switch (op_) {
-    case Token::ADD: {
-      // Revert optimistic add.
-      if (reversed_) {
-        __ sub(r0, tos_register_, Operand(Smi::FromInt(value_)));
-        __ mov(r1, Operand(Smi::FromInt(value_)));
-      } else {
-        __ sub(r1, tos_register_, Operand(Smi::FromInt(value_)));
-        __ mov(r0, Operand(Smi::FromInt(value_)));
-      }
-      break;
-    }
-
-    case Token::SUB: {
-      // Revert optimistic sub.
-      if (reversed_) {
-        __ rsb(r0, tos_register_, Operand(Smi::FromInt(value_)));
-        __ mov(r1, Operand(Smi::FromInt(value_)));
-      } else {
-        __ add(r1, tos_register_, Operand(Smi::FromInt(value_)));
-        __ mov(r0, Operand(Smi::FromInt(value_)));
-      }
-      break;
-    }
-
-    // For these operations there is no optimistic operation that needs to be
-    // reverted.
-    case Token::MUL:
-    case Token::MOD:
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR: {
-      if (tos_register_.is(r1)) {
-        __ mov(r0, Operand(Smi::FromInt(value_)));
-      } else {
-        ASSERT(tos_register_.is(r0));
-        __ mov(r1, Operand(Smi::FromInt(value_)));
-      }
-      if (reversed_ == tos_register_.is(r1)) {
-          lhs = r0;
-          rhs = r1;
-      }
-      break;
-    }
-
-    default:
-      // Other cases should have been handled before this point.
-      UNREACHABLE();
-      break;
-  }
-
-  GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
-  __ CallStub(&stub);
-
-  // The generic stub returns its value in r0, but that's not
-  // necessarily what we want.  We want whatever the inlined code
-  // expected, which is that the answer is in the same register as
-  // the operand was.
-  __ Move(tos_register_, r0);
-
-  // The tos register was not in use for the virtual frame that we
-  // came into this function with, so we can merge back to that frame
-  // without trashing it.
-  copied_frame.MergeTo(frame_state()->frame());
-
-  Exit();
-
-  if (non_smi_input_.is_linked()) {
-    GenerateNonSmiInput();
-  }
-
-  if (answer_out_of_range_.is_linked()) {
-    GenerateAnswerOutOfRange();
-  }
-}
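
The ADD/SUB cases above revert an optimistic operation: the inline code adds the tagged constant with SetCC, and only on overflow does the deferred code run, reconstructing the original operand by subtracting the constant back (wrapping arithmetic makes this exact). A sketch of the pattern in plain C++ (__builtin_add_overflow is a GCC/Clang builtin standing in for the overflow flag; SlowAdd stands in for the generic stub):

#include <cassert>
#include <cstdint>

static int64_t SlowAdd(int32_t a, int32_t b) {  // stand-in for the stub call
  return static_cast<int64_t>(a) + b;
}

static int64_t AddWithRevert(int32_t tos, int32_t constant) {
  int32_t sum;
  if (!__builtin_add_overflow(tos, constant, &sum)) {
    return sum;  // fast path: no overflow, answer already in place
  }
  // Deferred path: 'sum' holds the wrapped result; subtracting the
  // constant back (mod 2^32) recovers the original operand exactly.
  int32_t original = static_cast<int32_t>(
      static_cast<uint32_t>(sum) - static_cast<uint32_t>(constant));
  return SlowAdd(original, constant);
}

int main() {
  assert(AddWithRevert(1, 2) == 3);
  assert(AddWithRevert(INT32_MAX, 1) == static_cast<int64_t>(INT32_MAX) + 1);
  return 0;
}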
-
-
-// Convert and write the integer answer into heap_number.
-void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
-                                                   Register heap_number,
-                                                   Register scratch) {
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
-    __ vmov(s0, answer);
-    if (op_ == Token::SHR) {
-      __ vcvt_f64_u32(d0, s0);
-    } else {
-      __ vcvt_f64_s32(d0, s0);
-    }
-    __ sub(scratch, heap_number, Operand(kHeapObjectTag));
-    __ vstr(d0, scratch, HeapNumber::kValueOffset);
-  } else {
-    WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch);
-    __ CallStub(&stub);
-  }
-}
-
-
-void DeferredInlineSmiOperation::GenerateNonSmiInput() {
-  // We know the left hand side is not a Smi and the right hand side is an
-  // immediate value (value_) which can be represented as a Smi. We only
-  // handle bit operations.
-  ASSERT(Token::IsBitOp(op_));
-
-  if (FLAG_debug_code) {
-    __ Abort("Should not fall through!");
-  }
-
-  __ bind(&non_smi_input_);
-  if (FLAG_debug_code) {
-    __ AbortIfSmi(tos_register_);
-  }
-
-  // This routine uses the registers from r2 to r6.  At the moment they are
-  // not used by the register allocator, but when they are it should use
-  // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
-
-  Register heap_number_map = r7;
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset));
-  __ cmp(r3, heap_number_map);
-  // Not a number, fall back to the GenericBinaryOpStub.
-  __ b(ne, entry_label());
-
-  Register int32 = r2;
-  // Not a 32-bit signed int, fall back to the GenericBinaryOpStub.
-  __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label());
-
-  // tos_register_ (r0 or r1): Original heap number.
-  // int32: signed 32-bit int.
-
-  Label result_not_a_smi;
-  int shift_value = value_ & 0x1f;
-  switch (op_) {
-    case Token::BIT_OR:  __ orr(int32, int32, Operand(value_)); break;
-    case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break;
-    case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break;
-    case Token::SAR:
-      ASSERT(!reversed_);
-      if (shift_value != 0) {
-         __ mov(int32, Operand(int32, ASR, shift_value));
-      }
-      break;
-    case Token::SHR:
-      ASSERT(!reversed_);
-      if (shift_value != 0) {
-        __ mov(int32, Operand(int32, LSR, shift_value), SetCC);
-      } else {
-        // SHR is special because it is required to produce a positive answer.
-        __ cmp(int32, Operand(0, RelocInfo::NONE));
-      }
-      if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-        __ b(mi, &result_not_a_smi);
-      } else {
-        // Non-VFP code cannot convert from unsigned to double, so fall back
-        // to GenericBinaryOpStub.
-        __ b(mi, entry_label());
-      }
-      break;
-    case Token::SHL:
-      ASSERT(!reversed_);
-      if (shift_value != 0) {
-        __ mov(int32, Operand(int32, LSL, shift_value));
-      }
-      break;
-    default: UNREACHABLE();
-  }
-  // Check that the *signed* result fits in a smi. Not necessary for BIT_AND
-  // with a non-negative mask, SAR if the shift is more than 0, or SHR if the
-  // shift is more than 1.
-  if (!(((op_ == Token::BIT_AND) && (value_ >= 0)) ||
-        ((op_ == Token::SAR) && (shift_value > 0)) ||
-        ((op_ == Token::SHR) && (shift_value > 1)))) {
-    __ add(r3, int32, Operand(0x40000000), SetCC);
-    __ b(mi, &result_not_a_smi);
-  }
-  __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize));
-  Exit();
-
-  if (result_not_a_smi.is_linked()) {
-    __ bind(&result_not_a_smi);
-    if (overwrite_mode_ != OVERWRITE_LEFT) {
-      ASSERT((overwrite_mode_ == NO_OVERWRITE) ||
-             (overwrite_mode_ == OVERWRITE_RIGHT));
-      // If the allocation fails, fall back to the GenericBinaryOpStub.
-      __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label());
-      // Nothing can go wrong now, so overwrite tos.
-      __ mov(tos_register_, Operand(r4));
-    }
-
-    // int32: answer as signed 32-bit integer.
-    // tos_register_: Heap number to write the answer into.
-    WriteNonSmiAnswer(int32, tos_register_, r3);
-
-    Exit();
-  }
-}
-
-
-void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
-  // The inputs to a bitwise operation were Smis but the result cannot fit
-  // into a Smi, so we store it in a heap number. VirtualFrame::scratch0()
-  // holds the untagged result to be converted.  tos_register_ contains the
-  // input.  See the calls to JumpToAnswerOutOfRange to see how we got here.
-  ASSERT(Token::IsBitOp(op_));
-  ASSERT(!reversed_);
-
-  Register untagged_result = VirtualFrame::scratch0();
-
-  if (FLAG_debug_code) {
-    __ Abort("Should not fall through!");
-  }
-
-  __ bind(&answer_out_of_range_);
-  if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) {
-    // >>> 0 is a special case where the untagged_result register is not set up
-    // yet.  We untag the input to get it.
-    __ mov(untagged_result, Operand(tos_register_, ASR, kSmiTagSize));
-  }
-
-  // This routine uses the registers from r2 to r6.  At the moment they are
-  // not used by the register allocator, but when they are it should use
-  // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
-
-  // Allocate the result heap number.
-  Register heap_number_map = VirtualFrame::scratch1();
-  Register heap_number = r4;
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  // If the allocation fails, fall back to the GenericBinaryOpStub.
-  __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label());
-  WriteNonSmiAnswer(untagged_result, heap_number, r3);
-  __ mov(tos_register_, Operand(heap_number));
-
-  Exit();
-}
-
-
-static bool PopCountLessThanEqual2(unsigned int x) {
-  x &= x - 1;
-  return (x & (x - 1)) == 0;
-}
-
-
-// Returns the index of the lowest bit set.
-static int BitPosition(unsigned x) {
-  int bit_posn = 0;
-  while ((x & 0xf) == 0) {
-    bit_posn += 4;
-    x >>= 4;
-  }
-  while ((x & 1) == 0) {
-    bit_posn++;
-    x >>= 1;
-  }
-  return bit_posn;
-}
-
-
-// Can we multiply by x with at most two shifts and an add?
-// This answers yes to all integers from 2 to 10.
-static bool IsEasyToMultiplyBy(int x) {
-  if (x < 2) return false;                          // Avoid special cases.
-  if (x > (Smi::kMaxValue + 1) >> 2) return false;  // Almost always overflows.
-  if (IsPowerOf2(x)) return true;                   // Simple shift.
-  if (PopCountLessThanEqual2(x)) return true;       // Shift and add and shift.
-  if (IsPowerOf2(x + 1)) return true;               // Patterns like 11111.
-  return false;
-}
-
-
-// Multiplies by anything for which IsEasyToMultiplyBy returns true.
-// Source and destination may be the same register.  This routine does
-// not set carry and overflow the way a mul instruction would.
-static void InlineMultiplyByKnownInt(MacroAssembler* masm,
-                                     Register source,
-                                     Register destination,
-                                     int known_int) {
-  if (IsPowerOf2(known_int)) {
-    masm->mov(destination, Operand(source, LSL, BitPosition(known_int)));
-  } else if (PopCountLessThanEqual2(known_int)) {
-    int first_bit = BitPosition(known_int);
-    int second_bit = BitPosition(known_int ^ (1 << first_bit));
-    masm->add(destination, source,
-              Operand(source, LSL, second_bit - first_bit));
-    if (first_bit != 0) {
-      masm->mov(destination, Operand(destination, LSL, first_bit));
-    }
-  } else {
-    ASSERT(IsPowerOf2(known_int + 1));  // Patterns like 1111.
-    int the_bit = BitPosition(known_int + 1);
-    masm->rsb(destination, source, Operand(source, LSL, the_bit));
-  }
-}
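
Worked example of the decomposition above: 10 = 0b1010 has two bits set, so x*10 == (x + (x << 2)) << 1; 15 = 0b1111 matches the 2^n - 1 pattern, so x*15 == (x << 4) - x (the rsb case). A scalar sketch of the same case analysis (illustrative, without the carry/overflow caveat of the assembler version):

#include <cassert>

static int BitPos(unsigned x) {  // index of the lowest set bit
  int n = 0;
  while ((x & 1) == 0) { n++; x >>= 1; }
  return n;
}

static int MultiplyByKnownInt(int x, unsigned k) {
  if ((k & (k - 1)) == 0) return x << BitPos(k);  // power of two: one shift
  unsigned rest = k & (k - 1);                    // clear the lowest set bit
  if ((rest & (rest - 1)) == 0) {                 // exactly two bits set
    int first = BitPos(k);
    int second = BitPos(k ^ (1u << first));
    return (x + (x << (second - first))) << first;
  }
  return (x << BitPos(k + 1)) - x;                // k = 2^n - 1 pattern
}

int main() {
  for (unsigned k = 2; k <= 10; k++)
    assert(MultiplyByKnownInt(7, k) == 7 * static_cast<int>(k));
  assert(MultiplyByKnownInt(3, 10) == 30);
  assert(MultiplyByKnownInt(3, 15) == 45);
  return 0;
}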
-
-
-void CodeGenerator::SmiOperation(Token::Value op,
-                                 Handle<Object> value,
-                                 bool reversed,
-                                 OverwriteMode mode) {
-  int int_value = Smi::cast(*value)->value();
-
-  bool both_sides_are_smi = frame_->KnownSmiAt(0);
-
-  bool something_to_inline;
-  switch (op) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::BIT_AND:
-    case Token::BIT_OR:
-    case Token::BIT_XOR: {
-      something_to_inline = true;
-      break;
-    }
-    case Token::SHL: {
-      something_to_inline = (both_sides_are_smi || !reversed);
-      break;
-    }
-    case Token::SHR:
-    case Token::SAR: {
-      if (reversed) {
-        something_to_inline = false;
-      } else {
-        something_to_inline = true;
-      }
-      break;
-    }
-    case Token::MOD: {
-      if (reversed || int_value < 2 || !IsPowerOf2(int_value)) {
-        something_to_inline = false;
-      } else {
-        something_to_inline = true;
-      }
-      break;
-    }
-    case Token::MUL: {
-      if (!IsEasyToMultiplyBy(int_value)) {
-        something_to_inline = false;
-      } else {
-        something_to_inline = true;
-      }
-      break;
-    }
-    default: {
-      something_to_inline = false;
-      break;
-    }
-  }
-
-  if (!something_to_inline) {
-    if (!reversed) {
-      // Push the rhs onto the virtual frame by putting it in a TOS register.
-      Register rhs = frame_->GetTOSRegister();
-      __ mov(rhs, Operand(value));
-      frame_->EmitPush(rhs, TypeInfo::Smi());
-      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI, int_value);
-    } else {
-      // Pop the rhs, then push lhs and rhs in the right order.  This performs
-      // at most one pop; the rest takes place in TOS registers.
-      Register lhs = frame_->GetTOSRegister();    // Get reg for pushing.
-      Register rhs = frame_->PopToRegister(lhs);  // Don't use lhs for this.
-      __ mov(lhs, Operand(value));
-      frame_->EmitPush(lhs, TypeInfo::Smi());
-      TypeInfo t = both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Unknown();
-      frame_->EmitPush(rhs, t);
-      GenericBinaryOperation(op, mode, GENERATE_INLINE_SMI,
-                             GenericBinaryOpStub::kUnknownIntValue);
-    }
-    return;
-  }
-
-  // We move the top of stack to a register (normally no move is involved).
-  Register tos = frame_->PopToRegister();
-  switch (op) {
-    case Token::ADD: {
-      DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-
-      __ add(tos, tos, Operand(value), SetCC);
-      deferred->Branch(vs);
-      if (!both_sides_are_smi) {
-        __ tst(tos, Operand(kSmiTagMask));
-        deferred->Branch(ne);
-      }
-      deferred->BindExit();
-      frame_->EmitPush(tos);
-      break;
-    }
-
-    case Token::SUB: {
-      DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-
-      if (reversed) {
-        __ rsb(tos, tos, Operand(value), SetCC);
-      } else {
-        __ sub(tos, tos, Operand(value), SetCC);
-      }
-      deferred->Branch(vs);
-      if (!both_sides_are_smi) {
-        __ tst(tos, Operand(kSmiTagMask));
-        deferred->Branch(ne);
-      }
-      deferred->BindExit();
-      frame_->EmitPush(tos);
-      break;
-    }
-
-
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND: {
-      if (both_sides_are_smi) {
-        switch (op) {
-          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
-          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
-          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
-          default: UNREACHABLE();
-        }
-        frame_->EmitPush(tos, TypeInfo::Smi());
-      } else {
-        DeferredInlineSmiOperation* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-        __ tst(tos, Operand(kSmiTagMask));
-        deferred->JumpToNonSmiInput(ne);
-        switch (op) {
-          case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
-          case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
-          case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
-          default: UNREACHABLE();
-        }
-        deferred->BindExit();
-        TypeInfo result_type = TypeInfo::Integer32();
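-        // AND with a non-negative constant bounds the result to
-        // [0, constant], so the result is known to fit in a smi.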
-        if (op == Token::BIT_AND && int_value >= 0) {
-          result_type = TypeInfo::Smi();
-        }
-        frame_->EmitPush(tos, result_type);
-      }
-      break;
-    }
-
-    case Token::SHL:
-      if (reversed) {
-        ASSERT(both_sides_are_smi);
-        int max_shift = 0;
-        int max_result = int_value == 0 ? 1 : int_value;
-        while (Smi::IsValid(max_result << 1)) {
-          max_shift++;
-          max_result <<= 1;
-        }
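-        // E.g. for int_value == 1 on this 32-bit target (31-bit smis)
-        // max_shift ends up as 29; any shift amount >= max_shift goes to
-        // the deferred code because the result might not fit in a smi.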
-        DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, true, mode, tos);
-        // Mask off the last 5 bits of the shift operand (rhs).  This is part
-        // of the definition of shift in JS and we know we have a Smi so we
-        // can safely do this.  The masked version gets passed to the
-        // deferred code, but that makes no difference.
-        __ and_(tos, tos, Operand(Smi::FromInt(0x1f)));
-        __ cmp(tos, Operand(Smi::FromInt(max_shift)));
-        deferred->Branch(ge);
-        Register scratch = VirtualFrame::scratch0();
-        __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Untag.
-        __ mov(tos, Operand(Smi::FromInt(int_value)));    // Load constant.
-        __ mov(tos, Operand(tos, LSL, scratch));          // Shift constant.
-        deferred->BindExit();
-        TypeInfo result = TypeInfo::Integer32();
-        frame_->EmitPush(tos, result);
-        break;
-      }
-      // Fall through!
-    case Token::SHR:
-    case Token::SAR: {
-      ASSERT(!reversed);
-      int shift_value = int_value & 0x1f;
-      TypeInfo result = TypeInfo::Number();
-
-      if (op == Token::SHR) {
-        if (shift_value > 1) {
-          result = TypeInfo::Smi();
-        } else if (shift_value > 0) {
-          result = TypeInfo::Integer32();
-        }
-      } else if (op == Token::SAR) {
-        if (shift_value > 0) {
-          result = TypeInfo::Smi();
-        } else {
-          result = TypeInfo::Integer32();
-        }
-      } else {
-        ASSERT(op == Token::SHL);
-        result = TypeInfo::Integer32();
-      }
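-      // For SHR, shifting by more than one bit always yields a value
-      // below 2^30 and hence a smi, while shifting by exactly one bit
-      // can still produce up to 2^31 - 1, which only fits in an
-      // Integer32.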
-
-      DeferredInlineSmiOperation* deferred =
-        new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
-      if (!both_sides_are_smi) {
-        __ tst(tos, Operand(kSmiTagMask));
-        deferred->JumpToNonSmiInput(ne);
-      }
-      switch (op) {
-        case Token::SHL: {
-          if (shift_value != 0) {
-            Register untagged_result = VirtualFrame::scratch0();
-            Register scratch = VirtualFrame::scratch1();
-            int adjusted_shift = shift_value - kSmiTagSize;
-            ASSERT(adjusted_shift >= 0);
-
-            if (adjusted_shift != 0) {
-              __ mov(untagged_result, Operand(tos, LSL, adjusted_shift));
-            } else {
-              __ mov(untagged_result, Operand(tos));
-            }
-            // Check that the *signed* result fits in a smi.
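-            // (Adding 0x40000000 maps exactly the 31-bit smi range
-            // [-2^30, 2^30) onto the non-negative numbers, so a negative
-            // result indicates a value out of smi range.)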
-            __ add(scratch, untagged_result, Operand(0x40000000), SetCC);
-            deferred->JumpToAnswerOutOfRange(mi);
-            __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
-          }
-          break;
-        }
-        case Token::SHR: {
-          if (shift_value != 0) {
-            Register untagged_result = VirtualFrame::scratch0();
-            // Remove tag.
-            __ mov(untagged_result, Operand(tos, ASR, kSmiTagSize));
-            __ mov(untagged_result, Operand(untagged_result, LSR, shift_value));
-            if (shift_value == 1) {
-              // Check that the *unsigned* result fits in a smi.
-              // Neither of the two high-order bits can be set:
-              // - 0x80000000: high bit would be lost when smi tagging
-              // - 0x40000000: this number would convert to negative when Smi
-              //   tagging.
-              // These two cases can only happen with shifts by 0 or 1 when
-              // handed a valid smi.
-              __ tst(untagged_result, Operand(0xc0000000));
-              deferred->JumpToAnswerOutOfRange(ne);
-            }
-            __ mov(tos, Operand(untagged_result, LSL, kSmiTagSize));
-          } else {
-            __ cmp(tos, Operand(0, RelocInfo::NONE));
-            deferred->JumpToAnswerOutOfRange(mi);
-          }
-          break;
-        }
-        case Token::SAR: {
-          if (shift_value != 0) {
-            // Do the shift and the tag removal in one operation. If the shift
-            // is 31 bits (the highest possible value) then we emit the
-            // instruction as a shift by 0 which in the ARM ISA means shift
-            // arithmetically by 32.
-            __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
-            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
-          }
-          break;
-        }
-        default: UNREACHABLE();
-      }
-      deferred->BindExit();
-      frame_->EmitPush(tos, result);
-      break;
-    }
-
-    case Token::MOD: {
-      ASSERT(!reversed);
-      ASSERT(int_value >= 2);
-      ASSERT(IsPowerOf2(int_value));
-      DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-      unsigned mask = (0x80000000u | kSmiTagMask);
-      __ tst(tos, Operand(mask));
-      deferred->Branch(ne);  // Go to deferred code on non-Smis and negatives.
-      mask = (int_value << kSmiTagSize) - 1;
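-      // E.g. for int_value == 8 this mask is 0xF; ANDing the tagged,
-      // non-negative smi with it leaves the tagged value of lhs % 8.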
-      __ and_(tos, tos, Operand(mask));
-      deferred->BindExit();
-      // Mod of positive power of 2 Smi gives a Smi if the lhs is an integer.
-      frame_->EmitPush(
-          tos,
-          both_sides_are_smi ? TypeInfo::Smi() : TypeInfo::Number());
-      break;
-    }
-
-    case Token::MUL: {
-      ASSERT(IsEasyToMultiplyBy(int_value));
-      DeferredCode* deferred =
-          new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
-      unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value;
-      max_smi_that_wont_overflow <<= kSmiTagSize;
-      unsigned mask = 0x80000000u;
-      while ((mask & max_smi_that_wont_overflow) == 0) {
-        mask |= mask >> 1;
-      }
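-      // The mask now has ones from bit 31 down to the highest bit set in
-      // max_smi_that_wont_overflow, so the test below conservatively
-      // rejects any operand that could overflow the multiplication.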
-      mask |= kSmiTagMask;
-      // A single mask test conservatively rejects values that are too
-      // large as well as non-Smis.  It also filters out negative numbers,
-      // unfortunately, but since this code is inline we prefer brevity
-      // to comprehensiveness.
-      __ tst(tos, Operand(mask));
-      deferred->Branch(ne);
-      InlineMultiplyByKnownInt(masm_, tos, tos, int_value);
-      deferred->BindExit();
-      frame_->EmitPush(tos);
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void CodeGenerator::Comparison(Condition cond,
-                               Expression* left,
-                               Expression* right,
-                               bool strict) {
-  VirtualFrame::RegisterAllocationScope scope(this);
-
-  if (left != NULL) Load(left);
-  if (right != NULL) Load(right);
-
-  // sp[0] : y
-  // sp[1] : x
-  // result : cc register
-
-  // Strict only makes sense for equality comparisons.
-  ASSERT(!strict || cond == eq);
-
-  Register lhs;
-  Register rhs;
-
-  bool lhs_is_smi;
-  bool rhs_is_smi;
-
-  // We load the top two stack positions into registers chosen by the virtual
-  // frame.  This should keep the register shuffling to a minimum.
-  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
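-  // E.g. x > y is evaluated as y < x: y is on top of the stack, so it is
-  // popped first and becomes the lhs of the reversed comparison.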
-  if (cond == gt || cond == le) {
-    cond = ReverseCondition(cond);
-    lhs_is_smi = frame_->KnownSmiAt(0);
-    rhs_is_smi = frame_->KnownSmiAt(1);
-    lhs = frame_->PopToRegister();
-    rhs = frame_->PopToRegister(lhs);  // Don't pop to the same register again!
-  } else {
-    rhs_is_smi = frame_->KnownSmiAt(0);
-    lhs_is_smi = frame_->KnownSmiAt(1);
-    rhs = frame_->PopToRegister();
-    lhs = frame_->PopToRegister(rhs);  // Don't pop to the same register again!
-  }
-
-  bool both_sides_are_smi = (lhs_is_smi && rhs_is_smi);
-
-  ASSERT(rhs.is(r0) || rhs.is(r1));
-  ASSERT(lhs.is(r0) || lhs.is(r1));
-
-  JumpTarget exit;
-
-  if (!both_sides_are_smi) {
-    // Now we have the two sides in r0 and r1.  We flush any other registers
-    // because the stub doesn't know about register allocation.
-    frame_->SpillAll();
-    Register scratch = VirtualFrame::scratch0();
-    Register smi_test_reg;
-    if (lhs_is_smi) {
-      smi_test_reg = rhs;
-    } else if (rhs_is_smi) {
-      smi_test_reg = lhs;
-    } else {
-      __ orr(scratch, lhs, Operand(rhs));
-      smi_test_reg = scratch;
-    }
-    __ tst(smi_test_reg, Operand(kSmiTagMask));
-    JumpTarget smi;
-    smi.Branch(eq);
-
-    // Perform non-smi comparison by stub.
-    // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
-    // We call with 0 args because there are 0 on the stack.
-    CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
-    frame_->CallStub(&stub, 0);
-    __ cmp(r0, Operand(0, RelocInfo::NONE));
-    exit.Jump();
-
-    smi.Bind();
-  }
-
-  // Do smi comparisons by pointer comparison.
-  __ cmp(lhs, Operand(rhs));
-
-  exit.Bind();
-  cc_reg_ = cond;
-}
-
-
-// Call the function on the stack with the given arguments.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
-                                      CallFunctionFlags flags,
-                                      int position) {
-  // Push the arguments ("left-to-right") on the stack.
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  // Record the position for debugging purposes.
-  CodeForSourcePosition(position);
-
-  // Use the shared code stub to call the function.
-  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub call_function(arg_count, in_loop, flags);
-  frame_->CallStub(&call_function, arg_count + 1);
-
-  // Restore context and pop function from the stack.
-  __ ldr(cp, frame_->Context());
-  frame_->Drop();  // discard the TOS
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
-                                  Expression* receiver,
-                                  VariableProxy* arguments,
-                                  int position) {
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).
-  // If the arguments object of the scope has not been allocated,
-  // and x.apply is Function.prototype.apply, this optimization
-  // just copies y and the arguments of the current function on the
-  // stack, as receiver and arguments, and calls x.
-  // In the implementation comments, we call x the applicand
-  // and y the receiver.
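-  //
-  // For example, in
-  //   function f() { return g.apply(this, arguments); }
-  // g is the applicand and 'this' is the receiver.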
-
-  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
-  ASSERT(arguments->IsArguments());
-
-  // Load applicand.apply onto the stack. This will usually
-  // give us a megamorphic load site. Not super, but it works.
-  Load(applicand);
-  Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
-  frame_->Dup();
-  frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
-  frame_->EmitPush(r0);
-
-  // Load the receiver and the existing arguments object onto the
-  // expression stack. Avoid allocating the arguments object here.
-  Load(receiver);
-  LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
-  // At this point the top two stack elements are probably in registers
-  // since they were just loaded.  Ensure they are in registers and
-  // fetch those registers.
-  Register receiver_reg = frame_->Peek2();
-  Register arguments_reg = frame_->Peek();
-
-  // From now on the frame is spilled.
-  frame_->SpillAll();
-
-  // Emit the source position information after having loaded the
-  // receiver and the arguments.
-  CodeForSourcePosition(position);
-  // Contents of the stack at this point:
-  //   sp[0]: arguments object of the current function or the hole.
-  //   sp[1]: receiver
-  //   sp[2]: applicand.apply
-  //   sp[3]: applicand.
-
-  // Check if the arguments object has been lazily allocated
-  // already. If so, just use that instead of copying the arguments
-  // from the stack. This also deals with cases where a local variable
-  // named 'arguments' has been introduced.
-  JumpTarget slow;
-  Label done;
-  __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
-  __ cmp(ip, arguments_reg);
-  slow.Branch(ne);
-
-  Label build_args;
-  // Get rid of the arguments object probe.
-  frame_->Drop();
-  // Stack now has 3 elements on it.
-  // Contents of stack at this point:
-  //   sp[0]: receiver - in the receiver_reg register.
-  //   sp[1]: applicand.apply
-  //   sp[2]: applicand.
-
-  // Check that the receiver really is a JavaScript object.
-  __ JumpIfSmi(receiver_reg, &build_args);
-  // We allow all JSObjects including JSFunctions.  As long as
-  // JS_FUNCTION_TYPE is the last instance type and it is right
-  // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
-  // bound.
-  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
-  __ b(lt, &build_args);
-
-  // Check that applicand.apply is Function.prototype.apply.
-  __ ldr(r0, MemOperand(sp, kPointerSize));
-  __ JumpIfSmi(r0, &build_args);
-  __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &build_args);
-  Handle<Code> apply_code(
-      Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply));
-  __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
-  __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ cmp(r1, Operand(apply_code));
-  __ b(ne, &build_args);
-
-  // Check that applicand is a function.
-  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-  __ JumpIfSmi(r1, &build_args);
-  __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
-  __ b(ne, &build_args);
-
-  // Copy the arguments to this function possibly from the
-  // adaptor frame below it.
-  Label invoke, adapted;
-  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(eq, &adapted);
-
-  // No arguments adaptor frame. Copy fixed number of arguments.
-  __ mov(r0, Operand(scope()->num_parameters()));
-  for (int i = 0; i < scope()->num_parameters(); i++) {
-    __ ldr(r2, frame_->ParameterAt(i));
-    __ push(r2);
-  }
-  __ jmp(&invoke);
-
-  // Arguments adaptor frame present. Copy arguments from there, but
-  // avoid copying too many arguments to avoid stack overflows.
-  __ bind(&adapted);
-  static const uint32_t kArgumentsLimit = 1 * KB;
-  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ mov(r0, Operand(r0, LSR, kSmiTagSize));
-  __ mov(r3, r0);
-  __ cmp(r0, Operand(kArgumentsLimit));
-  __ b(gt, &build_args);
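-  // More than kArgumentsLimit (1024) arguments takes the slow path that
-  // allocates a real arguments object.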
-
-  // Loop through the arguments pushing them onto the execution
-  // stack. We don't inform the virtual frame of the push, so we don't
-  // have to worry about getting rid of the elements from the virtual
-  // frame.
-  Label loop;
-  // r3 is a small non-negative integer, due to the test above.
-  __ cmp(r3, Operand(0, RelocInfo::NONE));
-  __ b(eq, &invoke);
-  // Compute the address of the first argument.
-  __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
-  __ add(r2, r2, Operand(kPointerSize));
-  __ bind(&loop);
-  // Post-decrement argument address by kPointerSize on each iteration.
-  __ ldr(r4, MemOperand(r2, kPointerSize, NegPostIndex));
-  __ push(r4);
-  __ sub(r3, r3, Operand(1), SetCC);
-  __ b(gt, &loop);
-
-  // Invoke the function.
-  __ bind(&invoke);
-  ParameterCount actual(r0);
-  __ InvokeFunction(r1, actual, CALL_FUNCTION);
-  // Drop applicand.apply and applicand from the stack, and push
-  // the result of the function call, but leave the spilled frame
-  // unchanged, with 3 elements, so it is correct when we compile the
-  // slow-case code.
-  __ add(sp, sp, Operand(2 * kPointerSize));
-  __ push(r0);
-  // Stack now has 1 element:
-  //   sp[0]: result
-  __ jmp(&done);
-
-  // Slow-case: Allocate the arguments object since we know it isn't
-  // there, and fall-through to the slow-case where we call
-  // applicand.apply.
-  __ bind(&build_args);
-  // Stack now has 3 elements, because we jumped here from a point where:
-  //   sp[0]: receiver
-  //   sp[1]: applicand.apply
-  //   sp[2]: applicand.
-  StoreArgumentsObject(false);
-
-  // Stack and frame now have 4 elements.
-  slow.Bind();
-
-  // Generic computation of x.apply(y, args) with no special optimization.
-  // Flip applicand.apply and applicand on the stack, so
-  // applicand looks like the receiver of the applicand.apply call.
-  // Then process it as a normal function call.
-  __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
-  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-  __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
-
-  CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
-  frame_->CallStub(&call_function, 3);
-  // The function and its two arguments have been dropped.
-  frame_->Drop();  // Drop the receiver as well.
-  frame_->EmitPush(r0);
-  frame_->SpillAll();  // A spilled frame is also jumping to label done.
-  // Stack now has 1 element:
-  //   sp[0]: result
-  __ bind(&done);
-
-  // Restore the context register after a call.
-  __ ldr(cp, frame_->Context());
-}
-
-
-void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
-  ASSERT(has_cc());
-  Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
-  target->Branch(cond);
-  cc_reg_ = al;
-}
-
-
-void CodeGenerator::CheckStack() {
-  frame_->SpillAll();
-  Comment cmnt(masm_, "[ check stack");
-  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-  masm_->cmp(sp, Operand(ip));
-  StackCheckStub stub;
-  // Call the stub if lower.
-  masm_->mov(ip,
-             Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
-                     RelocInfo::CODE_TARGET),
-             LeaveCC,
-             lo);
-  masm_->Call(ip, lo);
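-  // Both the mov and the call above are conditional on lo, so the stub
-  // is only invoked when sp is below the stack limit.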
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
-    Visit(statements->at(i));
-  }
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Block");
-  CodeForStatementPosition(node);
-  node->break_target()->SetExpectedHeight();
-  VisitStatements(node->statements());
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  frame_->EmitPush(cp);
-  frame_->EmitPush(Operand(pairs));
-  frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
-  frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
-  frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
-  // The result is discarded.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Declaration");
-  Variable* var = node->proxy()->var();
-  ASSERT(var != NULL);  // must have been resolved
-  Slot* slot = var->AsSlot();
-
-  // If it was not possible to allocate the variable at compile time,
-  // we need to "declare" it at runtime to make sure it actually
-  // exists in the local context.
-  if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    // Variables with a "LOOKUP" slot were introduced as non-locals
-    // during variable resolution and must have mode DYNAMIC.
-    ASSERT(var->is_dynamic());
-    // For now, just do a runtime call.
-    frame_->EmitPush(cp);
-    frame_->EmitPush(Operand(var->name()));
-    // Declaration nodes always have one of two modes.
-    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
-    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
-    frame_->EmitPush(Operand(Smi::FromInt(attr)));
-    // Push initial value, if any.
-    // Note: For variables we must not push an initial value (such as
-    // 'undefined') because we may have a (legal) redeclaration and we
-    // must not destroy the current value.
-    if (node->mode() == Variable::CONST) {
-      frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
-    } else if (node->fun() != NULL) {
-      Load(node->fun());
-    } else {
-      frame_->EmitPush(Operand(0, RelocInfo::NONE));
-    }
-
-    frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
-    // Ignore the return value (declarations are statements).
-
-    ASSERT(frame_->height() == original_height);
-    return;
-  }
-
-  ASSERT(!var->is_global());
-
-  // If we have a function or a constant, we need to initialize the variable.
-  Expression* val = NULL;
-  if (node->mode() == Variable::CONST) {
-    val = new Literal(FACTORY->the_hole_value());
-  } else {
-    val = node->fun();  // NULL if we don't have a function
-  }
-
-  if (val != NULL) {
-    WriteBarrierCharacter wb_info =
-        val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
-    if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
-    // Set initial value.
-    Reference target(this, node->proxy());
-    Load(val);
-    target.SetValue(NOT_CONST_INIT, wb_info);
-
-    // Get rid of the assigned value (declarations are statements).
-    frame_->Drop();
-  }
-  ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  CodeForStatementPosition(node);
-  Expression* expression = node->expression();
-  expression->MarkAsStatement();
-  Load(expression);
-  frame_->Drop();
-  ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "// EmptyStatement");
-  CodeForStatementPosition(node);
-  // nothing to do
-  ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ IfStatement");
-  // Generate different code depending on which parts of the if statement
-  // are present or not.
-  bool has_then_stm = node->HasThenStatement();
-  bool has_else_stm = node->HasElseStatement();
-
-  CodeForStatementPosition(node);
-
-  JumpTarget exit;
-  if (has_then_stm && has_else_stm) {
-    Comment cmnt(masm_, "[ IfThenElse");
-    JumpTarget then;
-    JumpTarget else_;
-    // if (cond)
-    LoadCondition(node->condition(), &then, &else_, true);
-    if (frame_ != NULL) {
-      Branch(false, &else_);
-    }
-    // then
-    if (frame_ != NULL || then.is_linked()) {
-      then.Bind();
-      Visit(node->then_statement());
-    }
-    if (frame_ != NULL) {
-      exit.Jump();
-    }
-    // else
-    if (else_.is_linked()) {
-      else_.Bind();
-      Visit(node->else_statement());
-    }
-
-  } else if (has_then_stm) {
-    Comment cmnt(masm_, "[ IfThen");
-    ASSERT(!has_else_stm);
-    JumpTarget then;
-    // if (cond)
-    LoadCondition(node->condition(), &then, &exit, true);
-    if (frame_ != NULL) {
-      Branch(false, &exit);
-    }
-    // then
-    if (frame_ != NULL || then.is_linked()) {
-      then.Bind();
-      Visit(node->then_statement());
-    }
-
-  } else if (has_else_stm) {
-    Comment cmnt(masm_, "[ IfElse");
-    ASSERT(!has_then_stm);
-    JumpTarget else_;
-    // if (!cond)
-    LoadCondition(node->condition(), &exit, &else_, true);
-    if (frame_ != NULL) {
-      Branch(true, &exit);
-    }
-    // else
-    if (frame_ != NULL || else_.is_linked()) {
-      else_.Bind();
-      Visit(node->else_statement());
-    }
-
-  } else {
-    Comment cmnt(masm_, "[ If");
-    ASSERT(!has_then_stm && !has_else_stm);
-    // if (cond)
-    LoadCondition(node->condition(), &exit, &exit, false);
-    if (frame_ != NULL) {
-      if (has_cc()) {
-        cc_reg_ = al;
-      } else {
-        frame_->Drop();
-      }
-    }
-  }
-
-  // end
-  if (exit.is_linked()) {
-    exit.Bind();
-  }
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
-  Comment cmnt(masm_, "[ ContinueStatement");
-  CodeForStatementPosition(node);
-  node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
-  Comment cmnt(masm_, "[ BreakStatement");
-  CodeForStatementPosition(node);
-  node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  Comment cmnt(masm_, "[ ReturnStatement");
-
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  frame_->PopToR0();
-  frame_->PrepareForReturn();
-  if (function_return_is_shadowed_) {
-    function_return_.Jump();
-  } else {
-    // The result has already been popped from the frame and the frame
-    // prepared for returning, which makes merging easier.
-    if (function_return_.is_bound()) {
-      // If the function return label is already bound we reuse the
-      // code by jumping to the return site.
-      function_return_.Jump();
-    } else {
-      function_return_.Bind();
-      GenerateReturnSequence();
-    }
-  }
-}
-
-
-void CodeGenerator::GenerateReturnSequence() {
-  if (FLAG_trace) {
-    // Push the return value on the stack as the parameter.
-    // Runtime::TraceExit returns the parameter as it is.
-    frame_->EmitPush(r0);
-    frame_->CallRuntime(Runtime::kTraceExit, 1);
-  }
-
-#ifdef DEBUG
-  // Add a label for checking the size of the code used for returning.
-  Label check_exit_codesize;
-  masm_->bind(&check_exit_codesize);
-#endif
-  // Make sure that the constant pool is not emitted inside the return
-  // sequence.
-  { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    // Tear down the frame which will restore the caller's frame pointer and
-    // the link register.
-    frame_->Exit();
-
-    // Here we use masm_-> instead of the __ macro to prevent the code
-    // coverage tool from instrumenting, as we rely on the code size here.
-    int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
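-    // (One stack slot for the receiver plus one per declared parameter.)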
-    masm_->add(sp, sp, Operand(sp_delta));
-    masm_->Jump(lr);
-    DeleteFrame();
-
-#ifdef DEBUG
-    // Check that the size of the code used for returning is large enough
-    // for the debugger's requirements.
-    ASSERT(Assembler::kJSReturnSequenceInstructions <=
-           masm_->InstructionsGeneratedSince(&check_exit_codesize));
-#endif
-  }
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ WithEnterStatement");
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  if (node->is_catch_block()) {
-    frame_->CallRuntime(Runtime::kPushCatchContext, 1);
-  } else {
-    frame_->CallRuntime(Runtime::kPushContext, 1);
-  }
-#ifdef DEBUG
-  JumpTarget verified_true;
-  __ cmp(r0, cp);
-  verified_true.Branch(eq);
-  __ stop("PushContext: r0 is expected to be the same as cp");
-  verified_true.Bind();
-#endif
-  // Update context local.
-  __ str(cp, frame_->Context());
-  ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ WithExitStatement");
-  CodeForStatementPosition(node);
-  // Pop context.
-  __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
-  // Update context local.
-  __ str(cp, frame_->Context());
-  ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ SwitchStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->SetExpectedHeight();
-
-  Load(node->tag());
-
-  JumpTarget next_test;
-  JumpTarget fall_through;
-  JumpTarget default_entry;
-  JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
-  ZoneList<CaseClause*>* cases = node->cases();
-  int length = cases->length();
-  CaseClause* default_clause = NULL;
-
-  for (int i = 0; i < length; i++) {
-    CaseClause* clause = cases->at(i);
-    if (clause->is_default()) {
-      // Remember the default clause and compile it at the end.
-      default_clause = clause;
-      continue;
-    }
-
-    Comment cmnt(masm_, "[ Case clause");
-    // Compile the test.
-    next_test.Bind();
-    next_test.Unuse();
-    // Duplicate TOS.
-    frame_->Dup();
-    Comparison(eq, NULL, clause->label(), true);
-    Branch(false, &next_test);
-
-    // Before entering the body from the test, remove the switch value from
-    // the stack.
-    frame_->Drop();
-
-    // Label the body so that fall through is enabled.
-    if (i > 0 && cases->at(i - 1)->is_default()) {
-      default_exit.Bind();
-    } else {
-      fall_through.Bind();
-      fall_through.Unuse();
-    }
-    VisitStatements(clause->statements());
-
-    // If control flow can fall through from the body, jump to the next body
-    // or the end of the statement.
-    if (frame_ != NULL) {
-      if (i < length - 1 && cases->at(i + 1)->is_default()) {
-        default_entry.Jump();
-      } else {
-        fall_through.Jump();
-      }
-    }
-  }
-
-  // The final "test" removes the switch value.
-  next_test.Bind();
-  frame_->Drop();
-
-  // If there is a default clause, compile it.
-  if (default_clause != NULL) {
-    Comment cmnt(masm_, "[ Default clause");
-    default_entry.Bind();
-    VisitStatements(default_clause->statements());
-    // If control flow can fall out of the default and there is a case after
-    // it, jump to that case's body.
-    if (frame_ != NULL && default_exit.is_bound()) {
-      default_exit.Jump();
-    }
-  }
-
-  if (fall_through.is_linked()) {
-    fall_through.Bind();
-  }
-
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ DoWhileStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->SetExpectedHeight();
-  JumpTarget body(JumpTarget::BIDIRECTIONAL);
-  IncrementLoopNesting();
-
-  // Label the top of the loop for the backward CFG edge.  If the test
-  // is always true we can use the continue target, and if the test is
-  // always false there is no need.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  switch (info) {
-    case ALWAYS_TRUE:
-      node->continue_target()->SetExpectedHeight();
-      node->continue_target()->Bind();
-      break;
-    case ALWAYS_FALSE:
-      node->continue_target()->SetExpectedHeight();
-      break;
-    case DONT_KNOW:
-      node->continue_target()->SetExpectedHeight();
-      body.Bind();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  Visit(node->body());
-
-  // Compile the test.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // If control can fall off the end of the body, jump back to the
-      // top.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-      break;
-    case ALWAYS_FALSE:
-      // If we have a continue in the body, we only have to bind its
-      // jump target.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      break;
-    case DONT_KNOW:
-      // We have to compile the test expression if it can be reached by
-      // control flow falling out of the body or via continue.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (has_valid_frame()) {
-        Comment cmnt(masm_, "[ DoWhileCondition");
-        CodeForDoWhileConditionPosition(node);
-        LoadCondition(node->cond(), &body, node->break_target(), true);
-        if (has_valid_frame()) {
-          // An invalid frame here indicates that control did not
-          // fall out of the test expression.
-          Branch(true, &body);
-        }
-      }
-      break;
-  }
-
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ WhileStatement");
-  CodeForStatementPosition(node);
-
-  // If the test is never true and has no side effects there is no need
-  // to compile the test or body.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  node->break_target()->SetExpectedHeight();
-  IncrementLoopNesting();
-
-  // Label the top of the loop with the continue target for the backward
-  // CFG edge.
-  node->continue_target()->SetExpectedHeight();
-  node->continue_target()->Bind();
-
-  if (info == DONT_KNOW) {
-    JumpTarget body(JumpTarget::BIDIRECTIONAL);
-    LoadCondition(node->cond(), &body, node->break_target(), true);
-    if (has_valid_frame()) {
-      // A NULL frame indicates that control did not fall out of the
-      // test expression.
-      Branch(false, node->break_target());
-    }
-    if (has_valid_frame() || body.is_linked()) {
-      body.Bind();
-    }
-  }
-
-  if (has_valid_frame()) {
-    CheckStack();  // TODO(1222600): ignore if body contains calls.
-    Visit(node->body());
-
-    // If control flow can fall out of the body, jump back to the top.
-    if (has_valid_frame()) {
-      node->continue_target()->Jump();
-    }
-  }
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ ForStatement");
-  CodeForStatementPosition(node);
-  if (node->init() != NULL) {
-    Visit(node->init());
-  }
-
-  // If the test is never true there is no need to compile the test or
-  // body.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  node->break_target()->SetExpectedHeight();
-  IncrementLoopNesting();
-
-  // We know that the loop index is a smi if it is not modified in the
-  // loop body and it is checked against a constant limit in the loop
-  // condition.  In this case, we reset the static type information of the
-  // loop index to smi before compiling the body, the update expression, and
-  // the bottom check of the loop condition.
-  TypeInfoCodeGenState type_info_scope(this,
-                                       node->is_fast_smi_loop() ?
-                                       node->loop_variable()->AsSlot() :
-                                       NULL,
-                                       TypeInfo::Smi());
-
-  // If there is no update statement, label the top of the loop with the
-  // continue target, otherwise with the loop target.
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-  if (node->next() == NULL) {
-    node->continue_target()->SetExpectedHeight();
-    node->continue_target()->Bind();
-  } else {
-    node->continue_target()->SetExpectedHeight();
-    loop.Bind();
-  }
-
-  // If the test is always true, there is no need to compile it.
-  if (info == DONT_KNOW) {
-    JumpTarget body;
-    LoadCondition(node->cond(), &body, node->break_target(), true);
-    if (has_valid_frame()) {
-      Branch(false, node->break_target());
-    }
-    if (has_valid_frame() || body.is_linked()) {
-      body.Bind();
-    }
-  }
-
-  if (has_valid_frame()) {
-    CheckStack();  // TODO(1222600): ignore if body contains calls.
-    Visit(node->body());
-
-    if (node->next() == NULL) {
-      // If there is no update statement and control flow can fall out
-      // of the loop, jump directly to the continue label.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-    } else {
-      // If there is an update statement and control flow can reach it
-      // via falling out of the body of the loop or continuing, we
-      // compile the update statement.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (has_valid_frame()) {
-        // Record the source position of the statement, as this code
-        // (which comes after the code for the body) actually belongs to
-        // the loop statement and not the body.
-        CodeForStatementPosition(node);
-        Visit(node->next());
-        loop.Jump();
-      }
-    }
-  }
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ ForInStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget primitive;
-  JumpTarget jsobject;
-  JumpTarget fixed_array;
-  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
-  JumpTarget end_del_check;
-  JumpTarget exit;
-
-  // Get the object to enumerate over (converted to JSObject).
-  Load(node->enumerable());
-
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  // Both SpiderMonkey and kjs ignore null and undefined in contrast
-  // to the specification.  12.6.4 mandates a call to ToObject.
-  frame_->EmitPop(r0);
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(r0, ip);
-  exit.Branch(eq);
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(r0, ip);
-  exit.Branch(eq);
-
-  // Stack layout in body:
-  // [iteration counter (Smi)]
-  // [length of array]
-  // [FixedArray]
-  // [Map or 0]
-  // [Object]
-
-  // Check if enumerable is already a JSObject
-  __ tst(r0, Operand(kSmiTagMask));
-  primitive.Branch(eq);
-  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
-  jsobject.Branch(hs);
-
-  primitive.Bind();
-  frame_->EmitPush(r0);
-  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, 1);
-
-  jsobject.Bind();
-  // Get the set of properties (as a FixedArray or Map).
-  // r0: value to be iterated over
-  frame_->EmitPush(r0);  // Push the object being iterated over.
-
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  JumpTarget call_runtime;
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-  JumpTarget check_prototype;
-  JumpTarget use_cache;
-  __ mov(r1, Operand(r0));
-  loop.Bind();
-  // Check that there are no elements.
-  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
-  __ cmp(r2, r4);
-  call_runtime.Branch(ne);
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in r3 for the subsequent
-  // prototype load.
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldr(r2, FieldMemOperand(r3, Map::kInstanceDescriptorsOffset));
-  __ LoadRoot(ip, Heap::kEmptyDescriptorArrayRootIndex);
-  __ cmp(r2, ip);
-  call_runtime.Branch(eq);
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors.  This is the case if the next enumeration index
-  // field does not contain a smi.
-  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumerationIndexOffset));
-  __ tst(r2, Operand(kSmiTagMask));
-  call_runtime.Branch(eq);
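-  // (A smi in that field is just the next enumeration index; a non-smi
-  // there is the enum cache bridge.)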
-  // For all objects but the receiver, check that the cache is empty.
-  // r4: empty fixed array root.
-  __ cmp(r1, r0);
-  check_prototype.Branch(eq);
-  __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  __ cmp(r2, r4);
-  call_runtime.Branch(ne);
-  check_prototype.Bind();
-  // Load the prototype from the map and loop if non-null.
-  __ ldr(r1, FieldMemOperand(r3, Map::kPrototypeOffset));
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(r1, ip);
-  loop.Branch(ne);
-  // The enum cache is valid.  Load the map of the object being
-  // iterated over and use the cache for the iteration.
-  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
-  use_cache.Jump();
-
-  call_runtime.Bind();
-  // Call the runtime to get the property names for the object.
-  frame_->EmitPush(r0);  // push the object (slot 4) for the runtime call
-  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
-  // If we got a map from the runtime call, we can do a fast
-  // modification check. Otherwise, we got a fixed array, and we have
-  // to do a slow check.
-  // r0: map or fixed array (result from call to
-  // Runtime::kGetPropertyNamesFast)
-  __ mov(r2, Operand(r0));
-  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
-  __ cmp(r1, ip);
-  fixed_array.Branch(ne);
-
-  use_cache.Bind();
-  // Get enum cache
-  // r0: map (either the result from a call to
-  // Runtime::kGetPropertyNamesFast or has been fetched directly from
-  // the object)
-  __ mov(r1, Operand(r0));
-  __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
-  __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
-  __ ldr(r2,
-         FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
-  frame_->EmitPush(r0);  // map
-  frame_->EmitPush(r2);  // enum cache bridge cache
-  __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
-  frame_->EmitPush(r0);
-  __ mov(r0, Operand(Smi::FromInt(0)));
-  frame_->EmitPush(r0);
-  entry.Jump();
-
-  fixed_array.Bind();
-  __ mov(r1, Operand(Smi::FromInt(0)));
-  frame_->EmitPush(r1);  // insert 0 in place of Map
-  frame_->EmitPush(r0);
-
-  // Push the length of the array and the initial index onto the stack.
-  __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
-  frame_->EmitPush(r0);
-  __ mov(r0, Operand(Smi::FromInt(0)));  // init index
-  frame_->EmitPush(r0);
-
-  // Condition.
-  entry.Bind();
-  // sp[0] : index
-  // sp[1] : array/enum cache length
-  // sp[2] : array or enum cache
-  // sp[3] : 0 or map
-  // sp[4] : enumerable
-  // Grab the current frame's height for the break and continue
-  // targets only after all the state is pushed on the frame.
-  node->break_target()->SetExpectedHeight();
-  node->continue_target()->SetExpectedHeight();
-
-  // Load the current count to r0, load the length to r1.
-  __ Ldrd(r0, r1, frame_->ElementAt(0));
-  __ cmp(r0, r1);  // compare to the array length
-  node->break_target()->Branch(hs);
-
-  // Get the i'th entry of the array.
-  __ ldr(r2, frame_->ElementAt(2));
-  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-
-  // Get Map or 0.
-  __ ldr(r2, frame_->ElementAt(3));
-  // Check if this (still) matches the map of the enumerable.
-  // If not, we have to filter the key.
-  __ ldr(r1, frame_->ElementAt(4));
-  __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ cmp(r1, Operand(r2));
-  end_del_check.Branch(eq);
-
-  // Convert the entry to a string (or null if it isn't a property anymore).
-  __ ldr(r0, frame_->ElementAt(4));  // push enumerable
-  frame_->EmitPush(r0);
-  frame_->EmitPush(r3);  // push entry
-  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, 2);
-  __ mov(r3, Operand(r0), SetCC);
-  // If the property has been removed while iterating, we just skip it.
-  node->continue_target()->Branch(eq);
-
-  end_del_check.Bind();
-  // Store the entry in the 'each' expression and take another spin in the
-  // loop.  r3: i'th entry of the enum cache (or string thereof).
-  frame_->EmitPush(r3);  // push entry
-  { VirtualFrame::RegisterAllocationScope scope(this);
-    Reference each(this, node->each());
-    if (!each.is_illegal()) {
-      if (each.size() > 0) {
-        // Loading a reference may leave the frame in an unspilled state.
-        frame_->SpillAll();  // Sync stack to memory.
-        // Get the value (under the reference on the stack) from memory.
-        __ ldr(r0, frame_->ElementAt(each.size()));
-        frame_->EmitPush(r0);
-        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
-        frame_->Drop(2);  // The result of the set and the extra pushed value.
-      } else {
-        // If the reference was to a slot we rely on the convenient property
-        // that it doesn't matter whether a value (e.g. the entry pushed
-        // above) is right on top of or right underneath a zero-sized
-        // reference.
-        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
-        frame_->Drop(1);  // Drop the result of the set operation.
-      }
-    }
-  }
-  // Body.
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  { VirtualFrame::RegisterAllocationScope scope(this);
-    Visit(node->body());
-  }
-
-  // Next.  Reestablish a spilled frame in case we are coming here via
-  // a continue in the body.
-  node->continue_target()->Bind();
-  frame_->SpillAll();
-  frame_->EmitPop(r0);
-  __ add(r0, r0, Operand(Smi::FromInt(1)));
-  frame_->EmitPush(r0);
-  entry.Jump();
-
-  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
-  // any frame.
-  node->break_target()->Bind();
-  frame_->Drop(5);
-
-  // Exit.
-  exit.Bind();
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
-  ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  Comment cmnt(masm_, "[ TryCatchStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget try_block;
-  JumpTarget exit;
-
-  try_block.Call();
-  // --- Catch block ---
-  frame_->EmitPush(r0);
-
-  // Store the caught exception in the catch variable.
-  Variable* catch_var = node->catch_var()->var();
-  ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
-  StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
-  // Remove the exception from the stack.
-  frame_->Drop();
-
-  { VirtualFrame::RegisterAllocationScope scope(this);
-    VisitStatements(node->catch_block()->statements());
-  }
-  if (frame_ != NULL) {
-    exit.Jump();
-  }
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_CATCH_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the labels for all escapes from the try block, including
-  // returns. During shadowing, the original label is hidden as the
-  // LabelShadow and operations on the original actually affect the
-  // shadowing label.
-  //
-  // We should probably try to unify the escaping labels and the return
-  // label.
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  { VirtualFrame::RegisterAllocationScope scope(this);
-    VisitStatements(node->try_block()->statements());
-  }
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original labels are unshadowed and the
-  // LabelShadows represent the formerly shadowing labels.
-  bool has_unlinks = false;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    has_unlinks = has_unlinks || shadows[i]->is_linked();
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
-  // If we can fall off the end of the try block, unlink from the try chain.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.  Unlink from
-    // the handler list and drop the rest of this handler from the
-    // frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    frame_->EmitPop(r1);  // r0 can contain the return value.
-    __ mov(r3, Operand(handler_address));
-    __ str(r1, MemOperand(r3));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-    if (has_unlinks) {
-      exit.Jump();
-    }
-  }
-
-  // Generate unlink code for the (formerly) shadowing labels that have been
-  // jumped to.  Deallocate each shadow target.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // Unlink from the try chain.
-      shadows[i]->Bind();
-      // Because we can be jumping here (to spilled code) from unspilled
-      // code, we need to reestablish a spilled frame at this block.
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that we
-      // break from (e.g. for...in) may have left stuff on the stack.
-      __ mov(r3, Operand(handler_address));
-      __ ldr(sp, MemOperand(r3));
-      frame_->Forget(frame_->height() - handler_height);
-
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      frame_->EmitPop(r1);  // r0 can contain the return value.
-      __ str(r1, MemOperand(r3));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
-        frame_->PrepareForReturn();
-      }
-      shadows[i]->other_target()->Jump();
-    }
-  }
-
-  exit.Bind();
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  Comment cmnt(masm_, "[ TryFinallyStatement");
-  CodeForStatementPosition(node);
-
-  // State: Used to keep track of reason for entering the finally
-  // block. Should probably be extended to hold information for
-  // break/continue from within the try block.
-  enum { FALLING, THROWING, JUMPING };
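-  // The state JUMPING + i means "arrived via the i'th shadowing target",
-  // which lets the finally block dispatch back to the right destination.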
-
-  JumpTarget try_block;
-  JumpTarget finally_block;
-
-  try_block.Call();
-
-  frame_->EmitPush(r0);  // save exception object on the stack
-  // In case of thrown exceptions, this is where we continue.
-  __ mov(r2, Operand(Smi::FromInt(THROWING)));
-  finally_block.Jump();
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the labels for all escapes from the try block, including
-  // returns.  Shadowing hides the original label as the LabelShadow and
-  // operations on the original actually affect the shadowing label.
-  //
-  // We should probably try to unify the escaping labels and the return
-  // label.
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  { VirtualFrame::RegisterAllocationScope scope(this);
-    VisitStatements(node->try_block()->statements());
-  }
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original labels are unshadowed and the
-  // LabelShadows represent the formerly shadowing labels.
-  int nof_unlinks = 0;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    if (shadows[i]->is_linked()) nof_unlinks++;
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
-  // If we can fall off the end of the try block, unlink from the try
-  // chain and set the state on the frame to FALLING.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    frame_->EmitPop(r1);
-    __ mov(r3, Operand(handler_address));
-    __ str(r1, MemOperand(r3));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-    // Fake a top of stack value (unneeded when FALLING) and set the
-    // state in r2, then jump around the unlink blocks if any.
-    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-    frame_->EmitPush(r0);
-    __ mov(r2, Operand(Smi::FromInt(FALLING)));
-    if (nof_unlinks > 0) {
-      finally_block.Jump();
-    }
-  }
-
-  // Generate code to unlink and set the state for the (formerly)
-  // shadowing targets that have been jumped to.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // If we have come from the shadowed return, the return value is
-      // in (a non-refcounted reference to) r0.  We must preserve it
-      // until it is pushed.
-      //
-      // Because we can be jumping here (to spilled code) from
-      // unspilled code, we need to reestablish a spilled frame at
-      // this block.
-      shadows[i]->Bind();
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that
-      // we break from (e.g. for...in) may have left stuff on the
-      // stack.
-      __ mov(r3, Operand(handler_address));
-      __ ldr(sp, MemOperand(r3));
-      frame_->Forget(frame_->height() - handler_height);
-
-      // Unlink this handler and drop it from the frame.  The next
-      // handler address is currently on top of the frame.
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      frame_->EmitPop(r1);
-      __ str(r1, MemOperand(r3));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (i == kReturnShadowIndex) {
-        // If this label shadowed the function return, materialize the
-        // return value on the stack.
-        frame_->EmitPush(r0);
-      } else {
-        // Fake TOS for targets that shadowed breaks and continues.
-        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-        frame_->EmitPush(r0);
-      }
-      __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
-      if (--nof_unlinks > 0) {
-        // If this is not the last unlink block, jump around the next.
-        finally_block.Jump();
-      }
-    }
-  }
-
-  // --- Finally block ---
-  finally_block.Bind();
-
-  // Push the state on the stack.
-  frame_->EmitPush(r2);
-
-  // We keep two elements on the stack - the (possibly faked) result
-  // and the state - while evaluating the finally block.
-  //
-  // Generate code for the statements in the finally block.
-  { VirtualFrame::RegisterAllocationScope scope(this);
-    VisitStatements(node->finally_block()->statements());
-  }
-
-  if (has_valid_frame()) {
-    // Restore state and return value or faked TOS.
-    frame_->EmitPop(r2);
-    frame_->EmitPop(r0);
-  }
-
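-  // Dispatch on the state in r2: JUMPING + i resumes the jump to shadow
-  // target i, THROWING rethrows the pending exception, and FALLING simply
-  // falls through to the code after the try/finally.
-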
-  // Generate code to jump to the right destination for all used
-  // formerly shadowing targets.  Deallocate each shadow target.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (has_valid_frame() && shadows[i]->is_bound()) {
-      JumpTarget* original = shadows[i]->other_target();
-      __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
-      if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
-        JumpTarget skip;
-        skip.Branch(ne);
-        frame_->PrepareForReturn();
-        original->Jump();
-        skip.Bind();
-      } else {
-        original->Branch(eq);
-      }
-    }
-  }
-
-  if (has_valid_frame()) {
-    // Check if we need to rethrow the exception.
-    JumpTarget exit;
-    __ cmp(r2, Operand(Smi::FromInt(THROWING)));
-    exit.Branch(ne);
-
-    // Rethrow exception.
-    frame_->EmitPush(r0);
-    frame_->CallRuntime(Runtime::kReThrow, 1);
-
-    // Done.
-    exit.Bind();
-  }
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ DebuggerStatament");
-  CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  frame_->DebugBreak();
-#endif
-  // Ignore the return value.
-  ASSERT(frame_->height() == original_height);
-}
-
-
-void CodeGenerator::InstantiateFunction(
-    Handle<SharedFunctionInfo> function_info,
-    bool pretenure) {
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning.
-  if (!pretenure &&
-      scope()->is_function_scope() &&
-      function_info->num_literals() == 0) {
-    FastNewClosureStub stub(
-        function_info->strict_mode() ? kStrictMode : kNonStrictMode);
-    frame_->EmitPush(Operand(function_info));
-    frame_->SpillAll();
-    frame_->CallStub(&stub, 1);
-    frame_->EmitPush(r0);
-  } else {
-    // Create a new closure.
-    frame_->EmitPush(cp);
-    frame_->EmitPush(Operand(function_info));
-    frame_->EmitPush(Operand(pretenure
-                             ? FACTORY->true_value()
-                             : FACTORY->false_value()));
-    frame_->CallRuntime(Runtime::kNewClosure, 3);
-    frame_->EmitPush(r0);
-  }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ FunctionLiteral");
-
-  // Build the function info and instantiate it.
-  Handle<SharedFunctionInfo> function_info =
-      Compiler::BuildFunctionInfo(node, script());
-  if (function_info.is_null()) {
-    SetStackOverflow();
-    ASSERT(frame_->height() == original_height);
-    return;
-  }
-  InstantiateFunction(function_info, node->pretenure());
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
-  InstantiateFunction(node->shared_function_info(), false);
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Conditional");
-  JumpTarget then;
-  JumpTarget else_;
-  LoadCondition(node->condition(), &then, &else_, true);
-  if (has_valid_frame()) {
-    Branch(false, &else_);
-  }
-  if (has_valid_frame() || then.is_linked()) {
-    then.Bind();
-    Load(node->then_expression());
-  }
-  if (else_.is_linked()) {
-    JumpTarget exit;
-    if (has_valid_frame()) exit.Jump();
-    else_.Bind();
-    Load(node->else_expression());
-    if (exit.is_linked()) exit.Bind();
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-
-    // JumpTargets do not yet support merging frames so the frame must be
-    // spilled when jumping to these targets.
-    JumpTarget slow;
-    JumpTarget done;
-
-    // Generate fast case for loading from slots that correspond to
-    // local/global variables or arguments unless they are shadowed by
-    // eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(slot,
-                                    typeof_state,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    frame_->EmitPush(cp);
-    frame_->EmitPush(Operand(slot->var()->name()));
-
-    if (typeof_state == INSIDE_TYPEOF) {
-      frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
-    } else {
-      frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
-    }
-
-    done.Bind();
-    frame_->EmitPush(r0);
-
-  } else {
-    Register scratch = VirtualFrame::scratch0();
-    TypeInfo info = type_info(slot);
-    frame_->EmitPush(SlotOperand(slot, scratch), info);
-
-    if (slot->var()->mode() == Variable::CONST) {
-      // Const slots may contain 'the hole' value (the constant hasn't been
-      // initialized yet) which needs to be converted into the 'undefined'
-      // value.
-      Comment cmnt(masm_, "[ Unhole const");
-      Register tos = frame_->PopToRegister();
-      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-      __ cmp(tos, ip);
-      __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
-      frame_->EmitPush(tos);
-    }
-  }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
-                                                  TypeofState state) {
-  VirtualFrame::RegisterAllocationScope scope(this);
-  LoadFromSlot(slot, state);
-
-  // Bail out quickly if we're not using lazy arguments allocation.
-  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
-  // ... or if the slot isn't a non-parameter arguments slot.
-  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
-  // Peek at the value just loaded: copy it into a register while leaving it
-  // on the stack.
-  Register tos = frame_->Peek();
-
-  // If the loaded value is the sentinel that indicates that we
-  // haven't loaded the arguments object yet, we need to do it now.
-  JumpTarget exit;
-  __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
-  __ cmp(tos, ip);
-  exit.Branch(ne);
-  frame_->Drop();
-  StoreArgumentsObject(false);
-  exit.Bind();
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
-  ASSERT(slot != NULL);
-  VirtualFrame::RegisterAllocationScope scope(this);
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-
-    // For now, just do a runtime call.
-    frame_->EmitPush(cp);
-    frame_->EmitPush(Operand(slot->var()->name()));
-
-    if (init_state == CONST_INIT) {
-      // Same as the case for a normal store, but ignores attribute
-      // (e.g. READ_ONLY) of context slot so that we can initialize
-      // const properties (introduced via eval("const foo = (some
-      // expr);")). Also, uses the current function context instead of
-      // the top context.
-      //
-      // Note that we must declare foo upon entry to eval(), via a
-      // context slot declaration, but we cannot initialize it at the
-      // same time, because the const declaration may be at the end of
-      // the eval code (sigh...) and the const variable may have been
-      // used before (where its value is 'undefined'). Thus, we can only
-      // do the initialization when we actually encounter the expression
-      // and when the expression operands are defined and valid, and
-      // thus we need the split into 2 operations: declaration of the
-      // context slot followed by initialization.
-      frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
-    } else {
-      frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-      frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
-    }
-    // Storing a variable must keep the (new) value on the expression
-    // stack. This is necessary for compiling assignment expressions.
-    frame_->EmitPush(r0);
-
-  } else {
-    ASSERT(!slot->var()->is_dynamic());
-    Register scratch = VirtualFrame::scratch0();
-    Register scratch2 = VirtualFrame::scratch1();
-
-    // The frame must be spilled when branching to this target.
-    JumpTarget exit;
-
-    if (init_state == CONST_INIT) {
-      ASSERT(slot->var()->mode() == Variable::CONST);
-      // Only the first const initialization must be executed (the slot
-      // still contains 'the hole' value). When the assignment is
-      // executed, the code is identical to a normal store (see below).
-      Comment cmnt(masm_, "[ Init const");
-      __ ldr(scratch, SlotOperand(slot, scratch));
-      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-      __ cmp(scratch, ip);
-      exit.Branch(ne);
-    }
-
-    // We must execute the store.  Storing a variable must keep the
-    // (new) value on the stack. This is necessary for compiling
-    // assignment expressions.
-    //
-    // Note: We will reach here even with slot->var()->mode() ==
-    // Variable::CONST because of const declarations which will
-    // initialize consts to 'the hole' value and by doing so, end up
-    // calling this code.  The scratch register may be loaded with the
-    // context; it is used below in RecordWrite.
-    Register tos = frame_->Peek();
-    __ str(tos, SlotOperand(slot, scratch));
-    if (slot->type() == Slot::CONTEXT) {
-      // Skip write barrier if the written value is a smi.
-      __ tst(tos, Operand(kSmiTagMask));
-      // We don't use tos any more after here.
-      exit.Branch(eq);
-      // scratch is loaded with context when calling SlotOperand above.
-      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-      // We need an extra register.  Until we have a way to do that in the
-      // virtual frame we will cheat and ask for a free TOS register.
-      Register scratch3 = frame_->GetTOSRegister();
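-      // Emit the write barrier so the garbage collector learns about the
-      // pointer just stored into the context object.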
-      __ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
-    }
-    // If we definitely did not jump over the assignment, we do not need
-    // to bind the exit label.  Doing so can defeat peephole
-    // optimization.
-    if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
-      exit.Bind();
-    }
-  }
-}
-
-
-void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                                      TypeofState typeof_state,
-                                                      JumpTarget* slow) {
-  // Check that no extension objects have been created by calls to
-  // eval from the current scope to the global scope.
-  Register tmp = frame_->scratch0();
-  Register tmp2 = frame_->scratch1();
-  Register context = cp;
-  Scope* s = scope();
-  while (s != NULL) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        frame_->SpillAll();
-        // Check that extension is NULL.
-        __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
-        __ tst(tmp2, tmp2);
-        slow->Branch(ne);
-      }
-      // Load next context in chain.
-      __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
-      __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
-      context = tmp;
-    }
-    // If no outer scope calls eval, we do not need to check more
-    // context extensions.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
-    s = s->outer_scope();
-  }
-
-  if (s->is_eval_scope()) {
-    frame_->SpillAll();
-    Label next, fast;
-    __ Move(tmp, context);
-    __ bind(&next);
-    // Terminate at global context.
-    __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
-    __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
-    __ cmp(tmp2, ip);
-    __ b(eq, &fast);
-    // Check that extension is NULL.
-    __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
-    __ tst(tmp2, tmp2);
-    slow->Branch(ne);
-    // Load next context in chain.
-    __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
-    __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
-    __ b(&next);
-    __ bind(&fast);
-  }
-
-  // Load the global object.
-  LoadGlobal();
-  // Set up the name register and call the load IC.
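-  // Presumably CODE_TARGET_CONTEXT marks this as a contextual load that may
-  // throw a reference error, which a load inside typeof must not do.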
-  frame_->CallLoadIC(slot->var()->name(),
-                     typeof_state == INSIDE_TYPEOF
-                         ? RelocInfo::CODE_TARGET
-                         : RelocInfo::CODE_TARGET_CONTEXT);
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                                    TypeofState typeof_state,
-                                                    JumpTarget* slow,
-                                                    JumpTarget* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
-  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-    LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
-    frame_->SpillAll();
-    done->Jump();
-
-  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    frame_->SpillAll();
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
-    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
-    if (potential_slot != NULL) {
-      // Generate fast case for locals that rewrite to slots.
-      __ ldr(r0,
-             ContextSlotOperandCheckExtensions(potential_slot,
-                                               r1,
-                                               r2,
-                                               slow));
-      if (potential_slot->var()->mode() == Variable::CONST) {
-        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-        __ cmp(r0, ip);
-        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
-      }
-      done->Jump();
-    } else if (rewrite != NULL) {
-      // Generate fast case for argument loads.
-      Property* property = rewrite->AsProperty();
-      if (property != NULL) {
-        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-        Literal* key_literal = property->key()->AsLiteral();
-        if (obj_proxy != NULL &&
-            key_literal != NULL &&
-            obj_proxy->IsArguments() &&
-            key_literal->handle()->IsSmi()) {
-          // Load arguments object if there are no eval-introduced
-          // variables. Then load the argument from the arguments
-          // object using keyed load.
-          __ ldr(r0,
-                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
-                                                   r1,
-                                                   r2,
-                                                   slow));
-          frame_->EmitPush(r0);
-          __ mov(r1, Operand(key_literal->handle()));
-          frame_->EmitPush(r1);
-          EmitKeyedLoad();
-          done->Jump();
-        }
-      }
-    }
-  }
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Slot");
-  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ VariableProxy");
-
-  Variable* var = node->var();
-  Expression* expr = var->rewrite();
-  if (expr != NULL) {
-    Visit(expr);
-  } else {
-    ASSERT(var->is_global());
-    Reference ref(this, node);
-    ref.GetValue();
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Literal");
-  Register reg = frame_->GetTOSRegister();
-  bool is_smi = node->handle()->IsSmi();
-  __ mov(reg, Operand(node->handle()));
-  frame_->EmitPush(reg, is_smi ? TypeInfo::Smi() : TypeInfo::Unknown());
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ RexExp Literal");
-
-  Register tmp = VirtualFrame::scratch0();
-  // Free up a TOS register that can be used to push the literal.
-  Register literal = frame_->GetTOSRegister();
-
-  // Retrieve the literal array and check the allocated entry.
-
-  // Load the function of this activation.
-  __ ldr(tmp, frame_->Function());
-
-  // Load the literals array of the function.
-  __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));
-
-  // Load the literal at the index recorded in the AST node.
-  int literal_offset =
-      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
-  __ ldr(literal, FieldMemOperand(tmp, literal_offset));
-
-  JumpTarget materialized;
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(literal, ip);
-  // This branch locks the virtual frame at the materialized label to match
-  // the one we have here, where the literal register is not on the stack
-  // and nothing is spilled.
-  materialized.Branch(ne);
-
-  // If the entry is undefined we call the runtime system to compute
-  // the literal.
-  // literal array  (0)
-  frame_->EmitPush(tmp);
-  // literal index  (1)
-  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
-  // RegExp pattern (2)
-  frame_->EmitPush(Operand(node->pattern()));
-  // RegExp flags   (3)
-  frame_->EmitPush(Operand(node->flags()));
-  frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
-  __ Move(literal, r0);
-
-  materialized.Bind();
-
-  frame_->EmitPush(literal);
-  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-  frame_->EmitPush(Operand(Smi::FromInt(size)));
-  frame_->CallRuntime(Runtime::kAllocateInNewSpace, 1);
-  // TODO(lrn): Use AllocateInNewSpace macro with fallback to runtime.
-  // r0 is newly allocated space.
-
-  // Reuse literal variable with (possibly) a new register, still holding
-  // the materialized boilerplate.
-  literal = frame_->PopToRegister(r0);
-
-  __ CopyFields(r0, literal, tmp.bit(), size / kPointerSize);
-
-  // Push the clone.
-  frame_->EmitPush(r0);
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ ObjectLiteral");
-
-  Register literal = frame_->GetTOSRegister();
-  // Load the function of this activation.
-  __ ldr(literal, frame_->Function());
-  // Literal array.
-  __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
-  frame_->EmitPush(literal);
-  // Literal index.
-  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
-  // Constant properties.
-  frame_->EmitPush(Operand(node->constant_properties()));
-  // Should the object literal have fast elements?
-  frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
-  if (node->depth() > 1) {
-    frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
-    frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
-  }
-  frame_->EmitPush(r0);  // save the result
-
-  // Mark all computed expressions that are bound to a key that
-  // is shadowed by a later occurrence of the same key. For the
-  // marked expressions, no store code is emitted.
-  node->CalculateEmitStore();
-
-  for (int i = 0; i < node->properties()->length(); i++) {
-    // At the start of each iteration, the top of stack contains
-    // the newly created object literal.
-    ObjectLiteral::Property* property = node->properties()->at(i);
-    Literal* key = property->key();
-    Expression* value = property->value();
-    switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-        break;
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
-        // else fall through
-      case ObjectLiteral::Property::COMPUTED:
-        if (key->handle()->IsSymbol()) {
-          Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-              Builtins::kStoreIC_Initialize));
-          Load(value);
-          if (property->emit_store()) {
-            frame_->PopToR0();
-            // Fetch the object literal.
-            frame_->SpillAllButCopyTOSToR1();
-            __ mov(r2, Operand(key->handle()));
-            frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
-          } else {
-            frame_->Drop();
-          }
-          break;
-        }
-        // else fall through
-      case ObjectLiteral::Property::PROTOTYPE: {
-        frame_->Dup();
-        Load(key);
-        Load(value);
-        if (property->emit_store()) {
-          frame_->EmitPush(Operand(Smi::FromInt(NONE)));  // PropertyAttributes
-          frame_->CallRuntime(Runtime::kSetProperty, 4);
-        } else {
-          frame_->Drop(3);
-        }
-        break;
-      }
-      case ObjectLiteral::Property::SETTER: {
-        frame_->Dup();
-        Load(key);
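-        // The smi flag 1 marks a setter for Runtime::kDefineAccessor.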
-        frame_->EmitPush(Operand(Smi::FromInt(1)));
-        Load(value);
-        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        break;
-      }
-      case ObjectLiteral::Property::GETTER: {
-        frame_->Dup();
-        Load(key);
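-        // The smi flag 0 marks a getter for Runtime::kDefineAccessor.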
-        frame_->EmitPush(Operand(Smi::FromInt(0)));
-        Load(value);
-        frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        break;
-      }
-    }
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ ArrayLiteral");
-
-  Register tos = frame_->GetTOSRegister();
-  // Load the function of this activation.
-  __ ldr(tos, frame_->Function());
-  // Load the literals array of the function.
-  __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
-  frame_->EmitPush(tos);
-  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
-  frame_->EmitPush(Operand(node->constant_elements()));
-  int length = node->values()->length();
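-  // Pick the cheapest way to build the array: share copy-on-write elements
-  // when the boilerplate allows it, call the runtime for nested or oversized
-  // literals, and otherwise clone shallowly with a stub.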
-  if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
-    frame_->CallStub(&stub, 3);
-    __ IncrementCounter(masm_->isolate()->counters()->cow_arrays_created_stub(),
-                        1, r1, r2);
-  } else if (node->depth() > 1) {
-    frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
-    frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
-  } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
-    frame_->CallStub(&stub, 3);
-  }
-  frame_->EmitPush(r0);  // save the result
-  // r0: created object literal
-
-  // Generate code to set the elements in the array that are not
-  // literals.
-  for (int i = 0; i < node->values()->length(); i++) {
-    Expression* value = node->values()->at(i);
-
-    // If value is a literal the property value is already set in the
-    // boilerplate object.
-    if (value->AsLiteral() != NULL) continue;
-    // If value is a materialized literal the property value is already set
-    // in the boilerplate object if it is simple.
-    if (CompileTimeValue::IsCompileTimeValue(value)) continue;
-
-    // The property must be set by generated code.
-    Load(value);
-    frame_->PopToR0();
-    // Fetch the object literal.
-    frame_->SpillAllButCopyTOSToR1();
-
-    // Get the elements array.
-    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
-
-    // Write to the indexed properties array.
-    int offset = i * kPointerSize + FixedArray::kHeaderSize;
-    __ str(r0, FieldMemOperand(r1, offset));
-
-    // Update the write barrier for the array address.
-    __ RecordWrite(r1, Operand(offset), r3, r2);
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  // Call runtime routine to allocate the catch extension object and
-  // assign the exception value to the catch variable.
-  Comment cmnt(masm_, "[ CatchExtensionObject");
-  Load(node->key());
-  Load(node->value());
-  frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
-  frame_->EmitPush(r0);
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm(), "[ Variable Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  ASSERT(var != NULL);
-  Slot* slot = var->AsSlot();
-  ASSERT(slot != NULL);
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-
-    // Perform the binary operation.
-    Literal* literal = node->value()->AsLiteral();
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    if (literal != NULL && literal->handle()->IsSmi()) {
-      SmiOperation(node->binary_op(),
-                   literal->handle(),
-                   false,
-                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-    } else {
-      GenerateInlineSmi inline_smi =
-          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
-      if (literal != NULL) {
-        ASSERT(!literal->handle()->IsSmi());
-        inline_smi = DONT_GENERATE_INLINE_SMI;
-      }
-      Load(node->value());
-      GenericBinaryOperation(node->binary_op(),
-                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
-                             inline_smi);
-    }
-  } else {
-    Load(node->value());
-  }
-
-  // Perform the assignment.
-  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
-    CodeForSourcePosition(node->position());
-    StoreToSlot(slot,
-                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm(), "[ Named Property Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-  ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
-  // Initialize name and evaluate the receiver sub-expression if necessary. If
-  // the receiver is trivial it is not placed on the stack at this point, but
-  // loaded whenever actually needed.
-  Handle<String> name;
-  bool is_trivial_receiver = false;
-  if (var != NULL) {
-    name = var->name();
-  } else {
-    Literal* lit = prop->key()->AsLiteral();
-    ASSERT_NOT_NULL(lit);
-    name = Handle<String>::cast(lit->handle());
-    // Do not materialize the receiver on the frame if it is trivial.
-    is_trivial_receiver = prop->obj()->IsTrivial();
-    if (!is_trivial_receiver) Load(prop->obj());
-  }
-
-  // Change to slow case at the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
-  if (node->starts_initialization_block()) {
-    // An initialization block consists of assignments of the form
-    // expr.x = ..., so this is never an assignment to a variable and there
-    // must be a receiver object.
-    ASSERT_EQ(NULL, var);
-    if (is_trivial_receiver) {
-      Load(prop->obj());
-    } else {
-      frame_->Dup();
-    }
-    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare for
-  // that add an extra copy of the receiver to the frame, so that it can be
-  // converted back to fast case after the assignment.
-  if (node->ends_initialization_block() && !is_trivial_receiver) {
-    frame_->Dup();
-  }
-
-  // Stack layout:
-  // [tos]   : receiver (only materialized if non-trivial)
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    if (is_trivial_receiver) {
-      Load(prop->obj());
-    } else if (var != NULL) {
-      LoadGlobal();
-    } else {
-      frame_->Dup();
-    }
-    EmitNamedLoad(name, var != NULL);
-
-    // Perform the binary operation.
-    Literal* literal = node->value()->AsLiteral();
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    if (literal != NULL && literal->handle()->IsSmi()) {
-      SmiOperation(node->binary_op(),
-                   literal->handle(),
-                   false,
-                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-    } else {
-      GenerateInlineSmi inline_smi =
-          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
-      if (literal != NULL) {
-        ASSERT(!literal->handle()->IsSmi());
-        inline_smi = DONT_GENERATE_INLINE_SMI;
-      }
-      Load(node->value());
-      GenericBinaryOperation(node->binary_op(),
-                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
-                             inline_smi);
-    }
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : receiver (only materialized if non-trivial)
-  // [tos+2] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(var == NULL || var->mode() != Variable::CONST);
-  ASSERT_NE(Token::INIT_CONST, node->op());
-  if (is_trivial_receiver) {
-    // Load the receiver and swap with the value.
-    Load(prop->obj());
-    Register t0 = frame_->PopToRegister();
-    Register t1 = frame_->PopToRegister(t0);
-    frame_->EmitPush(t0);
-    frame_->EmitPush(t1);
-  }
-  CodeForSourcePosition(node->position());
-  bool is_contextual = (var != NULL);
-  EmitNamedStore(name, is_contextual);
-  frame_->EmitPush(r0);
-
-  // Change to fast case at the end of an initialization block.
-  if (node->ends_initialization_block()) {
-    ASSERT_EQ(NULL, var);
-    // The argument to the runtime call is the receiver.
-    if (is_trivial_receiver) {
-      Load(prop->obj());
-    } else {
-      // A copy of the receiver is below the value of the assignment. Swap
-      // the receiver and the value of the assignment expression.
-      Register t0 = frame_->PopToRegister();
-      Register t1 = frame_->PopToRegister(t0);
-      frame_->EmitPush(t0);
-      frame_->EmitPush(t1);
-    }
-    frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Keyed Property Assignment");
-  Property* prop = node->target()->AsProperty();
-  ASSERT_NOT_NULL(prop);
-
-  // Evaluate the receiver subexpression.
-  Load(prop->obj());
-
-  WriteBarrierCharacter wb_info;
-
-  // Change to slow case at the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
-  if (node->starts_initialization_block()) {
-    frame_->Dup();
-    frame_->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare for
-  // that add an extra copy of the receiver to the frame, so that it can be
-  // converted back to fast case after the assignment.
-  if (node->ends_initialization_block()) {
-    frame_->Dup();
-  }
-
-  // Evaluate the key subexpression.
-  Load(prop->key());
-
-  // Stack layout:
-  // [tos]   : key
-  // [tos+1] : receiver
-  // [tos+2] : receiver if at the end of an initialization block
-  //
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    // Duplicate receiver and key for loading the current property value.
-    frame_->Dup2();
-    EmitKeyedLoad();
-    frame_->EmitPush(r0);
-
-    // Perform the binary operation.
-    Literal* literal = node->value()->AsLiteral();
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    if (literal != NULL && literal->handle()->IsSmi()) {
-      SmiOperation(node->binary_op(),
-                   literal->handle(),
-                   false,
-                   overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-    } else {
-      GenerateInlineSmi inline_smi =
-          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
-      if (literal != NULL) {
-        ASSERT(!literal->handle()->IsSmi());
-        inline_smi = DONT_GENERATE_INLINE_SMI;
-      }
-      Load(node->value());
-      GenericBinaryOperation(node->binary_op(),
-                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
-                             inline_smi);
-    }
-    wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-    wb_info = node->value()->AsLiteral() != NULL ?
-        NEVER_NEWSPACE :
-        (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : key
-  // [tos+2] : receiver
-  // [tos+3] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(node->op() != Token::INIT_CONST);
-  CodeForSourcePosition(node->position());
-  EmitKeyedStore(prop->key()->type(), wb_info);
-  frame_->EmitPush(r0);
-
-  // Stack layout:
-  // [tos]   : result
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Change to fast case at the end of an initialization block.
-  if (node->ends_initialization_block()) {
-    // The argument to the runtime call is the extra copy of the receiver,
-    // which is below the value of the assignment.  Swap the receiver and
-    // the value of the assignment expression.
-    Register t0 = frame_->PopToRegister();
-    Register t1 = frame_->PopToRegister(t0);
-    frame_->EmitPush(t1);
-    frame_->EmitPush(t0);
-    frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-  VirtualFrame::RegisterAllocationScope scope(this);
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Assignment");
-
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-
-  if (var != NULL && !var->is_global()) {
-    EmitSlotAssignment(node);
-
-  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
-             (var != NULL && var->is_global())) {
-    // Properties whose keys are property names and global variables are
-    // treated as named property references.  We do not need to consider
-    // global 'this' because it is not a valid left-hand side.
-    EmitNamedPropertyAssignment(node);
-
-  } else if (prop != NULL) {
-    // Other properties (including rewritten parameters for a function that
-    // uses arguments) are keyed property assignments.
-    EmitKeyedPropertyAssignment(node);
-
-  } else {
-    // Invalid left-hand side.
-    Load(node->target());
-    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
-    // The runtime call doesn't actually return but the code generator will
-    // still generate code and expects a certain frame height.
-    frame_->EmitPush(r0);
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Throw");
-
-  Load(node->exception());
-  CodeForSourcePosition(node->position());
-  frame_->CallRuntime(Runtime::kThrow, 1);
-  frame_->EmitPush(r0);
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Property");
-
-  { Reference property(this, node);
-    property.GetValue();
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ Call");
-
-  Expression* function = node->expression();
-  ZoneList<Expression*>* args = node->arguments();
-
-  // Standard function call.
-  // Check if the function is a variable or a property.
-  Variable* var = function->AsVariableProxy()->AsVariable();
-  Property* property = function->AsProperty();
-
-  // ------------------------------------------------------------------------
-  // Fast-case: Use inline caching.
-  // ---
-  // According to ECMA-262, section 11.2.3, page 44, the function to call
-  // must be resolved after the arguments have been evaluated. The IC code
-  // automatically handles this by loading the arguments before the function
-  // is resolved in cache misses (this also holds for megamorphic calls).
-  // ------------------------------------------------------------------------
-
-  if (var != NULL && var->is_possibly_eval()) {
-    // ----------------------------------
-    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
-    // ----------------------------------
-
-    // In a call to eval, we first call %ResolvePossiblyDirectEval to
-    // resolve the function we need to call and the receiver of the
-    // call.  Then we call the resolved function using the given
-    // arguments.
-
-    // Prepare stack for call to resolved function.
-    Load(function);
-
-    // Allocate a frame slot for the receiver.
-    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-    }
-
-    VirtualFrame::SpilledScope spilled_scope(frame_);
-
-    // If we know that eval can only be shadowed by eval-introduced
-    // variables we attempt to load the global eval function directly
-    // in generated code. If we succeed, there is no need to perform a
-    // context lookup in the runtime system.
-    JumpTarget done;
-    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
-      ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
-      JumpTarget slow;
-      // Prepare the stack for the call to
-      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
-      // function, the first argument to the eval call and the
-      // receiver.
-      LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
-                                        NOT_INSIDE_TYPEOF,
-                                        &slow);
-      frame_->EmitPush(r0);
-      if (arg_count > 0) {
-        __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
-        frame_->EmitPush(r1);
-      } else {
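-        // No arguments: push an arbitrary register as the dummy first
-        // argument; presumably its value is never read by the runtime call.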
-        frame_->EmitPush(r2);
-      }
-      __ ldr(r1, frame_->Receiver());
-      frame_->EmitPush(r1);
-
-      // Push the strict mode flag.
-      frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
-      frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
-      done.Jump();
-      slow.Bind();
-    }
-
-    // Prepare the stack for the call to ResolvePossiblyDirectEval by
-    // pushing the loaded function, the first argument to the eval
-    // call and the receiver.
-    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
-    frame_->EmitPush(r1);
-    if (arg_count > 0) {
-      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
-      frame_->EmitPush(r1);
-    } else {
-      frame_->EmitPush(r2);
-    }
-    __ ldr(r1, frame_->Receiver());
-    frame_->EmitPush(r1);
-
-    // Push the strict mode flag.
-    frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-
-    // Resolve the call.
-    frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
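-    // The runtime call returns the resolved function in r0 and the receiver
-    // in r1; both are written back into the frame below.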
-
-    // If we generated fast-case code bind the jump-target where fast
-    // and slow case merge.
-    if (done.is_linked()) done.Bind();
-
-    // Touch up stack with the right values for the function and the receiver.
-    __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
-    __ str(r1, MemOperand(sp, arg_count * kPointerSize));
-
-    // Call the function.
-    CodeForSourcePosition(node->position());
-
-    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
-    frame_->CallStub(&call_function, arg_count + 1);
-
-    __ ldr(cp, frame_->Context());
-    // Remove the function from the stack.
-    frame_->Drop();
-    frame_->EmitPush(r0);
-
-  } else if (var != NULL && !var->is_this() && var->is_global()) {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
-    // ----------------------------------
-    // Pass the global object as the receiver and let the IC stub
-    // patch the stack to use the global proxy as 'this' in the
-    // invoked function.
-    LoadGlobal();
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-    }
-
-    VirtualFrame::SpilledScope spilled_scope(frame_);
-    // Set up the name register and call the IC initialization code.
-    __ mov(r2, Operand(var->name()));
-    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    Handle<Code> stub =
-        ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-    CodeForSourcePosition(node->position());
-    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
-                           arg_count + 1);
-    __ ldr(cp, frame_->Context());
-    frame_->EmitPush(r0);
-
-  } else if (var != NULL && var->AsSlot() != NULL &&
-             var->AsSlot()->type() == Slot::LOOKUP) {
-    // ----------------------------------
-    // JavaScript examples:
-    //
-    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
-    //
-    //  function f() {};
-    //  function g() {
-    //    eval(...);
-    //    f();  // f could be in extension object.
-    //  }
-    // ----------------------------------
-
-    JumpTarget slow, done;
-
-    // Generate fast case for loading functions from slots that
-    // correspond to local/global variables or arguments unless they
-    // are shadowed by eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
-                                    NOT_INSIDE_TYPEOF,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    // Load the function
-    frame_->EmitPush(cp);
-    frame_->EmitPush(Operand(var->name()));
-    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
-    // r0: slot value; r1: receiver
-
-    // Load the receiver.
-    frame_->EmitPush(r0);  // function
-    frame_->EmitPush(r1);  // receiver
-
-    // If fast case code has been generated, emit code to push the
-    // function and receiver and have the slow path jump around this
-    // code.
-    if (done.is_linked()) {
-      JumpTarget call;
-      call.Jump();
-      done.Bind();
-      frame_->EmitPush(r0);  // function
-      LoadGlobalReceiver(VirtualFrame::scratch0());  // receiver
-      call.Bind();
-    }
-
-    // Call the function. At this point, everything is spilled but the
-    // function and receiver are in r0 and r1.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-    frame_->EmitPush(r0);
-
-  } else if (property != NULL) {
-    // Check if the key is a literal string.
-    Literal* literal = property->key()->AsLiteral();
-
-    if (literal != NULL && literal->handle()->IsSymbol()) {
-      // ------------------------------------------------------------------
-      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
-      // ------------------------------------------------------------------
-
-      Handle<String> name = Handle<String>::cast(literal->handle());
-
-      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
-          name->IsEqualTo(CStrVector("apply")) &&
-          args->length() == 2 &&
-          args->at(1)->AsVariableProxy() != NULL &&
-          args->at(1)->AsVariableProxy()->IsArguments()) {
-        // Use the optimized Function.prototype.apply that avoids
-        // allocating lazily allocated arguments objects.
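-        // This matches the common delegation pattern f.apply(obj, arguments)
-        // inside a function body.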
-        CallApplyLazy(property->obj(),
-                      args->at(0),
-                      args->at(1)->AsVariableProxy(),
-                      node->position());
-
-      } else {
-        Load(property->obj());  // Receiver.
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-        }
-
-        VirtualFrame::SpilledScope spilled_scope(frame_);
-        // Set the name register and call the IC initialization code.
-        __ mov(r2, Operand(name));
-        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-        Handle<Code> stub =
-            ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-        CodeForSourcePosition(node->position());
-        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
-        __ ldr(cp, frame_->Context());
-        frame_->EmitPush(r0);
-      }
-
-    } else {
-      // -------------------------------------------
-      // JavaScript example: 'array[index](1, 2, 3)'
-      // -------------------------------------------
-
-      // Load the receiver and name of the function.
-      Load(property->obj());
-      Load(property->key());
-
-      if (property->is_synthetic()) {
-        EmitKeyedLoad();
-        // Put the function below the receiver.
-        // Use the global receiver.
-        frame_->EmitPush(r0);  // Function.
-        LoadGlobalReceiver(VirtualFrame::scratch0());
-        // Call the function.
-        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
-        frame_->EmitPush(r0);
-      } else {
-        // Swap the name of the function and the receiver on the stack to follow
-        // the calling convention for call ICs.
-        Register key = frame_->PopToRegister();
-        Register receiver = frame_->PopToRegister(key);
-        frame_->EmitPush(key);
-        frame_->EmitPush(receiver);
-
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-        }
-
-        // Load the key into r2 and call the IC initialization code.
-        InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-        Handle<Code> stub =
-            ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count,
-                                                              in_loop);
-        CodeForSourcePosition(node->position());
-        frame_->SpillAll();
-        __ ldr(r2, frame_->ElementAt(arg_count + 1));
-        frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
-        frame_->Drop();  // Drop the key still on the stack.
-        __ ldr(cp, frame_->Context());
-        frame_->EmitPush(r0);
-      }
-    }
-
-  } else {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
-    // ----------------------------------
-
-    // Load the function.
-    Load(function);
-
-    // Pass the global proxy as the receiver.
-    LoadGlobalReceiver(VirtualFrame::scratch0());
-
-    // Call the function.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-    frame_->EmitPush(r0);
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ CallNew");
-
-  // According to ECMA-262, section 11.2.2, page 44, the function
-  // expression in new calls must be evaluated before the
-  // arguments. This is different from ordinary calls, where the
-  // actual function to call is resolved after the arguments have been
-  // evaluated.
-
-  // Push constructor on the stack.  If it's not a function it's used as
-  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
-  // ignored.
-  Load(node->expression());
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = node->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  // Spill everything from here to simplify the implementation.
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-
-  // Load the argument count into r0 and the function into r1 as per
-  // calling convention.
-  __ mov(r0, Operand(arg_count));
-  __ ldr(r1, frame_->ElementAt(arg_count));
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  CodeForSourcePosition(node->position());
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kJSConstructCall));
-  frame_->CallCodeObject(ic, RelocInfo::CONSTRUCT_CALL, arg_count + 1);
-  frame_->EmitPush(r0);
-
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
-  Register scratch = VirtualFrame::scratch0();
-  JumpTarget null, function, leave, non_function_constructor;
-
-  // Load the object into a register.
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register tos = frame_->PopToRegister();
-
-  // If the object is a smi, we return null.
-  __ tst(tos, Operand(kSmiTagMask));
-  null.Branch(eq);
-
-  // Check that the object is a JS object but take special care of JS
-  // functions to make sure they have 'Function' as their class.
-  __ CompareObjectType(tos, tos, scratch, FIRST_JS_OBJECT_TYPE);
-  null.Branch(lt);
-
-  // As long as JS_FUNCTION_TYPE is the last instance type and it is
-  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
-  // LAST_JS_OBJECT_TYPE.
-  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ cmp(scratch, Operand(JS_FUNCTION_TYPE));
-  function.Branch(eq);
-
-  // Check if the constructor in the map is a function.
-  __ ldr(tos, FieldMemOperand(tos, Map::kConstructorOffset));
-  __ CompareObjectType(tos, scratch, scratch, JS_FUNCTION_TYPE);
-  non_function_constructor.Branch(ne);
-
-  // The tos register now contains the constructor function. Grab the
-  // instance class name from there.
-  __ ldr(tos, FieldMemOperand(tos, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(tos,
-         FieldMemOperand(tos, SharedFunctionInfo::kInstanceClassNameOffset));
-  frame_->EmitPush(tos);
-  leave.Jump();
-
-  // Functions have class 'Function'.
-  function.Bind();
-  __ mov(tos, Operand(FACTORY->function_class_symbol()));
-  frame_->EmitPush(tos);
-  leave.Jump();
-
-  // Objects with a non-function constructor have class 'Object'.
-  non_function_constructor.Bind();
-  __ mov(tos, Operand(FACTORY->Object_symbol()));
-  frame_->EmitPush(tos);
-  leave.Jump();
-
-  // Non-JS objects have class null.
-  null.Bind();
-  __ LoadRoot(tos, Heap::kNullValueRootIndex);
-  frame_->EmitPush(tos);
-
-  // All done.
-  leave.Bind();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  Register scratch = VirtualFrame::scratch0();
-  JumpTarget leave;
-
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register tos = frame_->PopToRegister();  // tos contains object.
-  // if (object->IsSmi()) return the object.
-  __ tst(tos, Operand(kSmiTagMask));
-  leave.Branch(eq);
-  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
-  __ CompareObjectType(tos, scratch, scratch, JS_VALUE_TYPE);
-  leave.Branch(ne);
-  // Load the value.
-  __ ldr(tos, FieldMemOperand(tos, JSValue::kValueOffset));
-  leave.Bind();
-  frame_->EmitPush(tos);
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  Register scratch1 = VirtualFrame::scratch0();
-  Register scratch2 = VirtualFrame::scratch1();
-  JumpTarget leave;
-
-  ASSERT(args->length() == 2);
-  Load(args->at(0));    // Load the object.
-  Load(args->at(1));    // Load the value.
-  Register value = frame_->PopToRegister();
-  Register object = frame_->PopToRegister(value);
-  // if (object->IsSmi()) return object.
-  __ tst(object, Operand(kSmiTagMask));
-  leave.Branch(eq);
-  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
-  __ CompareObjectType(object, scratch1, scratch1, JS_VALUE_TYPE);
-  leave.Branch(ne);
-  // Store the value.
-  __ str(value, FieldMemOperand(object, JSValue::kValueOffset));
-  // Update the write barrier.
-  __ RecordWrite(object,
-                 Operand(JSValue::kValueOffset - kHeapObjectTag),
-                 scratch1,
-                 scratch2);
-  // Leave.
-  leave.Bind();
-  frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register reg = frame_->PopToRegister();
-  __ tst(reg, Operand(kSmiTagMask));
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
-  ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (ShouldGenerateLog(args->at(0))) {
-    Load(args->at(1));
-    Load(args->at(2));
-    frame_->CallRuntime(Runtime::kLog, 2);
-  }
-#endif
-  frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register reg = frame_->PopToRegister();
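-  // A smi has the low tag bit clear; also testing the sign bit verifies in
-  // one instruction that the smi is non-negative.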
-  __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
-  cc_reg_ = eq;
-}
-
-
-// Generates the Math.pow method.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  Load(args->at(0));
-  Load(args->at(1));
-
-  if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-    frame_->CallRuntime(Runtime::kMath_pow, 2);
-    frame_->EmitPush(r0);
-  } else {
-    CpuFeatures::Scope scope(VFP3);
-    JumpTarget runtime, done;
-    Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;
-
-    Register scratch1 = VirtualFrame::scratch0();
-    Register scratch2 = VirtualFrame::scratch1();
-
-    // Get base and exponent to registers.
-    Register exponent = frame_->PopToRegister();
-    Register base = frame_->PopToRegister(exponent);
-    Register heap_number_map = no_reg;
-
-    // Set the frame for the runtime jump target. The code below jumps to the
-    // jump target label so the frame needs to be established before that.
-    ASSERT(runtime.entry_frame() == NULL);
-    runtime.set_entry_frame(frame_);
-
-    __ JumpIfNotSmi(exponent, &exponent_nonsmi);
-    __ JumpIfNotSmi(base, &base_nonsmi);
-
-    heap_number_map = r6;
-    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-    // Exponent is a smi and base is a smi. Get the smi value into vfp register
-    // d1.
-    __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
-    __ b(&powi);
-
-    __ bind(&base_nonsmi);
-    // Exponent is a smi and base is a non-smi. Get the double value from the
-    // base into vfp register d1.
-    __ ObjectToDoubleVFPRegister(base, d1,
-                                 scratch1, scratch2, heap_number_map, s0,
-                                 runtime.entry_label());
-
-    __ bind(&powi);
-
-    // Load 1.0 into d0.
-    __ vmov(d0, 1.0);
-
-    // Get the absolute untagged value of the exponent and use that for the
-    // calculation.
-    __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
-    // Negate if negative.
-    __ rsb(scratch1, scratch1, Operand(0, RelocInfo::NONE), LeaveCC, mi);
-    __ vmov(d2, d0, mi);  // 1.0 needed in d2 later if exponent is negative.
-
-    // Run through all the bits in the exponent. The result is accumulated in
-    // d0, and d1 holds base^(2^i) for the bit i currently being processed.
-    Label more_bits;
-    __ bind(&more_bits);
-    __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
-    __ vmul(d0, d0, d1, cs);  // Multiply by base^(2^i) if bit i is set.
-    __ vmul(d1, d1, d1, ne);  // Don't bother calculating next d1 if done.
-    __ b(ne, &more_bits);
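-    // Example: for |exponent| = 5 (binary 101) the loop multiplies d0 by
-    // base^1 and then base^4, giving base^5, while d1 steps through base,
-    // base^2 and base^4.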
-
-    // If exponent is positive we are done.
-    __ cmp(exponent, Operand(0, RelocInfo::NONE));
-    __ b(ge, &allocate_return);
-
-    // If the exponent is negative the result is 1/result (d2 already holds
-    // 1.0 in that case). However, if d0 has reached infinity this will not
-    // provide the correct result, so call the runtime if that is the case.
-    __ mov(scratch2, Operand(0x7FF00000));
-    __ mov(scratch1, Operand(0, RelocInfo::NONE));
-    __ vmov(d1, scratch1, scratch2);  // Load infinity into d1.
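-    // (0x7FF00000 in the high word with a zero low word is the IEEE 754
-    // encoding of +Infinity.)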
-    __ VFPCompareAndSetFlags(d0, d1);
-    runtime.Branch(eq);  // d0 reached infinity.
-    __ vdiv(d0, d2, d0);
-    __ b(&allocate_return);
-
-    __ bind(&exponent_nonsmi);
-    // Special handling of raising to the power of -0.5 and 0.5. First check
-    // that the value is a heap number and that its lower mantissa bits are
-    // zero (which is the case for both values).
-    heap_number_map = r6;
-    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-    __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
-    __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
-    __ cmp(scratch1, heap_number_map);
-    runtime.Branch(ne);
-    __ tst(scratch2, scratch2);
-    runtime.Branch(ne);
-
-    // Load the higher bits (which contains the floating point exponent).
-    __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));
-
-    // Compare exponent with -0.5.
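-    // (0xBFE00000 is the high word of -0.5 as an IEEE 754 double: sign bit
-    // set, biased exponent 0x3FE, zero mantissa bits.)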
-    __ cmp(scratch1, Operand(0xbfe00000));
-    __ b(ne, &not_minus_half);
-
-    // Get the double value from the base into vfp register d0.
-    __ ObjectToDoubleVFPRegister(base, d0,
-                                 scratch1, scratch2, heap_number_map, s0,
-                                 runtime.entry_label(),
-                                 AVOID_NANS_AND_INFINITIES);
-
-    // Convert -0 into +0 by adding +0.
-    __ vmov(d2, 0.0);
-    __ vadd(d0, d2, d0);
-    // Load 1.0 into d2.
-    __ vmov(d2, 1.0);
-
-    // Calculate the reciprocal of the square root.
-    __ vsqrt(d0, d0);
-    __ vdiv(d0, d2, d0);
-
-    __ b(&allocate_return);
-
-    __ bind(&not_minus_half);
-    // Compare exponent with 0.5.
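-    // (0x3FE00000 is the high word of +0.5: the same encoding as -0.5 with
-    // the sign bit clear.)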
-    __ cmp(scratch1, Operand(0x3fe00000));
-    runtime.Branch(ne);
-
-    // Get the double value from the base into vfp register d0.
-    __ ObjectToDoubleVFPRegister(base, d0,
-                                 scratch1, scratch2, heap_number_map, s0,
-                                 runtime.entry_label(),
-                                 AVOID_NANS_AND_INFINITIES);
-    // Convert -0 into +0 by adding +0.
-    __ vmov(d2, 0.0);
-    __ vadd(d0, d2, d0);
-    __ vsqrt(d0, d0);
-
-    __ bind(&allocate_return);
-    Register scratch3 = r5;
-    __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
-                                   heap_number_map, runtime.entry_label());
-    __ mov(base, scratch3);
-    done.Jump();
-
-    runtime.Bind();
-
-    // Push back the arguments again for the runtime call.
-    frame_->EmitPush(base);
-    frame_->EmitPush(exponent);
-    frame_->CallRuntime(Runtime::kMath_pow, 2);
-    __ Move(base, r0);
-
-    done.Bind();
-    frame_->EmitPush(base);
-  }
-}
-
-
-// Generates the Math.sqrt method.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-
-  if (!Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-    frame_->CallRuntime(Runtime::kMath_sqrt, 1);
-    frame_->EmitPush(r0);
-  } else {
-    CpuFeatures::Scope scope(VFP3);
-    JumpTarget runtime, done;
-
-    Register scratch1 = VirtualFrame::scratch0();
-    Register scratch2 = VirtualFrame::scratch1();
-
-    // Get the value from the frame.
-    Register tos = frame_->PopToRegister();
-
-    // Set the frame for the runtime jump target. The code below jumps to the
-    // jump target label so the frame needs to be established before that.
-    ASSERT(runtime.entry_frame() == NULL);
-    runtime.set_entry_frame(frame_);
-
-    Register heap_number_map = r6;
-    Register new_heap_number = r5;
-    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-    // Get the double value from the heap number into vfp register d0.
-    __ ObjectToDoubleVFPRegister(tos, d0,
-                                 scratch1, scratch2, heap_number_map, s0,
-                                 runtime.entry_label());
-
-    // Calculate the square root of d0 and place result in a heap number object.
-    __ vsqrt(d0, d0);
-    __ AllocateHeapNumberWithValue(new_heap_number,
-                                   d0,
-                                   scratch1, scratch2,
-                                   heap_number_map,
-                                   runtime.entry_label());
-    __ mov(tos, Operand(new_heap_number));
-    done.Jump();
-
-    runtime.Bind();
-    // Push back the argument again for the runtime call.
-    frame_->EmitPush(tos);
-    frame_->CallRuntime(Runtime::kMath_sqrt, 1);
-    __ Move(tos, r0);
-
-    done.Bind();
-    frame_->EmitPush(tos);
-  }
-}
-
-
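-// Deferred code: the fast case is emitted inline (via GenerateFast() on the
-// helper's fast_case_generator()), the rare slow case is emitted out of line
-// in Generate(), and BindExit() joins the two code paths.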
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
-  DeferredStringCharCodeAt(Register object,
-                           Register index,
-                           Register scratch,
-                           Register result)
-      : result_(result),
-        char_code_at_generator_(object,
-                                index,
-                                scratch,
-                                result,
-                                &need_conversion_,
-                                &need_conversion_,
-                                &index_out_of_range_,
-                                STRING_INDEX_IS_NUMBER) {}
-
-  StringCharCodeAtGenerator* fast_case_generator() {
-    return &char_code_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move the undefined value into the result register, which will
-    // trigger conversion.
-    __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // NaN.
-    __ LoadRoot(result_, Heap::kNanValueRootIndex);
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns undefined in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharCodeAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  Register index = frame_->PopToRegister();
-  Register object = frame_->PopToRegister(index);
-
-  // We need two extra registers.
-  Register scratch = VirtualFrame::scratch0();
-  Register result = VirtualFrame::scratch1();
-
-  DeferredStringCharCodeAt* deferred =
-      new DeferredStringCharCodeAt(object,
-                                   index,
-                                   scratch,
-                                   result);
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->EmitPush(result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
-  DeferredStringCharFromCode(Register code,
-                             Register result)
-      : char_from_code_generator_(code, result) {}
-
-  StringCharFromCodeGenerator* fast_case_generator() {
-    return &char_from_code_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_from_code_generator_.GenerateSlow(masm(), call_helper);
-  }
-
- private:
-  StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharFromCode");
-  ASSERT(args->length() == 1);
-
-  Load(args->at(0));
-
-  Register result = frame_->GetTOSRegister();
-  Register code = frame_->PopToRegister(result);
-
-  DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
-      code, result);
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->EmitPush(result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
-  DeferredStringCharAt(Register object,
-                       Register index,
-                       Register scratch1,
-                       Register scratch2,
-                       Register result)
-      : result_(result),
-        char_at_generator_(object,
-                           index,
-                           scratch1,
-                           scratch2,
-                           result,
-                           &need_conversion_,
-                           &need_conversion_,
-                           &index_out_of_range_,
-                           STRING_INDEX_IS_NUMBER) {}
-
-  StringCharAtGenerator* fast_case_generator() {
-    return &char_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move smi zero into the result register, which will trigger
-    // conversion.
-    __ mov(result_, Operand(Smi::FromInt(0)));
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // the empty string.
-    __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  Register index = frame_->PopToRegister();
-  Register object = frame_->PopToRegister(index);
-
-  // We need three extra registers.
-  Register scratch1 = VirtualFrame::scratch0();
-  Register scratch2 = VirtualFrame::scratch1();
-  // Use r6 without notifying the virtual frame.
-  Register result = r6;
-
-  DeferredStringCharAt* deferred =
-      new DeferredStringCharAt(object,
-                               index,
-                               scratch1,
-                               scratch2,
-                               result);
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->EmitPush(result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  JumpTarget answer;
-  // We need the CC bits to come out as not_equal in the case where the
-  // object is a smi.  This can't be done with the usual test opcode so
-  // we use XOR to get the right CC bits.
-  Register possible_array = frame_->PopToRegister();
-  Register scratch = VirtualFrame::scratch0();
-  __ and_(scratch, possible_array, Operand(kSmiTagMask));
-  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
-  answer.Branch(ne);
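-  // After the XOR the flags are eq for a heap object and ne for a smi, so a
-  // smi reaches the answer label with ne (i.e. false) already set.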
-  // It is a heap object - get the map. Check if the object is a JS array.
-  __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE);
-  answer.Bind();
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  JumpTarget answer;
-  // We need the CC bits to come out as not_equal in the case where the
-  // object is a smi.  This can't be done with the usual test opcode so
-  // we use XOR to get the right CC bits.
-  Register possible_regexp = frame_->PopToRegister();
-  Register scratch = VirtualFrame::scratch0();
-  __ and_(scratch, possible_regexp, Operand(kSmiTagMask));
-  __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
-  answer.Branch(ne);
-  // It is a heap object - get the map. Check if the object is a regexp.
-  __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE);
-  answer.Bind();
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register possible_object = frame_->PopToRegister();
-  __ tst(possible_object, Operand(kSmiTagMask));
-  false_target()->Branch(eq);
-
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(possible_object, ip);
-  true_target()->Branch(eq);
-
-  Register map_reg = VirtualFrame::scratch0();
-  __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset));
-  // Undetectable objects behave like undefined when tested with typeof.
-  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset));
-  __ tst(possible_object, Operand(1 << Map::kIsUndetectable));
-  false_target()->Branch(ne);
-
-  __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
-  __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE));
-  false_target()->Branch(lt);
-  __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE));
-  cc_reg_ = le;
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
-  // typeof(arg) === 'function').
-  // It includes undetectable objects (as opposed to IsObject).
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register value = frame_->PopToRegister();
-  __ tst(value, Operand(kSmiTagMask));
-  false_target()->Branch(eq);
-  // Check that this is an object.
-  __ ldr(value, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ ldrb(value, FieldMemOperand(value, Map::kInstanceTypeOffset));
-  __ cmp(value, Operand(FIRST_JS_OBJECT_TYPE));
-  cc_reg_ = ge;
-}
-
-
-// Deferred code to check whether a String JavaScript object is safe to use
-// with the default valueOf behavior. This code is called after the bit that
-// caches this information in the map has been checked, with the object's map
-// in the map_result_ register. On return the register map_result_ contains 1
-// for true and 0 for false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
-  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
-                                               Register map_result,
-                                               Register scratch1,
-                                               Register scratch2)
-      : object_(object),
-        map_result_(map_result),
-        scratch1_(scratch1),
-        scratch2_(scratch2) { }
-
-  virtual void Generate() {
-    Label false_result;
-
-    // Check that map is loaded as expected.
-    if (FLAG_debug_code) {
-      __ ldr(ip, FieldMemOperand(object_, HeapObject::kMapOffset));
-      __ cmp(map_result_, ip);
-      __ Assert(eq, "Map not in expected register");
-    }
-
-    // Check for fast case object. Generate false result for slow case object.
-    __ ldr(scratch1_, FieldMemOperand(object_, JSObject::kPropertiesOffset));
-    __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
-    __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
-    __ cmp(scratch1_, ip);
-    __ b(eq, &false_result);
-
-    // Look for valueOf symbol in the descriptor array, and indicate false if
-    // found. The type is not checked, so if it is a transition it is a false
-    // negative.
-    __ ldr(map_result_,
-           FieldMemOperand(map_result_, Map::kInstanceDescriptorsOffset));
-    __ ldr(scratch2_, FieldMemOperand(map_result_, FixedArray::kLengthOffset));
-    // map_result_: descriptor array
-    // scratch2_: length of descriptor array
-    // Calculate the end of the descriptor array.
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    STATIC_ASSERT(kPointerSize == 4);
-    __ add(scratch1_,
-           map_result_,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    __ add(scratch1_,
-           scratch1_,
-           Operand(scratch2_, LSL, kPointerSizeLog2 - kSmiTagSize));
-
-    // Calculate location of the first key name.
-    __ add(map_result_,
-           map_result_,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag +
-                   DescriptorArray::kFirstIndex * kPointerSize));
-    // Loop through all the keys in the descriptor array. If one of these is the
-    // symbol valueOf the result is false.
-    Label entry, loop;
-    // The use of ip to store the valueOf symbol assumes that it is not
-    // otherwise used in the loop below.
-    __ mov(ip, Operand(FACTORY->value_of_symbol()));
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ ldr(scratch2_, MemOperand(map_result_, 0));
-    __ cmp(scratch2_, ip);
-    __ b(eq, &false_result);
-    __ add(map_result_, map_result_, Operand(kPointerSize));
-    __ bind(&entry);
-    __ cmp(map_result_, Operand(scratch1_));
-    __ b(ne, &loop);
-
-    // Reload map as register map_result_ was used as temporary above.
-    __ ldr(map_result_, FieldMemOperand(object_, HeapObject::kMapOffset));
-
-    // If a valueOf property is not found on the object check that its
-    // prototype is the unmodified String prototype. If not, the result is
-    // false.
-    __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kPrototypeOffset));
-    __ tst(scratch1_, Operand(kSmiTagMask));
-    __ b(eq, &false_result);
-    __ ldr(scratch1_, FieldMemOperand(scratch1_, HeapObject::kMapOffset));
-    __ ldr(scratch2_,
-           ContextOperand(cp, Context::GLOBAL_INDEX));
-    __ ldr(scratch2_,
-           FieldMemOperand(scratch2_, GlobalObject::kGlobalContextOffset));
-    __ ldr(scratch2_,
-           ContextOperand(
-               scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
-    __ cmp(scratch1_, scratch2_);
-    __ b(ne, &false_result);
-
-    // Set the bit in the map to indicate that the map has been checked as
-    // safe for the default valueOf, and set a true result.
-    __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
-    __ orr(scratch1_,
-           scratch1_,
-           Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
-    __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
-    __ mov(map_result_, Operand(1));
-    __ jmp(exit_label());
-    __ bind(&false_result);
-    // Set false result.
-    __ mov(map_result_, Operand(0, RelocInfo::NONE));
-  }
-
- private:
-  Register object_;
-  Register map_result_;
-  Register scratch1_;
-  Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register obj = frame_->PopToRegister();  // Pop the string wrapper.
-  if (FLAG_debug_code) {
-    __ AbortIfSmi(obj);
-  }
-
-  // Check whether this map has already been checked to be safe for default
-  // valueOf.
-  Register map_result = VirtualFrame::scratch0();
-  __ ldr(map_result, FieldMemOperand(obj, HeapObject::kMapOffset));
-  __ ldrb(ip, FieldMemOperand(map_result, Map::kBitField2Offset));
-  __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
-  true_target()->Branch(ne);
-
-  // We need an additional two scratch registers for the deferred code.
-  Register scratch1 = VirtualFrame::scratch1();
-  // Use r6 without notifying the virtual frame.
-  Register scratch2 = r6;
-
-  DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
-      new DeferredIsStringWrapperSafeForDefaultValueOf(
-          obj, map_result, scratch1, scratch2);
-  deferred->Branch(eq);
-  deferred->BindExit();
-  __ tst(map_result, Operand(map_result));
-  cc_reg_ = ne;
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (%_ClassOf(arg) === 'Function')
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register possible_function = frame_->PopToRegister();
-  __ tst(possible_function, Operand(kSmiTagMask));
-  false_target()->Branch(eq);
-  Register map_reg = VirtualFrame::scratch0();
-  Register scratch = VirtualFrame::scratch1();
-  __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE);
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register possible_undetectable = frame_->PopToRegister();
-  __ tst(possible_undetectable, Operand(kSmiTagMask));
-  false_target()->Branch(eq);
-  Register scratch = VirtualFrame::scratch0();
-  __ ldr(scratch,
-         FieldMemOperand(possible_undetectable, HeapObject::kMapOffset));
-  __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
-  __ tst(scratch, Operand(1 << Map::kIsUndetectable));
-  cc_reg_ = ne;
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
-  Register scratch0 = VirtualFrame::scratch0();
-  Register scratch1 = VirtualFrame::scratch1();
-  // Get the frame pointer for the calling frame.
-  __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
-  // Skip the arguments adaptor frame if it exists.
-  __ ldr(scratch1,
-         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
-  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ ldr(scratch0,
-         MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
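-  // The conditional ldr above only executes if the cmp matched, i.e. if the
-  // calling frame really is an arguments adaptor frame.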
-
-  // Check the marker in the calling frame.
-  __ ldr(scratch1,
-         MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
-  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
-  Register tos = frame_->GetTOSRegister();
-  Register scratch0 = VirtualFrame::scratch0();
-  Register scratch1 = VirtualFrame::scratch1();
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ ldr(scratch0,
-         MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(scratch1,
-         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
-  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Get the number of formal parameters.
-  __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ ldr(tos,
-         MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
-         eq);
-
-  frame_->EmitPush(tos);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-
-  // Satisfy contract with ArgumentsAccessStub:
-  // Load the key into r1 and the formal parameters count into r0.
-  Load(args->at(0));
-  frame_->PopToR1();
-  frame_->SpillAll();
-  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
-
-  // Call the shared stub to get to arguments[key].
-  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
-  frame_->CallStub(&stub, 0);
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
-    ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  ASSERT(args->length() == 0);
-
-  Label slow_allocate_heapnumber;
-  Label heapnumber_allocated;
-
-  __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-  __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
-  __ jmp(&heapnumber_allocated);
-
-  __ bind(&slow_allocate_heapnumber);
-  // Allocate a heap number.
-  __ CallRuntime(Runtime::kNumberAlloc, 0);
-  __ mov(r4, Operand(r0));
-
-  __ bind(&heapnumber_allocated);
-
-  // Convert 32 random bits in r0 to 0.(32 random bits) in a double
-  // by computing:
-  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
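-  // The random bits occupy the low 32 mantissa bits, so the subtraction
-  // yields a double in the range [0, 1).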
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-    __ PrepareCallCFunction(0, r1);
-    __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
-
-    CpuFeatures::Scope scope(VFP3);
-    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
-    // Create this constant using mov/orr to avoid PC relative load.
-    __ mov(r1, Operand(0x41000000));
-    __ orr(r1, r1, Operand(0x300000));
-    // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
-    __ vmov(d7, r0, r1);
-    // Move 0x4130000000000000 to VFP.
-    __ mov(r0, Operand(0, RelocInfo::NONE));
-    __ vmov(d8, r0, r1);
-    // Subtract and store the result in the heap number.
-    __ vsub(d7, d7, d8);
-    __ sub(r0, r4, Operand(kHeapObjectTag));
-    __ vstr(d7, r0, HeapNumber::kValueOffset);
-    frame_->EmitPush(r4);
-  } else {
-    __ mov(r0, Operand(r4));
-    __ PrepareCallCFunction(1, r1);
-    __ CallCFunction(
-        ExternalReference::fill_heap_number_with_random_function(isolate()), 1);
-    frame_->EmitPush(r0);
-  }
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringAddStub stub(NO_STRING_ADD_FLAGS);
-  frame_->SpillAll();
-  frame_->CallStub(&stub, 2);
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  SubStringStub stub;
-  frame_->SpillAll();
-  frame_->CallStub(&stub, 3);
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringCompareStub stub;
-  frame_->SpillAll();
-  frame_->CallStub(&stub, 2);
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
-  ASSERT_EQ(4, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-  Load(args->at(3));
-  RegExpExecStub stub;
-  frame_->SpillAll();
-  frame_->CallStub(&stub, 4);
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));  // Size of array, smi.
-  Load(args->at(1));  // "index" property value.
-  Load(args->at(2));  // "input" property value.
-  RegExpConstructResultStub stub;
-  frame_->SpillAll();
-  frame_->CallStub(&stub, 3);
-  frame_->EmitPush(r0);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
-  DeferredSearchCache(Register dst, Register cache, Register key)
-      : dst_(dst), cache_(cache), key_(key) {
-    set_comment("[ DeferredSearchCache");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_, cache_, key_;
-};
-
-
-void DeferredSearchCache::Generate() {
-  __ Push(cache_, key_);
-  __ CallRuntime(Runtime::kGetFromCache, 2);
-  __ Move(dst_, r0);
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  ASSERT_NE(NULL, args->at(0)->AsLiteral());
-  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
-  Handle<FixedArray> jsfunction_result_caches(
-      Isolate::Current()->global_context()->jsfunction_result_caches());
-  if (jsfunction_result_caches->length() <= cache_id) {
-    __ Abort("Attempt to use undefined cache.");
-    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-    return;
-  }
-
-  Load(args->at(1));
-
-  frame_->PopToR1();
-  frame_->SpillAll();
-  Register key = r1;  // Just popped to r1.
-  Register result = r0;  // Free, as frame has just been spilled.
-  Register scratch1 = VirtualFrame::scratch0();
-  Register scratch2 = VirtualFrame::scratch1();
-
-  __ ldr(scratch1, ContextOperand(cp, Context::GLOBAL_INDEX));
-  __ ldr(scratch1,
-         FieldMemOperand(scratch1, GlobalObject::kGlobalContextOffset));
-  __ ldr(scratch1,
-         ContextOperand(scratch1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
-  __ ldr(scratch1,
-         FieldMemOperand(scratch1, FixedArray::OffsetOfElementAt(cache_id)));
-
-  DeferredSearchCache* deferred =
-      new DeferredSearchCache(result, scratch1, key);
-
-  const int kFingerOffset =
-      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ ldr(result, FieldMemOperand(scratch1, kFingerOffset));
-  // result now holds finger offset as a smi.
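-  // The cache is a FixedArray holding (key, value) pairs; the finger is the
-  // offset of the key of the most recently used pair.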
-  __ add(scratch2, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  // scratch2 now points to the start of fixed array elements.
-  __ ldr(result,
-         MemOperand(
-             scratch2, result, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
-  // Note side effect of PreIndex: scratch2 now points to the key of the pair.
-  __ cmp(key, result);
-  deferred->Branch(ne);
-
-  __ ldr(result, MemOperand(scratch2, kPointerSize));
-
-  deferred->BindExit();
-  frame_->EmitPush(result);
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-
-  // Load the argument on the stack and call the stub.
-  Load(args->at(0));
-
-  NumberToStringStub stub;
-  frame_->SpillAll();
-  frame_->CallStub(&stub, 1);
-  frame_->EmitPush(r0);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
-  DeferredSwapElements(Register object, Register index1, Register index2)
-      : object_(object), index1_(index1), index2_(index2) {
-    set_comment("[ DeferredSwapElements");
-  }
-
-  virtual void Generate();
-
- private:
-  Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
-  __ push(object_);
-  __ push(index1_);
-  __ push(index2_);
-  __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
-  Comment cmnt(masm_, "[ GenerateSwapElements");
-
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-
-  Register index2 = r2;
-  Register index1 = r1;
-  Register object = r0;
-  Register tmp1 = r3;
-  Register tmp2 = r4;
-
-  frame_->EmitPop(index2);
-  frame_->EmitPop(index1);
-  frame_->EmitPop(object);
-
-  DeferredSwapElements* deferred =
-      new DeferredSwapElements(object, index1, index2);
-
-  // Fetch the map and check if array is in fast case.
-  // Check that object doesn't require security checks and
-  // has no indexed interceptor.
-  __ CompareObjectType(object, tmp1, tmp2, JS_ARRAY_TYPE);
-  deferred->Branch(ne);
-  __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
-  __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
-  deferred->Branch(ne);
-
-  // Check the object's elements are in fast case and writable.
-  __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
-  __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(tmp2, ip);
-  deferred->Branch(ne);
-
-  // Smi-tagging is equivalent to multiplying by 2.
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
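-  // A smi stores the integer i as (i << 1), so shifting an index smi left by
-  // (kPointerSizeLog2 - kSmiTagSize) scales i by the pointer size.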
-
-  // Check that both indices are smis.
-  __ mov(tmp2, index1);
-  __ orr(tmp2, tmp2, index2);
-  __ tst(tmp2, Operand(kSmiTagMask));
-  deferred->Branch(ne);
-
-  // Check that both indices are valid.
-  __ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
-  __ cmp(tmp2, index1);
-  __ cmp(tmp2, index2, hi);
-  deferred->Branch(ls);
-
-  // Convert index1 and index2 into offsets into the fixed array whose
-  // address is held in tmp1.
-  __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(index1, tmp2, Operand(index1, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ add(index2, tmp2, Operand(index2, LSL, kPointerSizeLog2 - kSmiTagSize));
-
-  // Swap elements.
-  Register tmp3 = object;
-  object = no_reg;
-  __ ldr(tmp3, MemOperand(tmp1, index1));
-  __ ldr(tmp2, MemOperand(tmp1, index2));
-  __ str(tmp3, MemOperand(tmp1, index2));
-  __ str(tmp2, MemOperand(tmp1, index1));
-
-  Label done;
-  __ InNewSpace(tmp1, tmp2, eq, &done);
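-  // Stores into new-space objects never need the write barrier, so it is
-  // skipped when the elements array is in new space.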
-  // Possible optimization: check that both values are smis (OR them together
-  // and test against the smi mask).
-
-  __ mov(tmp2, tmp1);
-  __ add(index1, index1, tmp1);
-  __ add(index2, index2, tmp1);
-  __ RecordWriteHelper(tmp1, index1, tmp3);
-  __ RecordWriteHelper(tmp2, index2, tmp3);
-  __ bind(&done);
-
-  deferred->BindExit();
-  __ LoadRoot(tmp1, Heap::kUndefinedValueRootIndex);
-  frame_->EmitPush(tmp1);
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
-  Comment cmnt(masm_, "[ GenerateCallFunction");
-
-  ASSERT(args->length() >= 2);
-
-  int n_args = args->length() - 2;  // for receiver and function.
-  Load(args->at(0));  // receiver
-  for (int i = 0; i < n_args; i++) {
-    Load(args->at(i + 1));
-  }
-  Load(args->at(n_args + 1));  // function
-  frame_->CallJSFunction(n_args);
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-    TranscendentalCacheStub stub(TranscendentalCache::SIN,
-                                 TranscendentalCacheStub::TAGGED);
-    frame_->SpillAllButCopyTOSToR0();
-    frame_->CallStub(&stub, 1);
-  } else {
-    frame_->CallRuntime(Runtime::kMath_sin, 1);
-  }
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-    TranscendentalCacheStub stub(TranscendentalCache::COS,
-                                 TranscendentalCacheStub::TAGGED);
-    frame_->SpillAllButCopyTOSToR0();
-    frame_->CallStub(&stub, 1);
-  } else {
-    frame_->CallRuntime(Runtime::kMath_cos, 1);
-  }
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
-    TranscendentalCacheStub stub(TranscendentalCache::LOG,
-                                 TranscendentalCacheStub::TAGGED);
-    frame_->SpillAllButCopyTOSToR0();
-    frame_->CallStub(&stub, 1);
-  } else {
-    frame_->CallRuntime(Runtime::kMath_log, 1);
-  }
-  frame_->EmitPush(r0);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  Load(args->at(0));
-  Load(args->at(1));
-  Register lhs = frame_->PopToRegister();
-  Register rhs = frame_->PopToRegister(lhs);
-  __ cmp(lhs, rhs);
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  Load(args->at(0));
-  Load(args->at(1));
-  Register right = frame_->PopToRegister();
-  Register left = frame_->PopToRegister(right);
-  Register tmp = frame_->scratch0();
-  Register tmp2 = frame_->scratch1();
-
-  // Jumps to done must have the eq flag set if the test is successful
-  // and clear if the test has failed.
-  Label done;
-
-  // Fail if either is a non-HeapObject.
-  __ cmp(left, Operand(right));
-  __ b(eq, &done);
-  __ and_(tmp, left, Operand(right));
-  __ eor(tmp, tmp, Operand(kSmiTagMask));
-  __ tst(tmp, Operand(kSmiTagMask));
-  __ b(ne, &done);
-  __ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
-  __ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
-  __ cmp(tmp2, Operand(JS_REGEXP_TYPE));
-  __ b(ne, &done);
-  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
-  __ cmp(tmp, Operand(tmp2));
-  __ b(ne, &done);
-  __ ldr(tmp, FieldMemOperand(left, JSRegExp::kDataOffset));
-  __ ldr(tmp2, FieldMemOperand(right, JSRegExp::kDataOffset));
-  __ cmp(tmp, tmp2);
-  __ bind(&done);
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register value = frame_->PopToRegister();
-  Register tmp = frame_->scratch0();
-  __ ldr(tmp, FieldMemOperand(value, String::kHashFieldOffset));
-  __ tst(tmp, Operand(String::kContainsCachedArrayIndexMask));
-  cc_reg_ = eq;
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Register value = frame_->PopToRegister();
-
-  __ ldr(value, FieldMemOperand(value, String::kHashFieldOffset));
-  __ IndexFromHash(value, value);
-  frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  Load(args->at(0));
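-  // The fast case is not implemented here: the argument is simply dropped
-  // and undefined is returned.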
-  Register value = frame_->PopToRegister();
-  __ LoadRoot(value, Heap::kUndefinedValueRootIndex);
-  frame_->EmitPush(value);
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  if (CheckForInlineRuntimeCall(node)) {
-    ASSERT((has_cc() && frame_->height() == original_height) ||
-           (!has_cc() && frame_->height() == original_height + 1));
-    return;
-  }
-
-  ZoneList<Expression*>* args = node->arguments();
-  Comment cmnt(masm_, "[ CallRuntime");
-  const Runtime::Function* function = node->function();
-
-  if (function == NULL) {
-    // Prepare stack for calling JS runtime function.
-    // Push the builtins object found in the current global object.
-    Register scratch = VirtualFrame::scratch0();
-    __ ldr(scratch, GlobalObjectOperand());
-    Register builtins = frame_->GetTOSRegister();
-    __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset));
-    frame_->EmitPush(builtins);
-  }
-
-  // Push the arguments ("left-to-right").
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-
-  if (function == NULL) {
-    // Call the JS runtime function.
-    __ mov(r2, Operand(node->name()));
-    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    Handle<Code> stub =
-        ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-    frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
-    __ ldr(cp, frame_->Context());
-    frame_->EmitPush(r0);
-  } else {
-    // Call the C runtime function.
-    frame_->CallRuntime(function, arg_count);
-    frame_->EmitPush(r0);
-  }
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ UnaryOperation");
-
-  Token::Value op = node->op();
-
-  if (op == Token::NOT) {
-    LoadCondition(node->expression(), false_target(), true_target(), true);
-    // LoadCondition may (and usually does) leave a test and branch to
-    // be emitted by the caller.  In that case, negate the condition.
-    if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
-
-  } else if (op == Token::DELETE) {
-    Property* property = node->expression()->AsProperty();
-    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
-    if (property != NULL) {
-      Load(property->obj());
-      Load(property->key());
-      frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
-      frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
-      frame_->EmitPush(r0);
-
-    } else if (variable != NULL) {
-      // Delete of an unqualified identifier is disallowed in strict mode,
-      // but "delete this" is allowed.
-      ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
-      Slot* slot = variable->AsSlot();
-      if (variable->is_global()) {
-        LoadGlobal();
-        frame_->EmitPush(Operand(variable->name()));
-        frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode)));
-        frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
-        frame_->EmitPush(r0);
-
-      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-        // Delete from the context holding the named variable.
-        frame_->EmitPush(cp);
-        frame_->EmitPush(Operand(variable->name()));
-        frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
-        frame_->EmitPush(r0);
-
-      } else {
-        // Default: Result of deleting non-global, not dynamically
-        // introduced variables is false.
-        frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
-      }
-
-    } else {
-      // Default: Result of deleting expressions is true.
-      Load(node->expression());  // may have side-effects
-      frame_->Drop();
-      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
-    }
-
-  } else if (op == Token::TYPEOF) {
-    // Special case for loading the typeof expression; see comment on
-    // LoadTypeofExpression().
-    LoadTypeofExpression(node->expression());
-    frame_->CallRuntime(Runtime::kTypeof, 1);
-    frame_->EmitPush(r0);  // r0 has result
-
-  } else {
-    bool can_overwrite = node->expression()->ResultOverwriteAllowed();
-    UnaryOverwriteMode overwrite =
-        can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-
-    bool no_negative_zero = node->expression()->no_negative_zero();
-    Load(node->expression());
-    switch (op) {
-      case Token::NOT:
-      case Token::DELETE:
-      case Token::TYPEOF:
-        UNREACHABLE();  // handled above
-        break;
-
-      case Token::SUB: {
-        frame_->PopToR0();
-        GenericUnaryOpStub stub(
-            Token::SUB,
-            overwrite,
-            NO_UNARY_FLAGS,
-            no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
-        frame_->CallStub(&stub, 0);
-        frame_->EmitPush(r0);  // r0 has result
-        break;
-      }
-
-      case Token::BIT_NOT: {
-        Register tos = frame_->PopToRegister();
-        JumpTarget not_smi_label;
-        JumpTarget continue_label;
-        // Smi check.
-        __ tst(tos, Operand(kSmiTagMask));
-        not_smi_label.Branch(ne);
-
-        __ mvn(tos, Operand(tos));
-        __ bic(tos, tos, Operand(kSmiTagMask));  // Bit-clear inverted smi-tag.
-        frame_->EmitPush(tos);
-        // The fast case is the first to jump to the continue label, so it gets
-        // to decide the virtual frame layout.
-        continue_label.Jump();
-
-        not_smi_label.Bind();
-        frame_->SpillAll();
-        __ Move(r0, tos);
-        GenericUnaryOpStub stub(Token::BIT_NOT,
-                                overwrite,
-                                NO_UNARY_SMI_CODE_IN_STUB);
-        frame_->CallStub(&stub, 0);
-        frame_->EmitPush(r0);
-
-        continue_label.Bind();
-        break;
-      }
-
-      case Token::VOID:
-        frame_->Drop();
-        frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
-        break;
-
-      case Token::ADD: {
-        Register tos = frame_->Peek();
-        // Smi check.
-        JumpTarget continue_label;
-        __ tst(tos, Operand(kSmiTagMask));
-        continue_label.Branch(eq);
-
-        frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
-        frame_->EmitPush(r0);
-
-        continue_label.Bind();
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
-  }
-  ASSERT(!has_valid_frame() ||
-         (has_cc() && frame_->height() == original_height) ||
-         (!has_cc() && frame_->height() == original_height + 1));
-}
-
-
-class DeferredCountOperation: public DeferredCode {
- public:
-  DeferredCountOperation(Register value,
-                         bool is_increment,
-                         bool is_postfix,
-                         int target_size)
-      : value_(value),
-        is_increment_(is_increment),
-        is_postfix_(is_postfix),
-        target_size_(target_size) {}
-
-  virtual void Generate() {
-    VirtualFrame copied_frame(*frame_state()->frame());
-
-    Label slow;
-    // Check for smi operand.
-    __ tst(value_, Operand(kSmiTagMask));
-    __ b(ne, &slow);
-
-    // Revert optimistic increment/decrement.
-    if (is_increment_) {
-      __ sub(value_, value_, Operand(Smi::FromInt(1)));
-    } else {
-      __ add(value_, value_, Operand(Smi::FromInt(1)));
-    }
-
-    // Slow case: Convert to number.  At this point the
-    // value to be incremented is in the value register.
-    __ bind(&slow);
-
-    // Convert the operand to a number.
-    copied_frame.EmitPush(value_);
-
-    copied_frame.InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
-
-    if (is_postfix_) {
-      // Postfix: store to result (on the stack).
-      __ str(r0,  MemOperand(sp, target_size_ * kPointerSize));
-    }
-
-    copied_frame.EmitPush(r0);
-    copied_frame.EmitPush(Operand(Smi::FromInt(1)));
-
-    if (is_increment_) {
-      copied_frame.CallRuntime(Runtime::kNumberAdd, 2);
-    } else {
-      copied_frame.CallRuntime(Runtime::kNumberSub, 2);
-    }
-
-    __ Move(value_, r0);
-
-    copied_frame.MergeTo(frame_state()->frame());
-  }
-
- private:
-  Register value_;
-  bool is_increment_;
-  bool is_postfix_;
-  int target_size_;
-};
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ CountOperation");
-  VirtualFrame::RegisterAllocationScope scope(this);
-
-  bool is_postfix = node->is_postfix();
-  bool is_increment = node->op() == Token::INC;
-
-  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
-  bool is_const = (var != NULL && var->mode() == Variable::CONST);
-  bool is_slot = (var != NULL && var->mode() == Variable::VAR);
-
-  if (!is_const && is_slot && type_info(var->AsSlot()).IsSmi()) {
-    // The type info declares that this variable is always a Smi.  That
-    // means it is a Smi both before and after the increment/decrement.
-    // Let's make use of that to generate a very minimal count operation.
-    Reference target(this, node->expression(), !is_const);
-    ASSERT(!target.is_illegal());
-    target.GetValue();  // Pushes the value.
-    Register value = frame_->PopToRegister();
-    if (is_postfix) frame_->EmitPush(value);
-    if (is_increment) {
-      __ add(value, value, Operand(Smi::FromInt(1)));
-    } else {
-      __ sub(value, value, Operand(Smi::FromInt(1)));
-    }
-    frame_->EmitPush(value);
-    target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
-    if (is_postfix) frame_->Pop();
-    ASSERT_EQ(original_height + 1, frame_->height());
-    return;
-  }
-
-  // If it's a postfix expression and its result is not ignored and the
-  // reference is non-trivial, then push a placeholder on the stack now
-  // to hold the result of the expression.
-  bool placeholder_pushed = false;
-  if (!is_slot && is_postfix) {
-    frame_->EmitPush(Operand(Smi::FromInt(0)));
-    placeholder_pushed = true;
-  }
-
-  // A constant reference is not saved to, so a constant reference is not a
-  // compound assignment reference.
-  { Reference target(this, node->expression(), !is_const);
-    if (target.is_illegal()) {
-      // Spoof the virtual frame to have the expected height (one higher
-      // than on entry).
-      if (!placeholder_pushed) frame_->EmitPush(Operand(Smi::FromInt(0)));
-      ASSERT_EQ(original_height + 1, frame_->height());
-      return;
-    }
-
-    // This pushes 0, 1 or 2 words onto the stack to be used later when
-    // updating the target.  It also pushes the current value of the target.
-    target.GetValue();
-
-    bool value_is_known_smi = frame_->KnownSmiAt(0);
-    Register value = frame_->PopToRegister();
-
-    // Postfix: Store the old value as the result.
-    if (placeholder_pushed) {
-      frame_->SetElementAt(value, target.size());
-    } else if (is_postfix) {
-      frame_->EmitPush(value);
-      __ mov(VirtualFrame::scratch0(), value);
-      value = VirtualFrame::scratch0();
-    }
-
-    // We can't use any type information here since the virtual frame from the
-    // deferred code may have lost information and we can't merge a virtual
-    // frame with less specific type knowledge to a virtual frame with more
-    // specific knowledge that has already used that specific knowledge to
-    // generate code.
-    frame_->ForgetTypeInfo();
-
-    // The constructor here will capture the current virtual frame and use it to
-    // merge to after the deferred code has run.  No virtual frame changes are
-    // allowed from here until the 'BindExit' below.
-    DeferredCode* deferred =
-        new DeferredCountOperation(value,
-                                   is_increment,
-                                   is_postfix,
-                                   target.size());
-    if (!value_is_known_smi) {
-      // Check for smi operand.
-      __ tst(value, Operand(kSmiTagMask));
-
-      deferred->Branch(ne);
-    }
-
-    // Perform optimistic increment/decrement.
-    if (is_increment) {
-      __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
-    } else {
-      __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
-    }
-
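-    // On a tagged smi (i << 1), adding or subtracting the tagged constant 1
-    // (bit pattern 2) yields the tagged value of i +/- 1; smi overflow sets
-    // the V flag.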
-    // If increment/decrement overflows, go to deferred code.
-    deferred->Branch(vs);
-
-    deferred->BindExit();
-
-    // Store the new value in the target if not const.
-    // At this point the answer is in the value register.
-    frame_->EmitPush(value);
-    // Set the target with the result, leaving the result on
-    // top of the stack.  Removes the target from the stack if
-    // it has a non-zero size.
-    if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
-  }
-
-  // Postfix: Discard the new value and use the old.
-  if (is_postfix) frame_->Pop();
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
-  // According to ECMA-262 section 11.11, page 58, the binary logical
-  // operators must yield the result of one of the two expressions
-  // before any ToBoolean() conversions. This means that the value
-  // produced by a && or || operator is not necessarily a boolean.
-
-  // NOTE: If the left hand side produces a materialized value (not in
-  // the CC register), we force the right hand side to do the
-  // same. This is necessary because we may have to branch to the exit
-  // after evaluating the left hand side (due to the shortcut
-  // semantics), but the compiler must (statically) know if the result
-  // of compiling the binary operation is materialized or not.
-  if (node->op() == Token::AND) {
-    JumpTarget is_true;
-    LoadCondition(node->left(), &is_true, false_target(), false);
-    if (has_valid_frame() && !has_cc()) {
-      // The left-hand side result is on top of the virtual frame.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      frame_->Dup();
-      // Avoid popping the result if it converts to 'false' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      ToBoolean(&pop_and_continue, &exit);
-      Branch(false, &exit);
-
-      // Pop the result of evaluating the first part.
-      pop_and_continue.Bind();
-      frame_->Pop();
-
-      // Evaluate right side expression.
-      is_true.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    } else if (has_cc() || is_true.is_linked()) {
-      // The left-hand side is either (a) partially compiled to
-      // control flow with a final branch left to emit or (b) fully
-      // compiled to control flow and possibly true.
-      if (has_cc()) {
-        Branch(false, false_target());
-      }
-      is_true.Bind();
-      LoadCondition(node->right(), true_target(), false_target(), false);
-    } else {
-      // Nothing to do.
-      ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
-    }
-
-  } else {
-    ASSERT(node->op() == Token::OR);
-    JumpTarget is_false;
-    LoadCondition(node->left(), true_target(), &is_false, false);
-    if (has_valid_frame() && !has_cc()) {
-      // The left-hand side result is on top of the virtual frame.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      frame_->Dup();
-      // Avoid popping the result if it converts to 'true' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      ToBoolean(&exit, &pop_and_continue);
-      Branch(true, &exit);
-
-      // Pop the result of evaluating the first part.
-      pop_and_continue.Bind();
-      frame_->Pop();
-
-      // Evaluate right side expression.
-      is_false.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    } else if (has_cc() || is_false.is_linked()) {
-      // The left-hand side is either (a) partially compiled to
-      // control flow with a final branch left to emit or (b) fully
-      // compiled to control flow and possibly false.
-      if (has_cc()) {
-        Branch(true, true_target());
-      }
-      is_false.Bind();
-      LoadCondition(node->right(), true_target(), false_target(), false);
-    } else {
-      // Nothing to do.
-      ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
-    }
-  }
-}
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ BinaryOperation");
-
-  if (node->op() == Token::AND || node->op() == Token::OR) {
-    GenerateLogicalBooleanOperation(node);
-  } else {
-    // Optimize for the case where (at least) one of the expressions
-    // is a literal small integer.
-    Literal* lliteral = node->left()->AsLiteral();
-    Literal* rliteral = node->right()->AsLiteral();
-    // NOTE: The code below assumes that the slow cases (calls to runtime)
-    // never return a constant/immutable object.
-    bool overwrite_left = node->left()->ResultOverwriteAllowed();
-    bool overwrite_right = node->right()->ResultOverwriteAllowed();
-
-    if (rliteral != NULL && rliteral->handle()->IsSmi()) {
-      VirtualFrame::RegisterAllocationScope scope(this);
-      Load(node->left());
-      if (frame_->KnownSmiAt(0)) overwrite_left = false;
-      SmiOperation(node->op(),
-                   rliteral->handle(),
-                   false,
-                   overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
-    } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
-      VirtualFrame::RegisterAllocationScope scope(this);
-      Load(node->right());
-      if (frame_->KnownSmiAt(0)) overwrite_right = false;
-      SmiOperation(node->op(),
-                   lliteral->handle(),
-                   true,
-                   overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
-    } else {
-      GenerateInlineSmi inline_smi =
-          loop_nesting() > 0 ? GENERATE_INLINE_SMI : DONT_GENERATE_INLINE_SMI;
-      if (lliteral != NULL) {
-        ASSERT(!lliteral->handle()->IsSmi());
-        inline_smi = DONT_GENERATE_INLINE_SMI;
-      }
-      if (rliteral != NULL) {
-        ASSERT(!rliteral->handle()->IsSmi());
-        inline_smi = DONT_GENERATE_INLINE_SMI;
-      }
-      VirtualFrame::RegisterAllocationScope scope(this);
-      OverwriteMode overwrite_mode = NO_OVERWRITE;
-      if (overwrite_left) {
-        overwrite_mode = OVERWRITE_LEFT;
-      } else if (overwrite_right) {
-        overwrite_mode = OVERWRITE_RIGHT;
-      }
-      Load(node->left());
-      Load(node->right());
-      GenericBinaryOperation(node->op(), overwrite_mode, inline_smi);
-    }
-  }
-  ASSERT(!has_valid_frame() ||
-         (has_cc() && frame_->height() == original_height) ||
-         (!has_cc() && frame_->height() == original_height + 1));
-}
-
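-// Editor's note: a hedged JS illustration of the smi-literal fast path
-// above.  When one operand is a smi literal only the other operand is
-// loaded, and SmiOperation is used with 'reversed' recording which side
-// the literal came from:
-//
-//   x + 1   // rliteral is a smi: load x, SmiOperation(ADD, 1, reversed=false)
-//   1 + x   // lliteral is a smi: load x, SmiOperation(ADD, 1, reversed=true)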
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  frame_->EmitPush(MemOperand(frame_->Function()));
-  ASSERT_EQ(original_height + 1, frame_->height());
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ CompareOperation");
-
-  VirtualFrame::RegisterAllocationScope nonspilled_scope(this);
-
-  // Get the expressions from the node.
-  Expression* left = node->left();
-  Expression* right = node->right();
-  Token::Value op = node->op();
-
-  // To make typeof testing for natives implemented in JavaScript really
-  // efficient, we generate special code for expressions of the form:
-  // 'typeof <expression> == <string>'.
-  UnaryOperation* operation = left->AsUnaryOperation();
-  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
-      (operation != NULL && operation->op() == Token::TYPEOF) &&
-      (right->AsLiteral() != NULL &&
-       right->AsLiteral()->handle()->IsString())) {
-    Handle<String> check(String::cast(*right->AsLiteral()->handle()));
-
-    // Load the operand, move it to a register.
-    LoadTypeofExpression(operation->expression());
-    Register tos = frame_->PopToRegister();
-
-    Register scratch = VirtualFrame::scratch0();
-
-    if (check->Equals(HEAP->number_symbol())) {
-      __ tst(tos, Operand(kSmiTagMask));
-      true_target()->Branch(eq);
-      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
-      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-      __ cmp(tos, ip);
-      cc_reg_ = eq;
-
-    } else if (check->Equals(HEAP->string_symbol())) {
-      __ tst(tos, Operand(kSmiTagMask));
-      false_target()->Branch(eq);
-
-      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
-
-      // It can be an undetectable string object.
-      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
-      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
-      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
-      false_target()->Branch(eq);
-
-      __ ldrb(scratch, FieldMemOperand(tos, Map::kInstanceTypeOffset));
-      __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
-      cc_reg_ = lt;
-
-    } else if (check->Equals(HEAP->boolean_symbol())) {
-      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-      __ cmp(tos, ip);
-      true_target()->Branch(eq);
-      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-      __ cmp(tos, ip);
-      cc_reg_ = eq;
-
-    } else if (check->Equals(HEAP->undefined_symbol())) {
-      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-      __ cmp(tos, ip);
-      true_target()->Branch(eq);
-
-      __ tst(tos, Operand(kSmiTagMask));
-      false_target()->Branch(eq);
-
-      // It can be an undetectable object.
-      __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
-      __ ldrb(scratch, FieldMemOperand(tos, Map::kBitFieldOffset));
-      __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
-      __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
-
-      cc_reg_ = eq;
-
-    } else if (check->Equals(HEAP->function_symbol())) {
-      __ tst(tos, Operand(kSmiTagMask));
-      false_target()->Branch(eq);
-      Register map_reg = scratch;
-      __ CompareObjectType(tos, map_reg, tos, JS_FUNCTION_TYPE);
-      true_target()->Branch(eq);
-      // Regular expressions are callable so typeof == 'function'.
-      __ CompareInstanceType(map_reg, tos, JS_REGEXP_TYPE);
-      cc_reg_ = eq;
-
-    } else if (check->Equals(HEAP->object_symbol())) {
-      __ tst(tos, Operand(kSmiTagMask));
-      false_target()->Branch(eq);
-
-      __ LoadRoot(ip, Heap::kNullValueRootIndex);
-      __ cmp(tos, ip);
-      true_target()->Branch(eq);
-
-      Register map_reg = scratch;
-      __ CompareObjectType(tos, map_reg, tos, JS_REGEXP_TYPE);
-      false_target()->Branch(eq);
-
-      // It can be an undetectable object.
-      __ ldrb(tos, FieldMemOperand(map_reg, Map::kBitFieldOffset));
-      __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
-      __ cmp(tos, Operand(1 << Map::kIsUndetectable));
-      false_target()->Branch(eq);
-
-      __ ldrb(tos, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
-      __ cmp(tos, Operand(FIRST_JS_OBJECT_TYPE));
-      false_target()->Branch(lt);
-      __ cmp(tos, Operand(LAST_JS_OBJECT_TYPE));
-      cc_reg_ = le;
-
-    } else {
-      // Uncommon case: typeof testing against a string literal that is
-      // never returned from the typeof operator.
-      false_target()->Jump();
-    }
-    ASSERT(!has_valid_frame() ||
-           (has_cc() && frame_->height() == original_height));
-    return;
-  }
-
-  switch (op) {
-    case Token::EQ:
-      Comparison(eq, left, right, false);
-      break;
-
-    case Token::LT:
-      Comparison(lt, left, right);
-      break;
-
-    case Token::GT:
-      Comparison(gt, left, right);
-      break;
-
-    case Token::LTE:
-      Comparison(le, left, right);
-      break;
-
-    case Token::GTE:
-      Comparison(ge, left, right);
-      break;
-
-    case Token::EQ_STRICT:
-      Comparison(eq, left, right, true);
-      break;
-
-    case Token::IN: {
-      Load(left);
-      Load(right);
-      frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
-      frame_->EmitPush(r0);
-      break;
-    }
-
-    case Token::INSTANCEOF: {
-      Load(left);
-      Load(right);
-      InstanceofStub stub(InstanceofStub::kNoFlags);
-      frame_->CallStub(&stub, 2);
-      // At this point if instanceof succeeded then r0 == 0.
-      __ tst(r0, Operand(r0));
-      cc_reg_ = eq;
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-  ASSERT((has_cc() && frame_->height() == original_height) ||
-         (!has_cc() && frame_->height() == original_height + 1));
-}
-
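-// Editor's note: a hedged JS illustration of the typeof fast path above.
-// No typeof string is ever materialized; the comparison compiles directly
-// to tag and map checks on the operand:
-//
-//   typeof x == 'number'   // smi check, else heap-number map check
-//   typeof x == 'function' // JS_FUNCTION_TYPE, or JS_REGEXP_TYPE since
-//                          // regular expressions are callable here
-//   typeof x == 'banana'   // never produced by typeof: jump to false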
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  Comment cmnt(masm_, "[ CompareToNull");
-
-  Load(node->expression());
-  Register tos = frame_->PopToRegister();
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
-  __ cmp(tos, ip);
-
-  // The 'null' value is only equal to 'undefined' if using non-strict
-  // comparisons.
-  if (!node->is_strict()) {
-    true_target()->Branch(eq);
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ cmp(tos, Operand(ip));
-    true_target()->Branch(eq);
-
-    __ tst(tos, Operand(kSmiTagMask));
-    false_target()->Branch(eq);
-
-    // It can be an undetectable object.
-    __ ldr(tos, FieldMemOperand(tos, HeapObject::kMapOffset));
-    __ ldrb(tos, FieldMemOperand(tos, Map::kBitFieldOffset));
-    __ and_(tos, tos, Operand(1 << Map::kIsUndetectable));
-    __ cmp(tos, Operand(1 << Map::kIsUndetectable));
-  }
-
-  cc_reg_ = eq;
-  ASSERT(has_cc() && frame_->height() == original_height);
-}
-
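-// Editor's note: a hedged JS illustration of the comparison above:
-//
-//   x == null    // true for null, undefined and undetectable objects
-//   x === null   // true only for null itself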
-
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
-  explicit DeferredReferenceGetNamedValue(Register receiver,
-                                          Handle<String> name,
-                                          bool is_contextual)
-      : receiver_(receiver),
-        name_(name),
-        is_contextual_(is_contextual),
-        is_dont_delete_(false) {
-    set_comment(is_contextual
-                ? "[ DeferredReferenceGetNamedValue (contextual)"
-                : "[ DeferredReferenceGetNamedValue");
-  }
-
-  virtual void Generate();
-
-  void set_is_dont_delete(bool value) {
-    ASSERT(is_contextual_);
-    is_dont_delete_ = value;
-  }
-
- private:
-  Register receiver_;
-  Handle<String> name_;
-  bool is_contextual_;
-  bool is_dont_delete_;
-};
-
-
-// The convention here is that on entry the receiver is in a register that
-// is not in use by the virtual frame.  On exit the answer is found in that
-// same register and the stack has the same height.
-void DeferredReferenceGetNamedValue::Generate() {
-#ifdef DEBUG
-  int expected_height = frame_state()->frame()->height();
-#endif
-  VirtualFrame copied_frame(*frame_state()->frame());
-  copied_frame.SpillAll();
-
-  Register scratch1 = VirtualFrame::scratch0();
-  Register scratch2 = VirtualFrame::scratch1();
-  ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
-  __ DecrementCounter(masm_->isolate()->counters()->named_load_inline(),
-                      1, scratch1, scratch2);
-  __ IncrementCounter(masm_->isolate()->counters()->named_load_inline_miss(),
-                      1, scratch1, scratch2);
-
-  // Ensure receiver in r0 and name in r2 to match load ic calling convention.
-  __ Move(r0, receiver_);
-  __ mov(r2, Operand(name_));
-
-  // The rest of the instructions in the deferred code must be together.
-  { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-        Builtins::kLoadIC_Initialize));
-    RelocInfo::Mode mode = is_contextual_
-        ? RelocInfo::CODE_TARGET_CONTEXT
-        : RelocInfo::CODE_TARGET;
-    __ Call(ic, mode);
-    // We must mark the code just after the call with the correct marker.
-    MacroAssembler::NopMarkerTypes code_marker;
-    if (is_contextual_) {
-      code_marker = is_dont_delete_
-                   ? MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE
-                   : MacroAssembler::PROPERTY_ACCESS_INLINED_CONTEXT;
-    } else {
-      code_marker = MacroAssembler::PROPERTY_ACCESS_INLINED;
-    }
-    __ MarkCode(code_marker);
-
-    // At this point the answer is in r0.  We move it to the expected register
-    // if necessary.
-    __ Move(receiver_, r0);
-
-    // Now go back to the frame that we entered with.  This will not overwrite
-    // the receiver register since that register was not in use when we came
-    // in.  The instructions emitted by this merge are skipped over by the
-    // inline load patching mechanism when looking for the branch instruction
-    // that tells it where the code to patch is.
-    copied_frame.MergeTo(frame_state()->frame());
-
-    // Block the constant pool for one more instruction after leaving this
-    // constant pool block scope to include the branch instruction ending the
-    // deferred code.
-    __ BlockConstPoolFor(1);
-  }
-  ASSERT_EQ(expected_height, frame_state()->frame()->height());
-}
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceGetKeyedValue(Register key, Register receiver)
-      : key_(key), receiver_(receiver) {
-    set_comment("[ DeferredReferenceGetKeyedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register key_;
-  Register receiver_;
-};
-
-
-// Takes the key and the receiver in r0 and r1 or vice versa.  Returns the
-// result in r0.
-void DeferredReferenceGetKeyedValue::Generate() {
-  ASSERT((key_.is(r0) && receiver_.is(r1)) ||
-         (key_.is(r1) && receiver_.is(r0)));
-
-  VirtualFrame copied_frame(*frame_state()->frame());
-  copied_frame.SpillAll();
-
-  Register scratch1 = VirtualFrame::scratch0();
-  Register scratch2 = VirtualFrame::scratch1();
-  __ DecrementCounter(masm_->isolate()->counters()->keyed_load_inline(),
-                      1, scratch1, scratch2);
-  __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline_miss(),
-                      1, scratch1, scratch2);
-
-  // Ensure key in r0 and receiver in r1 to match keyed load ic calling
-  // convention.
-  if (key_.is(r1)) {
-    __ Swap(r0, r1, ip);
-  }
-
-  // The rest of the instructions in the deferred code must be together.
-  { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
-    Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-        Builtins::kKeyedLoadIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    // The call must be followed by a nop instruction to indicate that the
-    // keyed load has been inlined.
-    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
-    // Now go back to the frame that we entered with.  This will not overwrite
-    // the receiver or key registers since they were not in use when we came
-    // in.  The instructions emitted by this merge are skipped over by the
-    // inline load patching mechanism when looking for the branch instruction
-    // that tells it where the code to patch is.
-    copied_frame.MergeTo(frame_state()->frame());
-
-    // Block the constant pool for one more instruction after leaving this
-    // constant pool block scope to include the branch instruction ending the
-    // deferred code.
-    __ BlockConstPoolFor(1);
-  }
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceSetKeyedValue(Register value,
-                                 Register key,
-                                 Register receiver,
-                                 StrictModeFlag strict_mode)
-      : value_(value),
-        key_(key),
-        receiver_(receiver),
-        strict_mode_(strict_mode) {
-    set_comment("[ DeferredReferenceSetKeyedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register value_;
-  Register key_;
-  Register receiver_;
-  StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
-  Register scratch1 = VirtualFrame::scratch0();
-  Register scratch2 = VirtualFrame::scratch1();
-  __ DecrementCounter(masm_->isolate()->counters()->keyed_store_inline(),
-                      1, scratch1, scratch2);
-  __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline_miss(),
-                      1, scratch1, scratch2);
-
-  // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
-  // calling convention.
-  if (value_.is(r1)) {
-    __ Swap(r0, r1, ip);
-  }
-  ASSERT(receiver_.is(r2));
-
-  // The rest of the instructions in the deferred code must be together.
-  { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    // Call keyed store IC. It has the arguments value, key and receiver in r0,
-    // r1 and r2.
-    Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-        (strict_mode_ == kStrictMode)
-        ? Builtins::kKeyedStoreIC_Initialize_Strict
-        : Builtins::kKeyedStoreIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    // The call must be followed by a nop instruction to indicate that the
-    // keyed store has been inlined.
-    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
-    // Block the constant pool for one more instruction after leaving this
-    // constant pool block scope to include the branch instruction ending the
-    // deferred code.
-    __ BlockConstPoolFor(1);
-  }
-}
-
-
-class DeferredReferenceSetNamedValue: public DeferredCode {
- public:
-  DeferredReferenceSetNamedValue(Register value,
-                                 Register receiver,
-                                 Handle<String> name,
-                                 StrictModeFlag strict_mode)
-      : value_(value),
-        receiver_(receiver),
-        name_(name),
-        strict_mode_(strict_mode) {
-    set_comment("[ DeferredReferenceSetNamedValue");
-  }
-
-  virtual void Generate();
-
- private:
-  Register value_;
-  Register receiver_;
-  Handle<String> name_;
-  StrictModeFlag strict_mode_;
-};
-
-
-// Takes value in r0, receiver in r1 and returns the result (the
-// value) in r0.
-void DeferredReferenceSetNamedValue::Generate() {
-  // Record the entry frame and spill.
-  VirtualFrame copied_frame(*frame_state()->frame());
-  copied_frame.SpillAll();
-
-  // Ensure value in r0, receiver in r1 to match store ic calling
-  // convention.
-  ASSERT(value_.is(r0) && receiver_.is(r1));
-  __ mov(r2, Operand(name_));
-
-  // The rest of the instructions in the deferred code must be together.
-  { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    // Call the named store IC. It has the arguments value, receiver and name
-    // in r0, r1 and r2.
-    Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-        (strict_mode_ == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
-                                      : Builtins::kStoreIC_Initialize));
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    // The call must be followed by a nop instruction to indicate that the
-    // named store has been inlined.
-    __ MarkCode(MacroAssembler::PROPERTY_ACCESS_INLINED);
-
-    // Go back to the frame we entered with. The instructions
-    // generated by this merge are skipped over by the inline store
-    // patching mechanism when looking for the branch instruction that
-    // tells it where the code to patch is.
-    copied_frame.MergeTo(frame_state()->frame());
-
-    // Block the constant pool for one more instruction after leaving this
-    // constant pool block scope to include the branch instruction ending the
-    // deferred code.
-    __ BlockConstPoolFor(1);
-  }
-}
-
-
-// Consumes the top of stack (the receiver) and pushes the result instead.
-void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-  bool contextual_load_in_builtin =
-      is_contextual &&
-      (ISOLATE->bootstrapper()->IsActive() ||
-      (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
-
-  if (scope()->is_global_scope() ||
-      loop_nesting() == 0 ||
-      contextual_load_in_builtin) {
-    Comment cmnt(masm(), "[ Load from named Property");
-    // Set up the name register and call the load IC.
-    frame_->CallLoadIC(name,
-                       is_contextual
-                           ? RelocInfo::CODE_TARGET_CONTEXT
-                           : RelocInfo::CODE_TARGET);
-    frame_->EmitPush(r0);  // Push answer.
-  } else {
-    // Inline the in-object property case.
-    Comment cmnt(masm(), is_contextual
-                             ? "[ Inlined contextual property load"
-                             : "[ Inlined named property load");
-
-    // Counter will be decremented in the deferred code. Placed here to avoid
-    // having it in the instruction stream below where patching will occur.
-    if (is_contextual) {
-      __ IncrementCounter(
-          masm_->isolate()->counters()->named_load_global_inline(),
-          1, frame_->scratch0(), frame_->scratch1());
-    } else {
-      __ IncrementCounter(masm_->isolate()->counters()->named_load_inline(),
-                          1, frame_->scratch0(), frame_->scratch1());
-    }
-
-    // The following instructions are the inlined load of an in-object
-    // property. Parts of this code are patched, so the exact number of
-    // instructions generated needs to be fixed. Therefore the constant
-    // pool is blocked while generating this code.
-
-    // Load the receiver from the stack.
-    Register receiver = frame_->PopToRegister();
-
-    DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(receiver, name, is_contextual);
-
-    bool is_dont_delete = false;
-    if (is_contextual) {
-      if (!info_->closure().is_null()) {
-        // When doing lazy compilation we can check if the global cell
-        // already exists and use its "don't delete" status as a hint.
-        AssertNoAllocation no_gc;
-        v8::internal::GlobalObject* global_object =
-            info_->closure()->context()->global();
-        LookupResult lookup;
-        global_object->LocalLookupRealNamedProperty(*name, &lookup);
-        if (lookup.IsProperty() && lookup.type() == NORMAL) {
-          ASSERT(lookup.holder() == global_object);
-          ASSERT(global_object->property_dictionary()->ValueAt(
-              lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
-          is_dont_delete = lookup.IsDontDelete();
-        }
-      }
-      if (is_dont_delete) {
-        __ IncrementCounter(
-            masm_->isolate()->counters()->dont_delete_hint_hit(),
-            1, frame_->scratch0(), frame_->scratch1());
-      }
-    }
-
-    { Assembler::BlockConstPoolScope block_const_pool(masm_);
-      if (!is_contextual) {
-        // Check that the receiver is a heap object.
-        __ tst(receiver, Operand(kSmiTagMask));
-        deferred->Branch(eq);
-      }
-
-      // Check for the_hole_value if necessary.
-      // Below we rely on the exact number of instructions generated, and
-      // we cannot place the Check macro inline since it does not generate
-      // a fixed number of instructions.
-      Label skip, check_the_hole, cont;
-      if (FLAG_debug_code && is_contextual && is_dont_delete) {
-        __ b(&skip);
-        __ bind(&check_the_hole);
-        __ Check(ne, "DontDelete cells can't contain the hole");
-        __ b(&cont);
-        __ bind(&skip);
-      }
-
-#ifdef DEBUG
-      int InlinedNamedLoadInstructions = 5;
-      Label check_inlined_codesize;
-      masm_->bind(&check_inlined_codesize);
-#endif
-
-      Register scratch = VirtualFrame::scratch0();
-      Register scratch2 = VirtualFrame::scratch1();
-
-      // Check the map. The null map used below is patched by the inline cache
-      // code.  Therefore we can't use a LoadRoot call.
-      __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
-      __ mov(scratch2, Operand(FACTORY->null_value()));
-      __ cmp(scratch, scratch2);
-      deferred->Branch(ne);
-
-      if (is_contextual) {
-#ifdef DEBUG
-        InlinedNamedLoadInstructions += 1;
-#endif
-        // Load the (initially invalid) cell and get its value.
-        masm()->mov(receiver, Operand(FACTORY->null_value()));
-        __ ldr(receiver,
-               FieldMemOperand(receiver, JSGlobalPropertyCell::kValueOffset));
-
-        deferred->set_is_dont_delete(is_dont_delete);
-
-        if (!is_dont_delete) {
-#ifdef DEBUG
-          InlinedNamedLoadInstructions += 3;
-#endif
-          __ cmp(receiver, Operand(FACTORY->the_hole_value()));
-          deferred->Branch(eq);
-        } else if (FLAG_debug_code) {
-#ifdef DEBUG
-          InlinedNamedLoadInstructions += 3;
-#endif
-          __ cmp(receiver, Operand(FACTORY->the_hole_value()));
-          __ b(&check_the_hole, eq);
-          __ bind(&cont);
-        }
-      } else {
-        // Initially use an invalid index. The index will be patched by the
-        // inline cache code.
-        __ ldr(receiver, MemOperand(receiver, 0));
-      }
-
-      // Make sure that the expected number of instructions is generated.
-      // If the code above is changed, the offsets in ic-arm.cc
-      // LoadIC::PatchInlinedContextualLoad and PatchInlinedLoad need
-      // to be updated as well.
-      ASSERT_EQ(InlinedNamedLoadInstructions,
-                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
-    }
-
-    deferred->BindExit();
-    // At this point the receiver register has the result, either from the
-    // deferred code or from the inlined code.
-    frame_->EmitPush(receiver);
-  }
-}
-
-
-void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
-  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
-  Result result;
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
-    frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
-  } else {
-    // Inline the in-object property case.
-    JumpTarget slow, done;
-
-    // Get the value and receiver from the stack.
-    frame()->PopToR0();
-    Register value = r0;
-    frame()->PopToR1();
-    Register receiver = r1;
-
-    DeferredReferenceSetNamedValue* deferred =
-        new DeferredReferenceSetNamedValue(
-          value, receiver, name, strict_mode_flag());
-
-    // Check that the receiver is a heap object.
-    __ tst(receiver, Operand(kSmiTagMask));
-    deferred->Branch(eq);
-
-    // The following instructions are the part of the inlined
-    // in-object property store code which can be patched. Therefore
-    // the exact number of instructions generated must be fixed, so
-    // the constant pool is blocked while generating this code.
-    { Assembler::BlockConstPoolScope block_const_pool(masm_);
-      Register scratch0 = VirtualFrame::scratch0();
-      Register scratch1 = VirtualFrame::scratch1();
-
-      // Check the map. Initially use an invalid map to force a
-      // failure. The map check will be patched in the runtime system.
-      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-#ifdef DEBUG
-      Label check_inlined_codesize;
-      masm_->bind(&check_inlined_codesize);
-#endif
-      __ mov(scratch0, Operand(FACTORY->null_value()));
-      __ cmp(scratch0, scratch1);
-      deferred->Branch(ne);
-
-      int offset = 0;
-      __ str(value, MemOperand(receiver, offset));
-
-      // Update the write barrier and record its size. We do not use
-      // the RecordWrite macro here because we want the offset
-      // addition instruction first to make it easy to patch.
-      Label record_write_start, record_write_done;
-      __ bind(&record_write_start);
-      // Add offset into the object.
-      __ add(scratch0, receiver, Operand(offset));
-      // Test that the object is not in the new space.  We cannot set
-      // region marks for new space pages.
-      __ InNewSpace(receiver, scratch1, eq, &record_write_done);
-      // Record the actual write.
-      __ RecordWriteHelper(receiver, scratch0, scratch1);
-      __ bind(&record_write_done);
-      // Clobber all input registers when running with the debug-code flag
-      // turned on to provoke errors.
-      if (FLAG_debug_code) {
-        __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
-        __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
-        __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
-      }
-      // Check that this is the first inlined write barrier or that
-      // this inlined write barrier has the same size as all the other
-      // inlined write barriers.
-      ASSERT((Isolate::Current()->inlined_write_barrier_size() == -1) ||
-             (Isolate::Current()->inlined_write_barrier_size() ==
-              masm()->InstructionsGeneratedSince(&record_write_start)));
-      Isolate::Current()->set_inlined_write_barrier_size(
-          masm()->InstructionsGeneratedSince(&record_write_start));
-
-      // Make sure that the expected number of instructions is generated.
-      ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
-                masm()->InstructionsGeneratedSince(&check_inlined_codesize));
-    }
-    deferred->BindExit();
-  }
-  ASSERT_EQ(expected_height, frame()->height());
-}
-
-
-void CodeGenerator::EmitKeyedLoad() {
-  if (loop_nesting() == 0) {
-    Comment cmnt(masm_, "[ Load from keyed property");
-    frame_->CallKeyedLoadIC();
-  } else {
-    // Inline the keyed load.
-    Comment cmnt(masm_, "[ Inlined load from keyed property");
-
-    // Counter will be decremented in the deferred code. Placed here to avoid
-    // having it in the instruction stream below where patching will occur.
-    __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline(),
-                        1, frame_->scratch0(), frame_->scratch1());
-
-    // Load the key and receiver from the stack.
-    bool key_is_known_smi = frame_->KnownSmiAt(0);
-    Register key = frame_->PopToRegister();
-    Register receiver = frame_->PopToRegister(key);
-
-    // The deferred code expects key and receiver in registers.
-    DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue(key, receiver);
-
-    // Check that the receiver is a heap object.
-    __ tst(receiver, Operand(kSmiTagMask));
-    deferred->Branch(eq);
-
-    // The following instructions are the part of the inlined keyed property
-    // load code which can be patched. Therefore the exact number of
-    // instructions generated needs to be fixed, so the constant pool is
-    // blocked while generating this code.
-    { Assembler::BlockConstPoolScope block_const_pool(masm_);
-      Register scratch1 = VirtualFrame::scratch0();
-      Register scratch2 = VirtualFrame::scratch1();
-      // Check the map. The null map used below is patched by the inline cache
-      // code.
-      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-      // Check that the key is a smi.
-      if (!key_is_known_smi) {
-        __ tst(key, Operand(kSmiTagMask));
-        deferred->Branch(ne);
-      }
-
-#ifdef DEBUG
-      Label check_inlined_codesize;
-      masm_->bind(&check_inlined_codesize);
-#endif
-      __ mov(scratch2, Operand(FACTORY->null_value()));
-      __ cmp(scratch1, scratch2);
-      deferred->Branch(ne);
-
-      // Get the elements array from the receiver.
-      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
-      __ AssertFastElements(scratch1);
-
-      // Check that the key is within bounds. Use an unsigned comparison to
-      // handle negative keys.
-      __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
-      __ cmp(scratch2, key);
-      deferred->Branch(ls);  // Unsigned less or equal.
-
-      // Load and check that the result is not the hole (key is a smi).
-      __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
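-      // Editor's note: the scaled-index load below exploits the smi
-      // encoding.  A smi stores its value shifted left by kSmiTagSize
-      // (1 on ARM, with kSmiShiftSize == 0), so for pointer-sized elements
-      // the byte offset is (key >> 1) * 4 == key << 1, exactly the LSL by
-      // kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize) == 1 used here.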
-      __ add(scratch1,
-             scratch1,
-             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-      __ ldr(scratch1,
-             MemOperand(scratch1, key, LSL,
-                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
-      __ cmp(scratch1, scratch2);
-      deferred->Branch(eq);
-
-      __ mov(r0, scratch1);
-      // Make sure that the expected number of instructions is generated.
-      ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
-                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
-    }
-
-    deferred->BindExit();
-  }
-}
-
-
-void CodeGenerator::EmitKeyedStore(StaticType* key_type,
-                                   WriteBarrierCharacter wb_info) {
-  // Generate an inlined version of the keyed store if the code is in a loop
-  // and the key is likely to be a smi.
-  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
-    // Inline the keyed store.
-    Comment cmnt(masm_, "[ Inlined store to keyed property");
-
-    Register scratch1 = VirtualFrame::scratch0();
-    Register scratch2 = VirtualFrame::scratch1();
-    Register scratch3 = r3;
-
-    // Counter will be decremented in the deferred code. Placed here to avoid
-    // having it in the instruction stream below where patching will occur.
-    __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline(),
-                        1, scratch1, scratch2);
-
-    // Load the value, key and receiver from the stack.
-    bool value_is_harmless = frame_->KnownSmiAt(0);
-    if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
-    bool key_is_smi = frame_->KnownSmiAt(1);
-    Register value = frame_->PopToRegister();
-    Register key = frame_->PopToRegister(value);
-    VirtualFrame::SpilledScope spilled(frame_);
-    Register receiver = r2;
-    frame_->EmitPop(receiver);
-
-#ifdef DEBUG
-    bool we_remembered_the_write_barrier = value_is_harmless;
-#endif
-
-    // The deferred code expects value, key and receiver in registers.
-    DeferredReferenceSetKeyedValue* deferred =
-        new DeferredReferenceSetKeyedValue(
-          value, key, receiver, strict_mode_flag());
-
-    // Check that the value is a smi. As this inlined code does not set the
-    // write barrier it is only possible to store smi values.
-    if (!value_is_harmless) {
-      // If the value is not likely to be a Smi then let's test the fixed array
-      // for new space instead.  See below.
-      if (wb_info == LIKELY_SMI) {
-        __ tst(value, Operand(kSmiTagMask));
-        deferred->Branch(ne);
-#ifdef DEBUG
-        we_remembered_the_write_barrier = true;
-#endif
-      }
-    }
-
-    if (!key_is_smi) {
-      // Check that the key is a smi.
-      __ tst(key, Operand(kSmiTagMask));
-      deferred->Branch(ne);
-    }
-
-    // Check that the receiver is a heap object.
-    __ tst(receiver, Operand(kSmiTagMask));
-    deferred->Branch(eq);
-
-    // Check that the receiver is a JSArray.
-    __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
-    deferred->Branch(ne);
-
-    // Get the elements array from the receiver.
-    __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
-    if (!value_is_harmless && wb_info != LIKELY_SMI) {
-      Label ok;
-      __ and_(scratch2,
-              scratch1,
-              Operand(ExternalReference::new_space_mask(isolate())));
-      __ cmp(scratch2, Operand(ExternalReference::new_space_start(isolate())));
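-      // Editor's note: the tst below is predicated on 'ne'.  If the
-      // elements array is in new space the cmp leaves 'eq', the tst is
-      // skipped, and the deferred branch on 'ne' falls through -- stores
-      // into new-space objects need no write barrier.  Otherwise the tst
-      // runs and any non-smi value bails out to the deferred store IC.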
-      __ tst(value, Operand(kSmiTagMask), ne);
-      deferred->Branch(ne);
-#ifdef DEBUG
-      we_remembered_the_write_barrier = true;
-#endif
-    }
-    // Check that the elements array is not a dictionary.
-    __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
-
-    // The following instructions are the part of the inlined keyed property
-    // store code which can be patched. Therefore the exact number of
-    // instructions generated needs to be fixed, so the constant pool is
-    // blocked while generating this code.
-    { Assembler::BlockConstPoolScope block_const_pool(masm_);
-#ifdef DEBUG
-      Label check_inlined_codesize;
-      masm_->bind(&check_inlined_codesize);
-#endif
-
-      // Read the fixed array map from the constant pool (not from the root
-      // array) so that the value can be patched.  When debugging, we patch this
-      // comparison to always fail so that we will hit the IC call in the
-      // deferred code which will allow the debugger to break for fast case
-      // stores.
-      __ mov(scratch3, Operand(FACTORY->fixed_array_map()));
-      __ cmp(scratch2, scratch3);
-      deferred->Branch(ne);
-
-      // Check that the key is within bounds.  Both the key and the length of
-      // the JSArray are smis (because the fixed array check above ensures the
-      // elements are in fast case). Use unsigned comparison to handle negative
-      // keys.
-      __ ldr(scratch3, FieldMemOperand(receiver, JSArray::kLengthOffset));
-      __ cmp(scratch3, key);
-      deferred->Branch(ls);  // Unsigned less or equal.
-
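-      // Editor's note: the store below uses the same smi-scaled addressing
-      // as the inlined keyed load above; the smi key already carries a
-      // factor of two, so LSL #1 yields the byte offset for pointer-sized
-      // elements.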
-      // Store the value.
-      __ add(scratch1, scratch1,
-             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-      __ str(value,
-             MemOperand(scratch1, key, LSL,
-                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
-
-      // Make sure that the expected number of instructions is generated.
-      ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
-                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
-    }
-
-    ASSERT(we_remembered_the_write_barrier);
-
-    deferred->BindExit();
-  } else {
-    frame()->CallKeyedStoreIC(strict_mode_flag());
-  }
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() { return true; }
-#endif
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-Handle<String> Reference::GetName() {
-  ASSERT(type_ == NAMED);
-  Property* property = expression_->AsProperty();
-  if (property == NULL) {
-    // Global variable reference treated as a named property reference.
-    VariableProxy* proxy = expression_->AsVariableProxy();
-    ASSERT(proxy->AsVariable() != NULL);
-    ASSERT(proxy->AsVariable()->is_global());
-    return proxy->name();
-  } else {
-    Literal* raw_name = property->key()->AsLiteral();
-    ASSERT(raw_name != NULL);
-    return Handle<String>(String::cast(*raw_name->handle()));
-  }
-}
-
-
-void Reference::DupIfPersist() {
-  if (persist_after_get_) {
-    switch (type_) {
-      case KEYED:
-        cgen_->frame()->Dup2();
-        break;
-      case NAMED:
-        cgen_->frame()->Dup();
-        // Fall through.
-      case UNLOADED:
-      case ILLEGAL:
-      case SLOT:
-        // Do nothing.
-        ;
-    }
-  } else {
-    set_unloaded();
-  }
-}
-
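-// Editor's note: a hedged JS illustration.  For a compound assignment such
-// as
-//   o.x += 1;
-// the reference is needed again for the store after the get, so
-// persist_after_get_ is set and the parts kept on the stack are duplicated
-// (Dup for NAMED, Dup2 for KEYED, which re-pushes both receiver and key).
-// A plain read consumes the reference, which is then marked unloaded.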
-
-void Reference::GetValue() {
-  ASSERT(cgen_->HasValidEntryRegisters());
-  ASSERT(!is_illegal());
-  ASSERT(!cgen_->has_cc());
-  MacroAssembler* masm = cgen_->masm();
-  Property* property = expression_->AsProperty();
-  if (property != NULL) {
-    cgen_->CodeForSourcePosition(property->position());
-  }
-
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Load from Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      ASSERT(slot != NULL);
-      DupIfPersist();
-      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-      break;
-    }
-
-    case NAMED: {
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      bool is_global = var != NULL;
-      ASSERT(!is_global || var->is_global());
-      Handle<String> name = GetName();
-      DupIfPersist();
-      cgen_->EmitNamedLoad(name, is_global);
-      break;
-    }
-
-    case KEYED: {
-      ASSERT(property != NULL);
-      DupIfPersist();
-      cgen_->EmitKeyedLoad();
-      cgen_->frame()->EmitPush(r0);
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
-  ASSERT(!is_illegal());
-  ASSERT(!cgen_->has_cc());
-  MacroAssembler* masm = cgen_->masm();
-  VirtualFrame* frame = cgen_->frame();
-  Property* property = expression_->AsProperty();
-  if (property != NULL) {
-    cgen_->CodeForSourcePosition(property->position());
-  }
-
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Store to Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      cgen_->StoreToSlot(slot, init_state);
-      set_unloaded();
-      break;
-    }
-
-    case NAMED: {
-      Comment cmnt(masm, "[ Store to named Property");
-      cgen_->EmitNamedStore(GetName(), false);
-      frame->EmitPush(r0);
-      set_unloaded();
-      break;
-    }
-
-    case KEYED: {
-      Comment cmnt(masm, "[ Store to keyed Property");
-      Property* property = expression_->AsProperty();
-      ASSERT(property != NULL);
-      cgen_->CodeForSourcePosition(property->position());
-      cgen_->EmitKeyedStore(property->key()->type(), wb_info);
-      frame->EmitPush(r0);
-      set_unloaded();
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-const char* GenericBinaryOpStub::GetName() {
-  if (name_ != NULL) return name_;
-  const int len = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(len);
-  if (name_ == NULL) return "OOM";
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name;
-  switch (mode_) {
-    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
-    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
-    default: overwrite_name = "UnknownOverwrite"; break;
-  }
-
-  OS::SNPrintF(Vector<char>(name_, len),
-               "GenericBinaryOpStub_%s_%s%s_%s",
-               op_name,
-               overwrite_name,
-               specialized_on_rhs_ ? "_ConstantRhs" : "",
-               BinaryOpIC::GetName(runtime_operands_type_));
-  return name_;
-}
-
-#undef __
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 9b1f103..01aa805 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -37,162 +37,8 @@
 
 // Forward declarations
 class CompilationInfo;
-class DeferredCode;
-class JumpTarget;
-class RegisterAllocator;
-class RegisterFile;
 
-enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
-enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
-
-
-// -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame.  The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
-  // The values of the types are important, see size().
-  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-  Reference(CodeGenerator* cgen,
-            Expression* expression,
-            bool persist_after_get = false);
-  ~Reference();
-
-  Expression* expression() const { return expression_; }
-  Type type() const { return type_; }
-  void set_type(Type value) {
-    ASSERT_EQ(ILLEGAL, type_);
-    type_ = value;
-  }
-
-  void set_unloaded() {
-    ASSERT_NE(ILLEGAL, type_);
-    ASSERT_NE(UNLOADED, type_);
-    type_ = UNLOADED;
-  }
-  // The size the reference takes up on the stack.
-  int size() const {
-    return (type_ < SLOT) ? 0 : type_;
-  }
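-
-  // Editor's note: concretely, a SLOT reference keeps nothing on the stack
-  // (size 0), a NAMED reference keeps the receiver (size 1), and a KEYED
-  // reference keeps receiver and key (size 2); UNLOADED and ILLEGAL map
-  // to 0.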
-
-  bool is_illegal() const { return type_ == ILLEGAL; }
-  bool is_slot() const { return type_ == SLOT; }
-  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
-  bool is_unloaded() const { return type_ == UNLOADED; }
-
-  // Return the name.  Only valid for named property references.
-  Handle<String> GetName();
-
-  // Generate code to push the value of the reference on top of the
-  // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is consumed by the call unless the
-  // reference is for a compound assignment.
-  // If the reference is not consumed, it is left in place under its value.
-  void GetValue();
-
-  // Generate code to store the value on top of the expression stack in the
-  // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The value is stored in the location specified
-  // by the reference, and is left on top of the stack, after the reference
-  // is popped from beneath it (unloaded).
-  void SetValue(InitState init_state, WriteBarrierCharacter wb);
-
-  // This is called in preparation for something that uses the reference on
-  // the stack.  If the reference will be needed again after the get, dup it
-  // now; otherwise mark it as unloaded.
-  inline void DupIfPersist();
-
- private:
-  CodeGenerator* cgen_;
-  Expression* expression_;
-  Type type_;
-  // Keep the reference on the stack after get, so it can be used by set later.
-  bool persist_after_get_;
-};
-
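-// Editor's note: a hedged JS illustration of the reference kinds:
-//
-//   localVar    // SLOT  -- a local (stack or context) slot
-//   globalVar   // NAMED -- globals are treated as named properties
-//   obj.name    // NAMED -- receiver kept on the stack
-//   obj[key]    // KEYED -- receiver and key kept on the stack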
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the label pair).  It is threaded through the
-// call stack.  Constructing a state implicitly pushes it on the owning code
-// generator's stack of states, and destroying one implicitly pops it.
-
-class CodeGenState BASE_EMBEDDED {
- public:
-  // Create an initial code generator state.  Destroying the initial state
-  // leaves the code generator with a NULL state.
-  explicit CodeGenState(CodeGenerator* owner);
-
-  // Destroy a code generator state and restore the owning code generator's
-  // previous state.
-  virtual ~CodeGenState();
-
-  virtual JumpTarget* true_target() const { return NULL; }
-  virtual JumpTarget* false_target() const { return NULL; }
-
- protected:
-  inline CodeGenerator* owner() { return owner_; }
-  inline CodeGenState* previous() const { return previous_; }
-
- private:
-  CodeGenerator* owner_;
-  CodeGenState* previous_;
-};
-
-
-class ConditionCodeGenState : public CodeGenState {
- public:
-  // Create a code generator state based on a code generator's current
-  // state.  The new state has its own pair of branch labels.
-  ConditionCodeGenState(CodeGenerator* owner,
-                        JumpTarget* true_target,
-                        JumpTarget* false_target);
-
-  virtual JumpTarget* true_target() const { return true_target_; }
-  virtual JumpTarget* false_target() const { return false_target_; }
-
- private:
-  JumpTarget* true_target_;
-  JumpTarget* false_target_;
-};
-
-
-class TypeInfoCodeGenState : public CodeGenState {
- public:
-  TypeInfoCodeGenState(CodeGenerator* owner,
-                       Slot* slot_number,
-                       TypeInfo info);
-  ~TypeInfoCodeGenState();
-
-  virtual JumpTarget* true_target() const { return previous()->true_target(); }
-  virtual JumpTarget* false_target() const {
-    return previous()->false_target();
-  }
-
- private:
-  Slot* slot_;
-  TypeInfo old_type_info_;
-};
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
-  NO_ARGUMENTS_ALLOCATION,
-  EAGER_ARGUMENTS_ALLOCATION,
-  LAZY_ARGUMENTS_ALLOCATION
-};
-
 
 // -------------------------------------------------------------------------
 // CodeGenerator
@@ -225,45 +71,6 @@
                               int pos,
                               bool right_here = false);
 
-  // Accessors
-  MacroAssembler* masm() { return masm_; }
-  VirtualFrame* frame() const { return frame_; }
-  inline Handle<Script> script();
-
-  bool has_valid_frame() const { return frame_ != NULL; }
-
-  // Set the virtual frame to be new_frame, with non-frame register
-  // reference counts given by non_frame_registers.  The non-frame
-  // register reference counts of the old frame are returned in
-  // non_frame_registers.
-  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
-  void DeleteFrame();
-
-  RegisterAllocator* allocator() const { return allocator_; }
-
-  CodeGenState* state() { return state_; }
-  void set_state(CodeGenState* state) { state_ = state; }
-
-  TypeInfo type_info(Slot* slot) {
-    int index = NumberOfSlot(slot);
-    if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
-    return (*type_info_)[index];
-  }
-
-  TypeInfo set_type_info(Slot* slot, TypeInfo info) {
-    int index = NumberOfSlot(slot);
-    ASSERT(index >= kInvalidSlotNumber);
-    if (index != kInvalidSlotNumber) {
-      TypeInfo previous_value = (*type_info_)[index];
-      (*type_info_)[index] = info;
-      return previous_value;
-    }
-    return TypeInfo::Unknown();
-  }
-
-  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
   // Constants related to patching of inlined load/store.
   static int GetInlinedKeyedLoadInstructionsAfterPatch() {
     return FLAG_debug_code ? 32 : 13;
@@ -275,317 +82,6 @@
   }
 
  private:
-  // Type of a member function that generates inline code for a native function.
-  typedef void (CodeGenerator::*InlineFunctionGenerator)
-      (ZoneList<Expression*>*);
-
-  static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
-  // Construction/Destruction
-  explicit CodeGenerator(MacroAssembler* masm);
-
-  // Accessors
-  inline bool is_eval();
-  inline Scope* scope();
-  inline bool is_strict_mode();
-  inline StrictModeFlag strict_mode_flag();
-
-  // Generating deferred code.
-  void ProcessDeferred();
-
-  static const int kInvalidSlotNumber = -1;
-
-  int NumberOfSlot(Slot* slot);
-
-  // State
-  bool has_cc() const { return cc_reg_ != al; }
-  JumpTarget* true_target() const { return state_->true_target(); }
-  JumpTarget* false_target() const { return state_->false_target(); }
-
-  // Track loop nesting level.
-  int loop_nesting() const { return loop_nesting_; }
-  void IncrementLoopNesting() { loop_nesting_++; }
-  void DecrementLoopNesting() { loop_nesting_--; }
-
-  // Node visitors.
-  void VisitStatements(ZoneList<Statement*>* statements);
-
-  virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  // Main code generation function
-  void Generate(CompilationInfo* info);
-
-  // Generate the return sequence code.  Should be called no more than
-  // once per compiled function, immediately after binding the return
-  // target (which cannot be done more than once).  The return value should
-  // be in r0.
-  void GenerateReturnSequence();
-
-  // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode();
-
-  // Store the arguments object and allocate it if necessary.
-  void StoreArgumentsObject(bool initial);
-
-  // The following are used by class Reference.
-  void LoadReference(Reference* ref);
-  void UnloadReference(Reference* ref);
-
-  MemOperand SlotOperand(Slot* slot, Register tmp);
-
-  MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
-                                               Register tmp,
-                                               Register tmp2,
-                                               JumpTarget* slow);
-
-  // Expressions
-  void LoadCondition(Expression* x,
-                     JumpTarget* true_target,
-                     JumpTarget* false_target,
-                     bool force_cc);
-  void Load(Expression* expr);
-  void LoadGlobal();
-  void LoadGlobalReceiver(Register scratch);
-
-  // Read a value from a slot and leave it on top of the expression stack.
-  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
-
-  // Store the value on top of the stack to a slot.
-  void StoreToSlot(Slot* slot, InitState init_state);
-
-  // Support for compiling assignment expressions.
-  void EmitSlotAssignment(Assignment* node);
-  void EmitNamedPropertyAssignment(Assignment* node);
-  void EmitKeyedPropertyAssignment(Assignment* node);
-
-  // Load a named property. The receiver on top of the stack is consumed and
-  // replaced by the result.
-  void EmitNamedLoad(Handle<String> name, bool is_contextual);
-
-  // Store to a named property. If the store is contextual, value is passed on
-  // the frame and consumed. Otherwise, receiver and value are passed on the
-  // frame and consumed. The result is returned in r0.
-  void EmitNamedStore(Handle<String> name, bool is_contextual);
-
-  // Load a keyed property, leaving the result in r0.  The receiver and key
-  // passed on the stack are consumed.
-  void EmitKeyedLoad();
-
-  // Store a keyed property. Value, key and receiver are on the stack and are
-  // consumed. The result is returned in r0.
-
-  void LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                         TypeofState typeof_state,
-                                         JumpTarget* slow);
-
-  // Support for loading from local/global variables and arguments
-  // whose location is known unless they are shadowed by
-  // eval-introduced bindings. Generates no code for unsupported slot
-  // types and therefore expects to fall through to the slow jump target.
-  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                       TypeofState typeof_state,
-                                       JumpTarget* slow,
-                                       JumpTarget* done);
-
-  // Special code for typeof expressions: Unfortunately, we must
-  // be careful when loading the expression in 'typeof'
-  // expressions. We are not allowed to throw reference errors for
-  // non-existing properties of the global object, so we must make it
-  // look like an explicit property access, instead of an access
-  // through the context chain.
-  void LoadTypeofExpression(Expression* x);
-
-  void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
-
-  // Generate code that computes a shortcutting logical operation.
-  void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
-  void GenericBinaryOperation(Token::Value op,
-                              OverwriteMode overwrite_mode,
-                              GenerateInlineSmi inline_smi,
-                              int known_rhs =
-                                  GenericBinaryOpStub::kUnknownIntValue);
-  void Comparison(Condition cc,
-                  Expression* left,
-                  Expression* right,
-                  bool strict = false);
-
-  void SmiOperation(Token::Value op,
-                    Handle<Object> value,
-                    bool reversed,
-                    OverwriteMode mode);
-
-  void CallWithArguments(ZoneList<Expression*>* arguments,
-                         CallFunctionFlags flags,
-                         int position);
-
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).  We call x the applicand and y the receiver.
-  // The optimization avoids allocating an arguments object if possible.
-  void CallApplyLazy(Expression* applicand,
-                     Expression* receiver,
-                     VariableProxy* arguments,
-                     int position);
-
-  // Control flow
-  void Branch(bool if_true, JumpTarget* target);
-  void CheckStack();
-
-  bool CheckForInlineRuntimeCall(CallRuntime* node);
-
-  static Handle<Code> ComputeLazyCompile(int argc);
-  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
-  // Declare global variables and functions in the given array of
-  // name/value pairs.
-  void DeclareGlobals(Handle<FixedArray> pairs);
-
-  // Instantiate the function based on the shared function info.
-  void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
-                           bool pretenure);
-
-  // Support for type checks.
-  void GenerateIsSmi(ZoneList<Expression*>* args);
-  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
-  void GenerateIsArray(ZoneList<Expression*>* args);
-  void GenerateIsRegExp(ZoneList<Expression*>* args);
-  void GenerateIsObject(ZoneList<Expression*>* args);
-  void GenerateIsSpecObject(ZoneList<Expression*>* args);
-  void GenerateIsFunction(ZoneList<Expression*>* args);
-  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
-  void GenerateIsStringWrapperSafeForDefaultValueOf(
-      ZoneList<Expression*>* args);
-
-  // Support for construct call checks.
-  void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
-  // Support for arguments.length and arguments[?].
-  void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArguments(ZoneList<Expression*>* args);
-
-  // Support for accessing the class and value fields of an object.
-  void GenerateClassOf(ZoneList<Expression*>* args);
-  void GenerateValueOf(ZoneList<Expression*>* args);
-  void GenerateSetValueOf(ZoneList<Expression*>* args);
-
-  // Fast support for charCodeAt(n).
-  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
-  // Fast support for String.fromCharCode(n).
-  void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharAt(ZoneList<Expression*>* args);
-
-  // Fast support for object equality testing.
-  void GenerateObjectEquals(ZoneList<Expression*>* args);
-
-  void GenerateLog(ZoneList<Expression*>* args);
-
-  // Fast support for Math.random().
-  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
-  // Fast support for StringAdd.
-  void GenerateStringAdd(ZoneList<Expression*>* args);
-
-  // Fast support for SubString.
-  void GenerateSubString(ZoneList<Expression*>* args);
-
-  // Fast support for StringCompare.
-  void GenerateStringCompare(ZoneList<Expression*>* args);
-
-  // Support for direct calls from JavaScript to native RegExp code.
-  void GenerateRegExpExec(ZoneList<Expression*>* args);
-
-  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
-  // Support for fast native caches.
-  void GenerateGetFromCache(ZoneList<Expression*>* args);
-
-  // Fast support for number to string.
-  void GenerateNumberToString(ZoneList<Expression*>* args);
-
-  // Fast swapping of elements.
-  void GenerateSwapElements(ZoneList<Expression*>* args);
-
-  // Fast call for custom callbacks.
-  void GenerateCallFunction(ZoneList<Expression*>* args);
-
-  // Fast call to math functions.
-  void GenerateMathPow(ZoneList<Expression*>* args);
-  void GenerateMathSin(ZoneList<Expression*>* args);
-  void GenerateMathCos(ZoneList<Expression*>* args);
-  void GenerateMathSqrt(ZoneList<Expression*>* args);
-  void GenerateMathLog(ZoneList<Expression*>* args);
-
-  void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
-  void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
-  // Simple condition analysis.
-  enum ConditionAnalysis {
-    ALWAYS_TRUE,
-    ALWAYS_FALSE,
-    DONT_KNOW
-  };
-  ConditionAnalysis AnalyzeCondition(Expression* cond);
-
-  // Methods used to indicate which source code is generated for. Source
-  // positions are collected by the assembler and emitted with the relocation
-  // information.
-  void CodeForFunctionPosition(FunctionLiteral* fun);
-  void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(Statement* node);
-  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
-  void CodeForSourcePosition(int pos);
-
-#ifdef DEBUG
-  // True if the registers are valid for entry to a block.
-  bool HasValidEntryRegisters();
-#endif
-
-  List<DeferredCode*> deferred_;
-
-  // Assembler
-  MacroAssembler* masm_;  // to generate code
-
-  CompilationInfo* info_;
-
-  // Code generation state
-  VirtualFrame* frame_;
-  RegisterAllocator* allocator_;
-  Condition cc_reg_;
-  CodeGenState* state_;
-  int loop_nesting_;
-
-  Vector<TypeInfo>* type_info_;
-
-  // Jump targets
-  BreakTarget function_return_;
-
-  // True if the function return is shadowed (ie, jumping to the target
-  // function_return_ does not jump to the true function return, but rather
-  // to some unlinking code).
-  bool function_return_is_shadowed_;
-
-  friend class VirtualFrame;
-  friend class Isolate;
-  friend class JumpTarget;
-  friend class Reference;
-  friend class FastCodeGenerator;
-  friend class FullCodeGenerator;
-  friend class FullCodeGenSyntaxChecker;
-  friend class InlineRuntimeFunctionsTable;
-  friend class LCodeGen;
-
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 0ac567c..823c6ff 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,12 +28,9 @@
 #ifndef V8_ARM_CONSTANTS_ARM_H_
 #define V8_ARM_CONSTANTS_ARM_H_
 
-// The simulator emulates the EABI so we define the USE_ARM_EABI macro if we
-// are not running on real ARM hardware.  One reason for this is that the
-// old ABI uses fp registers in the calling convention and the simulator does
-// not simulate fp registers or coroutine instructions.
-#if defined(__ARM_EABI__) || !defined(__arm__)
-# define USE_ARM_EABI 1
+// ARM EABI is required.
+#if defined(__arm__) && !defined(__ARM_EABI__)
+#error ARM EABI support is required.
 #endif
 
 // This means that interwork-compatible jump instructions are generated.  We
@@ -346,7 +343,9 @@
   da_x         = (0|0|0) << 21,  // Decrement after.
   ia_x         = (0|4|0) << 21,  // Increment after.
   db_x         = (8|0|0) << 21,  // Decrement before.
-  ib_x         = (8|4|0) << 21   // Increment before.
+  ib_x         = (8|4|0) << 21,  // Increment before.
+
+  kBlockAddrModeMask = (8|4|1) << 21
 };
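
The four block-transfer modes differ only in the P (bit 24) and U (bit 23) fields, and the new kBlockAddrModeMask additionally covers the W writeback bit (bit 21). A standalone check of those bit positions (plain C++, mirroring the enum above):

    #include <cassert>

    enum BlockAddrMode {
      da_x = (0|0|0) << 21,               // P=0, U=0: decrement after.
      ia_x = (0|4|0) << 21,               // P=0, U=1: increment after.
      db_x = (8|0|0) << 21,               // P=1, U=0: decrement before.
      ib_x = (8|4|0) << 21,               // P=1, U=1: increment before.
      kBlockAddrModeMask = (8|4|1) << 21  // P, U and W (writeback) bits.
    };

    int main() {
      assert(db_x == 1 << 24);                        // P is bit 24.
      assert(ia_x == 1 << 23);                        // U is bit 23.
      assert((kBlockAddrModeMask & (1 << 21)) != 0);  // W is bit 21.
      return 0;
    }
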
 
 
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 0f5bf56..51cfeb6 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -42,11 +42,12 @@
 namespace internal {
 
 void CPU::Setup() {
-  CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
-  cpu_features->Probe(true);
-  if (!cpu_features->IsSupported(VFP3) || Serializer::enabled()) {
-    V8::DisableCrankshaft();
-  }
+  CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+  return CpuFeatures::IsSupported(VFP3);
 }
 
 
@@ -74,62 +75,33 @@
   register uint32_t end asm("a2") =
       reinterpret_cast<uint32_t>(start) + size;
   register uint32_t flg asm("a3") = 0;
-  #ifdef __ARM_EABI__
-    #if defined (__arm__) && !defined(__thumb__)
-      // __arm__ may be defined in thumb mode.
-      register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
-      asm volatile(
-          "svc 0x0"
-          : "=r" (beg)
-          : "0" (beg), "r" (end), "r" (flg), "r" (scno));
-    #else
-      // r7 is reserved by the EABI in thumb mode.
-      asm volatile(
-      "@   Enter ARM Mode  \n\t"
-          "adr r3, 1f      \n\t"
-          "bx  r3          \n\t"
-          ".ALIGN 4        \n\t"
-          ".ARM            \n"
-      "1:  push {r7}       \n\t"
-          "mov r7, %4      \n\t"
-          "svc 0x0         \n\t"
-          "pop {r7}        \n\t"
-      "@   Enter THUMB Mode\n\t"
-          "adr r3, 2f+1    \n\t"
-          "bx  r3          \n\t"
-          ".THUMB          \n"
-      "2:                  \n\t"
-          : "=r" (beg)
-          : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
-          : "r3");
-    #endif
+  #if defined (__arm__) && !defined(__thumb__)
+    // __arm__ may be defined in thumb mode.
+    register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
+    asm volatile(
+        "svc 0x0"
+        : "=r" (beg)
+        : "0" (beg), "r" (end), "r" (flg), "r" (scno));
   #else
-    #if defined (__arm__) && !defined(__thumb__)
-      // __arm__ may be defined in thumb mode.
-      asm volatile(
-          "svc %1"
-          : "=r" (beg)
-          : "i" (__ARM_NR_cacheflush), "0" (beg), "r" (end), "r" (flg));
-    #else
-      // Do not use the value of __ARM_NR_cacheflush in the inline assembly
-      // below, because the thumb mode value would be used, which would be
-      // wrong, since we switch to ARM mode before executing the svc instruction
-      asm volatile(
-      "@   Enter ARM Mode  \n\t"
-          "adr r3, 1f      \n\t"
-          "bx  r3          \n\t"
-          ".ALIGN 4        \n\t"
-          ".ARM            \n"
-      "1:  svc 0x9f0002    \n"
-      "@   Enter THUMB Mode\n\t"
-          "adr r3, 2f+1    \n\t"
-          "bx  r3          \n\t"
-          ".THUMB          \n"
-      "2:                  \n\t"
-          : "=r" (beg)
-          : "0" (beg), "r" (end), "r" (flg)
-          : "r3");
-    #endif
+    // r7 is reserved by the EABI in thumb mode.
+    asm volatile(
+    "@   Enter ARM Mode  \n\t"
+        "adr r3, 1f      \n\t"
+        "bx  r3          \n\t"
+        ".ALIGN 4        \n\t"
+        ".ARM            \n"
+    "1:  push {r7}       \n\t"
+        "mov r7, %4      \n\t"
+        "svc 0x0         \n\t"
+        "pop {r7}        \n\t"
+    "@   Enter THUMB Mode\n\t"
+        "adr r3, 2f+1    \n\t"
+        "bx  r3          \n\t"
+        ".THUMB          \n"
+    "2:                  \n\t"
+        : "=r" (beg)
+        : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
+        : "r3");
   #endif
 #endif
 }
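
With the old-ABI branches gone, the remaining assembly is the EABI cacheflush syscall: the syscall number goes in r7 (directly in ARM mode, via a mode switch in Thumb mode, where r7 is reserved). For reference, the same flush expressed through the generic syscall(2) wrapper, assuming <asm/unistd.h> provides __ARM_NR_cacheflush as on ARM Linux:

    #include <stddef.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #if defined(__arm__)
    #include <asm/unistd.h>

    // Flush [start, start + size); the third argument is a flags word that
    // must currently be zero.
    static void FlushICacheViaSyscall(void* start, size_t size) {
      char* begin = reinterpret_cast<char*>(start);
      syscall(__ARM_NR_cacheflush, begin, begin + size, 0);
    }
    #endif
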
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index e6ad98c..07a2272 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_ARM)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 
 namespace v8 {
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 3a3dcf0..f0a6937 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -586,14 +586,16 @@
 
   // Allocate a new deoptimizer object.
   // Pass four arguments in r0 to r3 and fifth argument on stack.
-  __ PrepareCallCFunction(5, r5);
+  __ PrepareCallCFunction(6, r5);
   __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ mov(r1, Operand(type()));  // bailout type,
   // r2: bailout id already loaded.
   // r3: code address or 0 already loaded.
   __ str(r4, MemOperand(sp, 0 * kPointerSize));  // Fp-to-sp delta.
+  __ mov(r5, Operand(ExternalReference::isolate_address()));
+  __ str(r5, MemOperand(sp, 1 * kPointerSize));  // Isolate.
   // Call Deoptimizer::New().
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 5);
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
 
   // Preserve "deoptimizer" object in register r0 and get the input
   // frame descriptor pointer to r1 (deoptimizer->input_);
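
Under the ARM EABI the first four arguments travel in r0-r3 and the rest on the stack, which is why growing Deoptimizer::New from five to six arguments means storing both the fp-to-sp delta and the new isolate pointer through sp. A sketch of a matching C-side signature (parameter names are illustrative, not the actual declaration):

    // Illustrative six-argument signature; under AAPCS the first four
    // parameters arrive in r0-r3, the last two at [sp, #0] and [sp, #4].
    struct JSFunction;
    struct Code;
    struct Isolate;

    extern "C" void* NewDeoptimizer(JSFunction* function,  // r0
                                    int bailout_type,      // r1
                                    unsigned bailout_id,   // r2
                                    Code* code_address,    // r3
                                    int fp_to_sp_delta,    // [sp, #0]
                                    Isolate* isolate);     // [sp, #4]
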
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 899b88a..a3775b5 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -371,25 +371,34 @@
 int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
   ASSERT((format[0] == 'S') || (format[0] == 'D'));
 
+  VFPRegPrecision precision =
+      format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
+
+  int retval = 2;
+  int reg = -1;
   if (format[1] == 'n') {
-    int reg = instr->VnValue();
-    if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NValue()));
-    if (format[0] == 'D') PrintDRegister(reg);
-    return 2;
+    reg = instr->VFPNRegValue(precision);
   } else if (format[1] == 'm') {
-    int reg = instr->VmValue();
-    if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MValue()));
-    if (format[0] == 'D') PrintDRegister(reg);
-    return 2;
+    reg = instr->VFPMRegValue(precision);
   } else if (format[1] == 'd') {
-    int reg = instr->VdValue();
-    if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DValue()));
-    if (format[0] == 'D') PrintDRegister(reg);
-    return 2;
+    reg = instr->VFPDRegValue(precision);
+    if (format[2] == '+') {
+      int immed8 = instr->Immed8Value();
+      if (format[0] == 'S') reg += immed8 - 1;
+      if (format[0] == 'D') reg += (immed8 / 2 - 1);
+      retval = 3;
+    }
+  } else {
+    UNREACHABLE();
   }
 
-  UNREACHABLE();
-  return -1;
+  if (precision == kSinglePrecision) {
+    PrintSRegister(reg);
+  } else {
+    PrintDRegister(reg);
+  }
+
+  return retval;
 }
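
The VFPNRegValue/VFPMRegValue/VFPDRegValue helpers used here fold the single-bit N/M/D field into the register index, replacing the open-coded (reg << 1) | bit from the deleted lines. A minimal standalone model of that numbering (not V8's actual helper):

    // A single-precision VFP register number is the 4-bit Vx field shifted
    // left once with the extra bit as bit 0; a double-precision number is
    // the 4-bit field itself (D16-D31 would need VFPv3's extra bit on top).
    enum VFPRegPrecision { kSinglePrecision, kDoublePrecision };

    static int VFPRegNumber(VFPRegPrecision precision, int vx, int extra_bit) {
      return precision == kSinglePrecision ? (vx << 1) | extra_bit : vx;
    }
    // e.g. vx = 0b1010, extra_bit = 1  =>  s21 single precision, d10 double.
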
 
 
@@ -1273,9 +1282,22 @@
           Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]");
         }
         break;
+      case 0x4:
+      case 0x5:
+      case 0x6:
+      case 0x7:
+      case 0x9:
+      case 0xB: {
+        bool to_vfp_register = (instr->VLValue() == 0x1);
+        if (to_vfp_register) {
+          Format(instr, "vldm'cond'pu 'rn'w, {'Sd-'Sd+}");
+        } else {
+          Format(instr, "vstm'cond'pu 'rn'w, {'Sd-'Sd+}");
+        }
+        break;
+      }
       default:
         Unknown(instr);  // Not used by V8.
-        break;
     }
   } else if (instr->CoprocessorValue() == 0xB) {
     switch (instr->OpcodeValue()) {
@@ -1303,9 +1325,19 @@
           Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]");
         }
         break;
+      case 0x4:
+      case 0x5:
+      case 0x9: {
+        bool to_vfp_register = (instr->VLValue() == 0x1);
+        if (to_vfp_register) {
+          Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
+        } else {
+          Format(instr, "vstm'cond'pu 'rn'w, {'Dd-'Dd+}");
+        }
+        break;
+      }
       default:
         Unknown(instr);  // Not used by V8.
-        break;
     }
   } else {
     Unknown(instr);  // Not used by V8.
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 4aa8d6a..d6846c8 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -136,7 +136,7 @@
  public:
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
-  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kLastParameterOffset = +2 * kPointerSize;
   static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
 
   // Caller SP-relative.
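
The renamed constant keeps its value of +2 * kPointerSize; the slot two words above fp is where the caller's stack pointer pointed, i.e. the last (rightmost) pushed parameter. Roughly, the FP-relative layout being described (a sketch assuming the standard prologue, kPointerSize == 4):

    //   [fp + 8]  last parameter  (kLastParameterOffset == +2 * kPointerSize)
    //   [fp + 4]  return address (saved lr)
    //   [fp + 0]  caller's fp
    //   [fp - 4]  context
    //   [fp - 8]  function        (kFunctionOffset == kMarkerOffset)
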
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 088ba58..85e4262 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_ARM)
 
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
@@ -245,7 +245,7 @@
     }
 
     { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailout(info->function(), NO_REGISTERS);
+      PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
       Label ok;
       __ LoadRoot(ip, Heap::kStackLimitRootIndex);
       __ cmp(sp, Operand(ip));
@@ -431,8 +431,7 @@
     if (true_label_ != fall_through_) __ b(true_label_);
   } else if (lit->IsString()) {
     if (String::cast(*lit)->length() == 0) {
-    if (false_label_ != fall_through_) __ b(false_label_);
-      __ b(false_label_);
+      if (false_label_ != fall_through_) __ b(false_label_);
     } else {
       if (true_label_ != fall_through_) __ b(true_label_);
     }
@@ -562,7 +561,7 @@
 void FullCodeGenerator::DoTest(Label* if_true,
                                Label* if_false,
                                Label* fall_through) {
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // Emit the inlined tests assumed by the stub.
     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -824,7 +823,7 @@
   // Compile all the tests with branches to their bodies.
   for (int i = 0; i < clauses->length(); i++) {
     CaseClause* clause = clauses->at(i);
-    clause->body_target()->entry_label()->Unuse();
+    clause->body_target()->Unuse();
 
     // The default is not a test, but remember it as final fall through.
     if (clause->is_default()) {
@@ -851,7 +850,7 @@
       __ cmp(r1, r0);
       __ b(ne, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
-      __ b(clause->body_target()->entry_label());
+      __ b(clause->body_target());
       __ bind(&slow_case);
     }
 
@@ -862,7 +861,7 @@
     __ cmp(r0, Operand(0));
     __ b(ne, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
-    __ b(clause->body_target()->entry_label());
+    __ b(clause->body_target());
   }
 
   // Discard the test value and jump to the default if present, otherwise to
@@ -872,14 +871,14 @@
   if (default_clause == NULL) {
     __ b(nested_statement.break_target());
   } else {
-    __ b(default_clause->body_target()->entry_label());
+    __ b(default_clause->body_target());
   }
 
   // Compile all the case bodies.
   for (int i = 0; i < clauses->length(); i++) {
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
-    __ bind(clause->body_target()->entry_label());
+    __ bind(clause->body_target());
     PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
     VisitStatements(clause->statements());
   }
@@ -1622,27 +1621,26 @@
       break;
   }
 
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
   if (expr->is_compound()) {
     { AccumulatorValueContext context(this);
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          PrepareForBailout(expr->target(), TOS_REG);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
       }
     }
 
-    // For property compound assignments we need another deoptimization
-    // point after the property load.
-    if (property != NULL) {
-      PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
-    }
-
     Token::Value op = expr->binary_op();
     __ push(r0);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
@@ -2352,16 +2350,6 @@
       }
     }
   } else {
-    // Call to some other expression.  If the expression is an anonymous
-    // function literal not called in a loop, mark it as one that should
-    // also use the fast code generator.
-    FunctionLiteral* lit = fun->AsFunctionLiteral();
-    if (lit != NULL &&
-        lit->name()->Equals(isolate()->heap()->empty_string()) &&
-        loop_depth() == 0) {
-      lit->set_try_full_codegen(true);
-    }
-
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(fun);
     }
@@ -2543,11 +2531,75 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
-  // used in a few functions in runtime.js which should not normally be hit by
-  // this compiler.
+  if (FLAG_debug_code) __ AbortIfSmi(r0);
+
+  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
+  __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ b(ne, if_true);
+
+  // Check for fast case object. Generate false result for slow case object.
+  __ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+  __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+  __ cmp(r2, ip);
+  __ b(eq, if_false);
+
+  // Look for the valueOf symbol in the descriptor array, and indicate false
+  // if found. The type is not checked, so if it is a transition it is a
+  // false negative.
+  __ ldr(r4, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
+  __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
+  // r4: descriptor array
+  // r3: length of descriptor array
+  // Calculate the end of the descriptor array.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kPointerSize == 4);
+  __ add(r2, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+  // Calculate location of the first key name.
+  __ add(r4,
+         r4,
+         Operand(FixedArray::kHeaderSize - kHeapObjectTag +
+                 DescriptorArray::kFirstIndex * kPointerSize));
+  // Loop through all the keys in the descriptor array. If one of these is
+  // the valueOf symbol, the result is false.
+  Label entry, loop;
+  // The use of ip to store the valueOf symbol assumes that it is not
+  // otherwise used in the loop below.
+  __ mov(ip, Operand(FACTORY->value_of_symbol()));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ ldr(r3, MemOperand(r4, 0));
+  __ cmp(r3, ip);
+  __ b(eq, if_false);
+  __ add(r4, r4, Operand(kPointerSize));
+  __ bind(&entry);
+  __ cmp(r4, Operand(r2));
+  __ b(ne, &loop);
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If not, the result is
+  // false.
+  __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
+  __ tst(r2, Operand(kSmiTagMask));
+  __ b(eq, if_false);
+  __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ ldr(r3, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
+  __ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+  __ cmp(r2, r3);
+  __ b(ne, if_false);
+
+  // Set the bit in the map to indicate that it has been checked safe for
+  // default valueOf, and set the result to true.
+  __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+  __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+  __ jmp(if_true);
+
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  __ jmp(if_false);
   context()->Plug(if_true, if_false);
 }
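
In C++ terms, the fast path this assembly implements checks a cached map bit, rejects dictionary-mode objects, scans the map's descriptor array for a valueOf key, verifies the prototype is the unmodified String prototype, and finally caches a positive answer in the map. A self-contained model (the types are stand-ins, not V8's heap classes):

    #include <string>
    #include <vector>

    struct Map {
      unsigned bit_field2;
      std::vector<std::string> descriptor_keys;  // Own property names.
      const Map* prototype_map;                  // Map of the prototype.
    };
    static const unsigned kStringWrapperSafeBit = 1u << 3;  // Bit position illustrative.
    static const Map* string_prototype_map = 0;  // The unmodified prototype's map.

    static bool SafeForDefaultValueOf(Map* map, bool has_dictionary_properties) {
      if (map->bit_field2 & kStringWrapperSafeBit) return true;  // Cached.
      if (has_dictionary_properties) return false;               // Slow case.
      for (size_t i = 0; i < map->descriptor_keys.size(); i++) {
        // May be a transition, so this can be a false negative.
        if (map->descriptor_keys[i] == "valueOf") return false;
      }
      if (map->prototype_map != string_prototype_map) return false;
      map->bit_field2 |= kStringWrapperSafeBit;  // Cache the positive result.
      return true;
    }
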
 
@@ -2802,9 +2854,10 @@
   // Convert 32 random bits in r0 to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
-  if (isolate()->cpu_features()->IsSupported(VFP3)) {
-    __ PrepareCallCFunction(0, r1);
-    __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    __ PrepareCallCFunction(1, r0);
+    __ mov(r0, Operand(ExternalReference::isolate_address()));
+    __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
     CpuFeatures::Scope scope(VFP3);
     // 0x41300000 is the top half of 1.0 x 2^20 as a double.
@@ -2822,10 +2875,11 @@
     __ vstr(d7, r0, HeapNumber::kValueOffset);
     __ mov(r0, r4);
   } else {
+    __ PrepareCallCFunction(2, r0);
     __ mov(r0, Operand(r4));
-    __ PrepareCallCFunction(1, r1);
+    __ mov(r1, Operand(ExternalReference::isolate_address()));
     __ CallCFunction(
-        ExternalReference::fill_heap_number_with_random_function(isolate()), 1);
+        ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
   }
 
   context()->Plug(r0);
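
The bit trick in the comment above is exact: with high word 0x41300000 (1.0 x 2^20) and 32 random bits r in the low word, the double equals 2^20 + r * 2^-32, so subtracting 2^20 leaves r * 2^-32, uniform in [0, 1). A portable check of the arithmetic:

    #include <cassert>
    #include <cstring>

    // Reinterprets the 64-bit pattern (0x41300000 << 32) | r as an IEEE-754
    // double and subtracts 1.0 x 2^20, exactly as the VFP path does.
    static double RandomBitsToDouble(unsigned r) {
      unsigned long long bits = (0x41300000ULL << 32) | r;
      double d;
      std::memcpy(&d, &bits, sizeof(d));
      return d - 1048576.0;  // 1048576 == 2^20.
    }

    int main() {
      assert(RandomBitsToDouble(0u) == 0.0);
      assert(RandomBitsToDouble(0x80000000u) == 0.5);  // Top bit => one half.
      return 0;
    }
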
@@ -3107,15 +3161,14 @@
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
-  int arg_count = args->length() - 2;  // For receiver and function.
-  VisitForStackValue(args->at(0));  // Receiver.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i + 1));
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; i++) {
+    VisitForStackValue(args->at(i));
   }
-  VisitForAccumulatorValue(args->at(arg_count + 1));  // Function.
+  VisitForAccumulatorValue(args->last());  // Function.
 
-  // InvokeFunction requires function in r1. Move it in there.
-  if (!result_register().is(r1)) __ mov(r1, result_register());
+  // InvokeFunction requires the function in r1. Move it in there.
+  __ mov(r1, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(r1, count, CALL_FUNCTION);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3827,7 +3880,11 @@
 
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
-  PrepareForBailout(expr->increment(), TOS_REG);
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailoutForId(expr->CountId(), TOS_REG);
+  }
 
   // Call ToNumber only if operand is not a smi.
   Label no_conversion;
@@ -4237,7 +4294,6 @@
     default:
       break;
   }
-
   __ Call(ic, mode);
 }
 
@@ -4259,7 +4315,6 @@
     default:
       break;
   }
-
   __ Call(ic, RelocInfo::CODE_TARGET);
   if (patch_site != NULL && patch_site->is_bound()) {
     patch_site->EmitPatchInfo();
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index dc4f761..db04f33 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,7 @@
 
 #include "assembler-arm.h"
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "disasm.h"
 #include "ic-inl.h"
 #include "runtime.h"
@@ -926,217 +926,6 @@
   __ TailCallExternalReference(ref, 2, 1);
 }
 
-// Returns the code marker, or the 0 if the code is not marked.
-static inline int InlinedICSiteMarker(Address address,
-                                      Address* inline_end_address) {
-  if (V8::UseCrankshaft()) return false;
-
-  // If the instruction after the call site is not the pseudo instruction nop1
-  // then this is not related to an inlined in-object property load. The nop1
-  // instruction is located just after the call to the IC in the deferred code
-  // handling the miss in the inlined code. After the nop1 instruction there is
-  // a branch instruction for jumping back from the deferred code.
-  Address address_after_call = address + Assembler::kCallTargetAddressOffset;
-  Instr instr_after_call = Assembler::instr_at(address_after_call);
-  int code_marker = MacroAssembler::GetCodeMarker(instr_after_call);
-
-  // A negative result means the code is not marked.
-  if (code_marker <= 0) return 0;
-
-  Address address_after_nop = address_after_call + Assembler::kInstrSize;
-  Instr instr_after_nop = Assembler::instr_at(address_after_nop);
-  // There may be some reg-reg move and frame merging code to skip over before
-  // the branch back from the DeferredReferenceGetKeyedValue code to the inlined
-  // code.
-  while (!Assembler::IsBranch(instr_after_nop)) {
-    address_after_nop += Assembler::kInstrSize;
-    instr_after_nop = Assembler::instr_at(address_after_nop);
-  }
-
-  // Find the end of the inlined code for handling the load.
-  int b_offset =
-      Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
-  ASSERT(b_offset < 0);  // Jumping back from deferred code.
-  *inline_end_address = address_after_nop + b_offset;
-
-  return code_marker;
-}
-
-
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // Find the end of the inlined code for handling the load if this is an
-  // inlined IC call site.
-  Address inline_end_address = 0;
-  if (InlinedICSiteMarker(address, &inline_end_address)
-      != Assembler::PROPERTY_ACCESS_INLINED) {
-    return false;
-  }
-
-  // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
-  // The immediate must be representable in 12 bits.
-  ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
-  Address ldr_property_instr_address =
-      inline_end_address - Assembler::kInstrSize;
-  ASSERT(Assembler::IsLdrRegisterImmediate(
-      Assembler::instr_at(ldr_property_instr_address)));
-  Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
-  ldr_property_instr = Assembler::SetLdrRegisterImmediateOffset(
-      ldr_property_instr, offset - kHeapObjectTag);
-  Assembler::instr_at_put(ldr_property_instr_address, ldr_property_instr);
-
-  // Indicate that code has changed.
-  CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);
-
-  // Patch the map check.
-  // For PROPERTY_ACCESS_INLINED, the load map instruction is generated
-  // 4 instructions before the end of the inlined code.
-  // See codgen-arm.cc CodeGenerator::EmitNamedLoad.
-  int ldr_map_offset = -4;
-  Address ldr_map_instr_address =
-      inline_end_address + ldr_map_offset * Assembler::kInstrSize;
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-  return true;
-}
-
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
-                                        Object* map,
-                                        Object* cell,
-                                        bool is_dont_delete) {
-  // Find the end of the inlined code for handling the contextual load if
-  // this is inlined IC call site.
-  Address inline_end_address = 0;
-  int marker = InlinedICSiteMarker(address, &inline_end_address);
-  if (!((marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT) ||
-        (marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE))) {
-    return false;
-  }
-  // On ARM we don't rely on the is_dont_delete argument as the hint is already
-  // embedded in the code marker.
-  bool marker_is_dont_delete =
-      marker == Assembler::PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE;
-
-  // These are the offsets from the end of the inlined code.
-  // See codgen-arm.cc CodeGenerator::EmitNamedLoad.
-  int ldr_map_offset = marker_is_dont_delete ? -5: -8;
-  int ldr_cell_offset = marker_is_dont_delete ? -2: -5;
-  if (FLAG_debug_code && marker_is_dont_delete) {
-    // Three extra instructions were generated to check for the_hole_value.
-    ldr_map_offset -= 3;
-    ldr_cell_offset -= 3;
-  }
-  Address ldr_map_instr_address =
-      inline_end_address + ldr_map_offset * Assembler::kInstrSize;
-  Address ldr_cell_instr_address =
-      inline_end_address + ldr_cell_offset * Assembler::kInstrSize;
-
-  // Patch the map check.
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-  // Patch the cell address.
-  Assembler::set_target_address_at(ldr_cell_instr_address,
-                                   reinterpret_cast<Address>(cell));
-
-  return true;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // Find the end of the inlined code for the store if there is an
-  // inlined version of the store.
-  Address inline_end_address = 0;
-  if (InlinedICSiteMarker(address, &inline_end_address)
-      != Assembler::PROPERTY_ACCESS_INLINED) {
-    return false;
-  }
-
-  // Compute the address of the map load instruction.
-  Address ldr_map_instr_address =
-      inline_end_address -
-      (CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() *
-       Assembler::kInstrSize);
-
-  // Update the offsets if initializing the inlined store. No reason
-  // to update the offsets when clearing the inlined version because
-  // it will bail out in the map check.
-  if (map != HEAP->null_value()) {
-    // Patch the offset in the actual store instruction.
-    Address str_property_instr_address =
-        ldr_map_instr_address + 3 * Assembler::kInstrSize;
-    Instr str_property_instr = Assembler::instr_at(str_property_instr_address);
-    ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr));
-    str_property_instr = Assembler::SetStrRegisterImmediateOffset(
-        str_property_instr, offset - kHeapObjectTag);
-    Assembler::instr_at_put(str_property_instr_address, str_property_instr);
-
-    // Patch the offset in the add instruction that is part of the
-    // write barrier.
-    Address add_offset_instr_address =
-        str_property_instr_address + Assembler::kInstrSize;
-    Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
-    ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
-    add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
-        add_offset_instr, offset - kHeapObjectTag);
-    Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);
-
-    // Indicate that code has changed.
-    CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
-  }
-
-  // Patch the map check.
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-
-  return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  if (V8::UseCrankshaft()) return false;
-
-  Address inline_end_address = 0;
-  if (InlinedICSiteMarker(address, &inline_end_address)
-      != Assembler::PROPERTY_ACCESS_INLINED) {
-    return false;
-  }
-
-  // Patch the map check.
-  Address ldr_map_instr_address =
-      inline_end_address -
-      (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
-      Assembler::kInstrSize);
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-  return true;
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  if (V8::UseCrankshaft()) return false;
-
-  // Find the end of the inlined code for handling the store if this is an
-  // inlined IC call site.
-  Address inline_end_address = 0;
-  if (InlinedICSiteMarker(address, &inline_end_address)
-      != Assembler::PROPERTY_ACCESS_INLINED) {
-    return false;
-  }
-
-  // Patch the map check.
-  Address ldr_map_instr_address =
-      inline_end_address -
-      (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
-      Assembler::kInstrSize);
-  Assembler::set_target_address_at(ldr_map_instr_address,
-                                   reinterpret_cast<Address>(map));
-  return true;
-}
-
 
 Object* KeyedLoadIC_Miss(Arguments args);
 
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
deleted file mode 100644
index df370c4..0000000
--- a/src/arm/jump-target-arm.cc
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
-  ASSERT(cgen()->has_valid_frame());
-  // Live non-frame registers are not allowed at unconditional jumps
-  // because we have no way of invalidating the corresponding results
-  // which are still live in the C++ code.
-  ASSERT(cgen()->HasValidEntryRegisters());
-
-  if (entry_frame_set_) {
-    if (entry_label_.is_bound()) {
-      // If we already bound and generated code at the destination then it
-      // is too late to ask for less optimistic type assumptions.
-      ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
-    }
-    // There already a frame expectation at the target.
-    cgen()->frame()->MergeTo(&entry_frame_);
-    cgen()->DeleteFrame();
-  } else {
-    // Clone the current frame to use as the expected one at the target.
-    set_entry_frame(cgen()->frame());
-    // Zap the fall-through frame since the jump was unconditional.
-    RegisterFile empty;
-    cgen()->SetFrame(NULL, &empty);
-  }
-  if (entry_label_.is_bound()) {
-    // You can't jump backwards to an already bound label unless you admitted
-    // up front that this was a bidirectional jump target.  Bidirectional jump
-    // targets will zap their type info when bound in case some later virtual
-    // frame with less precise type info branches to them.
-    ASSERT(direction_ != FORWARD_ONLY);
-  }
-  __ jmp(&entry_label_);
-}
-
-
-void JumpTarget::DoBranch(Condition cond, Hint ignored) {
-  ASSERT(cgen()->has_valid_frame());
-
-  if (entry_frame_set_) {
-    if (entry_label_.is_bound()) {
-      // If we already bound and generated code at the destination then it
-      // is too late to ask for less optimistic type assumptions.
-      ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
-    }
-    // We have an expected frame to merge to on the backward edge.
-    cgen()->frame()->MergeTo(&entry_frame_, cond);
-  } else {
-    // Clone the current frame to use as the expected one at the target.
-    set_entry_frame(cgen()->frame());
-  }
-  if (entry_label_.is_bound()) {
-    // You can't branch backwards to an already bound label unless you admitted
-    // up front that this was a bidirectional jump target.  Bidirectional jump
-    // targets will zap their type info when bound in case some later virtual
-    // frame with less precise type info branches to them.
-    ASSERT(direction_ != FORWARD_ONLY);
-  }
-  __ b(cond, &entry_label_);
-  if (cond == al) {
-    cgen()->DeleteFrame();
-  }
-}
-
-
-void JumpTarget::Call() {
-  // Call is used to push the address of the catch block on the stack as
-  // a return address when compiling try/catch and try/finally.  We
-  // fully spill the frame before making the call.  The expected frame
-  // at the label (which should be the only one) is the spilled current
-  // frame plus an in-memory return address.  The "fall-through" frame
-  // at the return site is the spilled current frame.
-  ASSERT(cgen()->has_valid_frame());
-  // There are no non-frame references across the call.
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ASSERT(!is_linked());
-
-  // Calls are always 'forward' so we use a copy of the current frame (plus
-  // one for a return address) as the expected frame.
-  ASSERT(!entry_frame_set_);
-  VirtualFrame target_frame = *cgen()->frame();
-  target_frame.Adjust(1);
-  set_entry_frame(&target_frame);
-
-  __ bl(&entry_label_);
-}
-
-
-void JumpTarget::DoBind() {
-  ASSERT(!is_bound());
-
-  // Live non-frame registers are not allowed at the start of a basic
-  // block.
-  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
-  if (cgen()->has_valid_frame()) {
-    if (direction_ != FORWARD_ONLY) cgen()->frame()->ForgetTypeInfo();
-    // If there is a current frame we can use it on the fall through.
-    if (!entry_frame_set_) {
-      entry_frame_ = *cgen()->frame();
-      entry_frame_set_ = true;
-    } else {
-      cgen()->frame()->MergeTo(&entry_frame_);
-      // On fall through we may have to merge both ways.
-      if (direction_ != FORWARD_ONLY) {
-        // This will not need to adjust the virtual frame entries that are
-        // register allocated since that was done above and they now match.
-        // But it does need to adjust the entry_frame_ of this jump target
-        // to make it potentially less optimistic.  Later code can branch back
-        // to this jump target and we need to assert that that code does not
-        // have weaker assumptions about types.
-        entry_frame_.MergeTo(cgen()->frame());
-      }
-    }
-  } else {
-    // If there is no current frame we must have an entry frame which we can
-    // copy.
-    ASSERT(entry_frame_set_);
-    RegisterFile empty;
-    cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty);
-  }
-
-  __ bind(&entry_label_);
-}
-
-
-#undef __
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 5d31473..faf6404 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -61,22 +61,21 @@
 
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
-  // Call instructions can use only fixed registers as
-  // temporaries and outputs because all registers
-  // are blocked by the calling convention.
-  // Inputs must use a fixed register.
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
   ASSERT(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
   }
   for (TempIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -301,6 +300,13 @@
 }
 
 
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
 void LCallKeyed::PrintDataTo(StringStream* stream) {
   stream->Add("[r2] #%d / ", arity());
 }
@@ -1114,9 +1120,9 @@
       return new LIsConstructCallAndBranch(TempRegister());
     } else {
       if (v->IsConstant()) {
-        if (HConstant::cast(v)->handle()->IsTrue()) {
+        if (HConstant::cast(v)->ToBoolean()) {
           return new LGoto(instr->FirstSuccessor()->block_id());
-        } else if (HConstant::cast(v)->handle()->IsFalse()) {
+        } else {
           return new LGoto(instr->SecondSuccessor()->block_id());
         }
       }
@@ -1212,6 +1218,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), r1);
+  argument_count_ -= instr->argument_count();
+  LInvokeFunction* result = new LInvokeFunction(function);
+  return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
   if (op == kMathLog || op == kMathSin || op == kMathCos) {
@@ -1329,7 +1343,7 @@
     return DoArithmeticD(Token::DIV, instr);
   } else if (instr->representation().IsInteger32()) {
     // TODO(1042) The fixed register allocation
-    // is needed because we call GenericBinaryOpStub from
+    // is needed because we call TypeRecordingBinaryOpStub from
     // the generated code, which requires registers r0
     // and r1 to be used. We should remove that
     // when we provide a native implementation.
@@ -1723,26 +1737,42 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  LLoadGlobal* result = new LLoadGlobal();
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new LLoadGlobalCell;
   return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
 
 
-LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), r0);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
   if (instr->check_hole_value()) {
     LOperand* temp = TempRegister();
     LOperand* value = UseRegister(instr->value());
-    return AssignEnvironment(new LStoreGlobal(value, temp));
+    return AssignEnvironment(new LStoreGlobalCell(value, temp));
   } else {
     LOperand* value = UseRegisterAtStart(instr->value());
-    return new LStoreGlobal(value, NULL);
+    return new LStoreGlobalCell(value, NULL);
   }
 }
 
 
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), r1);
+  LOperand* value = UseFixed(instr->value(), r0);
+  LStoreGlobalGeneric* result =
+      new LStoreGlobalGeneric(global_object, value);
+  return MarkAsCall(result, instr);
+}
+
+
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   return DefineAsRegister(new LLoadContextSlot(context));
@@ -1824,21 +1854,20 @@
 
 LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
     HLoadKeyedSpecializedArrayElement* instr) {
-  // TODO(danno): Add support for other external array types.
-  if (instr->array_type() != kExternalPixelArray) {
-    Abort("unsupported load for external array type.");
-    return NULL;
-  }
-
-  ASSERT(instr->representation().IsInteger32());
+  ExternalArrayType array_type = instr->array_type();
+  Representation representation(instr->representation());
+  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
+         (representation.IsDouble() && array_type == kExternalFloatArray));
   ASSERT(instr->key()->representation().IsInteger32());
-  LOperand* external_pointer =
-      UseRegisterAtStart(instr->external_pointer());
-  LOperand* key = UseRegisterAtStart(instr->key());
+  LOperand* external_pointer = UseRegister(instr->external_pointer());
+  LOperand* key = UseRegister(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
-      new LLoadKeyedSpecializedArrayElement(external_pointer,
-                                            key);
-  return DefineAsRegister(result);
+      new LLoadKeyedSpecializedArrayElement(external_pointer, key);
+  LInstruction* load_instr = DefineAsRegister(result);
+  // An unsigned int array load might overflow and cause a deopt, so make
+  // sure it has an environment.
+  return (array_type == kExternalUnsignedIntArray) ?
+      AssignEnvironment(load_instr) : load_instr;
 }
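
The environment is needed because a kExternalUnsignedIntArray element is a uint32, and any value above kMaxInt cannot be represented in the instruction's Integer32 result; the compiled code must then deoptimize and materialize a heap number. Concretely:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      uint32_t element = 0x80000000u;  // Legal array element, > kMaxInt.
      // A tagged 32-bit integer result would see this as negative, so the
      // load site keeps an environment to allow a bailout instead.
      bool fits_int32 = element <= 0x7fffffffu;
      printf("fits int32: %d\n", fits_int32 ? 1 : 0);  // 0 => must deopt.
      return 0;
    }
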
 
 
@@ -1873,23 +1902,24 @@
 
 LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
     HStoreKeyedSpecializedArrayElement* instr) {
-  // TODO(danno): Add support for other external array types.
-  if (instr->array_type() != kExternalPixelArray) {
-    Abort("unsupported store for external array type.");
-    return NULL;
-  }
-
-  ASSERT(instr->value()->representation().IsInteger32());
+  Representation representation(instr->value()->representation());
+  ExternalArrayType array_type = instr->array_type();
+  ASSERT((representation.IsInteger32() && array_type != kExternalFloatArray) ||
+         (representation.IsDouble() && array_type == kExternalFloatArray));
   ASSERT(instr->external_pointer()->representation().IsExternal());
   ASSERT(instr->key()->representation().IsInteger32());
 
   LOperand* external_pointer = UseRegister(instr->external_pointer());
-  LOperand* value = UseTempRegister(instr->value());  // changed by clamp.
+  bool val_is_temp_register = array_type == kExternalPixelArray ||
+      array_type == kExternalFloatArray;
+  LOperand* val = val_is_temp_register
+      ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
   LOperand* key = UseRegister(instr->key());
 
   return new LStoreKeyedSpecializedArrayElement(external_pointer,
                                                 key,
-                                                value);
+                                                val);
 }
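
Pixel and float stores take the value in a temp register because the generated store clobbers it in place; for pixel arrays the value is first clamped to the unsigned byte range. The clamping rule for an already-integer value, sketched:

    #include <stdint.h>

    // Clamp-to-[0, 255] semantics a pixel-array store applies before the
    // write; doing this in place is what makes the input register a temp.
    static uint8_t ClampToUint8(int32_t value) {
      if (value < 0) return 0;
      if (value > 255) return 255;
      return static_cast<uint8_t>(value);
    }
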
 
 
@@ -1930,6 +1960,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return MarkAsCall(DefineFixed(new LStringAdd(left, right), r0), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* string = UseRegister(instr->string());
   LOperand* index = UseRegisterOrConstant(instr->index());
@@ -2061,8 +2098,6 @@
     }
   }
 
-  ASSERT(env->length() == instr->environment_length());
-
   // If there is an instruction pending deoptimization environment create a
   // lazy bailout instruction to capture the environment.
   if (pending_deoptimization_ast_id_ == instr->ast_id()) {
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 77aabaf..4add6bf 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -106,6 +106,7 @@
   V(InstanceOfAndBranch)                        \
   V(InstanceOfKnownGlobal)                      \
   V(Integer32ToDouble)                          \
+  V(InvokeFunction)                             \
   V(IsNull)                                     \
   V(IsNullAndBranch)                            \
   V(IsObject)                                   \
@@ -119,7 +120,8 @@
   V(LoadElements)                               \
   V(LoadExternalArrayPointer)                   \
   V(LoadFunctionPrototype)                      \
-  V(LoadGlobal)                                 \
+  V(LoadGlobalCell)                             \
+  V(LoadGlobalGeneric)                          \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadKeyedSpecializedArrayElement)           \
@@ -144,12 +146,14 @@
   V(SmiUntag)                                   \
   V(StackCheck)                                 \
   V(StoreContextSlot)                           \
-  V(StoreGlobal)                                \
+  V(StoreGlobalCell)                            \
+  V(StoreGlobalGeneric)                         \
   V(StoreKeyedFastElement)                      \
   V(StoreKeyedGeneric)                          \
   V(StoreKeyedSpecializedArrayElement)          \
   V(StoreNamedField)                            \
   V(StoreNamedGeneric)                          \
+  V(StringAdd)                                  \
   V(StringCharCodeAt)                           \
   V(StringCharFromCode)                         \
   V(StringLength)                               \
@@ -1259,22 +1263,55 @@
 };
 
 
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
 };
 
 
-class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
  public:
-  LStoreGlobal(LOperand* value, LOperand* temp) {
+  explicit LLoadGlobalGeneric(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* global_object() { return inputs_[0]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+ public:
+  LStoreGlobalCell(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
-  DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+  explicit LStoreGlobalGeneric(LOperand* global_object,
+                               LOperand* value) {
+    inputs_[0] = global_object;
+    inputs_[1] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+  LOperand* global_object() { return InputAt(0); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LOperand* value() { return InputAt(1); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
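
In these declarations the LTemplateInstruction<R, I, T> parameters count results, inputs and temps: LStoreGlobalCell is <0, 1, 1> (no result, the value input, one temp) and LStoreGlobalGeneric is <0, 2, 0>. A stripped-down model of just that convention:

    // Only the numbering convention; the real template also exposes the
    // operand arrays to the register allocator.
    template <int kResults, int kInputs, int kTemps>
    struct LShape {
      static const int results = kResults;
      static const int inputs = kInputs;
      static const int temps = kTemps;
    };

    typedef LShape<0, 1, 1> StoreGlobalCellShape;     // value in, one temp.
    typedef LShape<0, 2, 0> StoreGlobalGenericShape;  // object and value in.
    typedef LShape<1, 1, 0> LoadGlobalGenericShape;   // one result, one input.
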
 
 
@@ -1377,6 +1414,23 @@
 };
 
 
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInvokeFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  LOperand* function() { return inputs_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
 class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallKeyed(LOperand* key) {
@@ -1605,6 +1659,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* value() { return inputs_[1]; }
   Handle<Object> name() const { return hydrogen()->name(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1644,6 +1699,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
@@ -1669,6 +1725,22 @@
 };
 
 
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringAdd(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+};
+
+
 class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
  public:
   LStringCharCodeAt(LOperand* string, LOperand* index) {
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 75406cf..2d415cb 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -91,7 +91,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
-  code->set_stack_slots(StackSlotCount());
+  code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
   Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -149,7 +149,7 @@
   __ add(fp, sp, Operand(2 * kPointerSize));  // Adjust FP to point to saved FP.
 
   // Reserve space for the stack slots needed by the code.
-  int slots = StackSlotCount();
+  int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
       __ mov(r0, Operand(slots));
@@ -263,7 +263,7 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   ASSERT(is_done());
-  safepoints_.Emit(masm(), StackSlotCount());
+  safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
 
@@ -459,7 +459,7 @@
     translation->StoreDoubleStackSlot(op->index());
   } else if (op->IsArgument()) {
     ASSERT(is_tagged);
-    int src_index = StackSlotCount() + op->index();
+    int src_index = GetStackSlotCount() + op->index();
     translation->StoreStackSlot(src_index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -484,11 +484,19 @@
 void LCodeGen::CallCode(Handle<Code> code,
                         RelocInfo::Mode mode,
                         LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
   ASSERT(instr != NULL);
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   __ Call(code, mode);
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, safepoint_mode);
 }
 
 
@@ -501,11 +509,21 @@
   RecordPosition(pointers->position());
 
   __ CallRuntime(function, num_arguments);
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr) {
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+                                          SafepointMode safepoint_mode) {
   // Create the environment to bail out to. If the call has side effects,
   // execution has to continue after the call; otherwise execution can resume
   // from a previous bailout point, repeating the call.
@@ -517,8 +535,16 @@
   }
 
   RegisterEnvironmentForDeoptimization(deoptimization_environment);
-  RecordSafepoint(instr->pointer_map(),
-                  deoptimization_environment->deoptimization_index());
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(),
+                    deoptimization_environment->deoptimization_index());
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(),
+        0,
+        deoptimization_environment->deoptimization_index());
+  }
 }
 
 
@@ -650,6 +676,8 @@
     Safepoint::Kind kind,
     int arguments,
     int deoptimization_index) {
+  ASSERT(expected_safepoint_kind_ == kind);
+
   const ZoneList<LOperand*>* operands = pointers->operands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deoptimization_index);
@@ -1015,7 +1043,7 @@
   Register left = ToRegister(instr->InputAt(0));
   Register right = ToRegister(instr->InputAt(1));
 
-  __ PushSafepointRegistersAndDoubles();
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
   // Move left to r1 and right to r0 for the stub call.
   if (left.is(r1)) {
     __ Move(r0, right);
@@ -1037,7 +1065,6 @@
                                          Safepoint::kNoDeoptimizationIndex);
   // Overwrite the stored value of r0 with the result of the stub.
   __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
-  __ PopSafepointRegistersAndDoubles();
 }
 
 
@@ -1460,11 +1487,8 @@
 
 
 void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  __ PushSafepointRegisters();
-  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  __ PopSafepointRegisters();
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
 }
 
 
@@ -2065,7 +2089,7 @@
       flags | InstanceofStub::kReturnTrueFalseObject);
   InstanceofStub stub(flags);
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
 
   // Get the temp register reserved by the instruction. This needs to be r4 as
   // its slot in the pushed safepoint register area is used to communicate the
@@ -2080,12 +2104,13 @@
   __ BlockConstPoolFor(kAdditionalDelta);
   __ mov(temp, Operand(delta * kPointerSize));
   __ StoreToSafepointRegisterSlot(temp, temp);
-  CallCode(stub.GetCode(),  RelocInfo::CODE_TARGET, instr);
+  CallCodeGeneric(stub.GetCode(),
+                  RelocInfo::CODE_TARGET,
+                  instr,
+                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   // Put the result value into the result register slot and
   // restore all registers.
   __ StoreToSafepointRegisterSlot(result, result);
-
-  __ PopSafepointRegisters();
 }
 
 
@@ -2155,7 +2180,7 @@
     __ push(r0);
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
-  int32_t sp_delta = (ParameterCount() + 1) * kPointerSize;
+  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
   __ mov(sp, fp);
   __ ldm(ia_w, sp, fp.bit() | lr.bit());
   __ add(sp, sp, Operand(sp_delta));
@@ -2163,7 +2188,7 @@
 }
 
 
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
   __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
@@ -2175,7 +2200,19 @@
 }
 
 
-void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(r0));
+  ASSERT(ToRegister(instr->result()).is(r0));
+
+  __ mov(r2, Operand(instr->name()));
+  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
+                                             : RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  CallCode(ic, mode, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
   Register value = ToRegister(instr->InputAt(0));
   Register scratch = scratch0();
 
@@ -2200,6 +2237,18 @@
 }
 
 
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(r1));
+  ASSERT(ToRegister(instr->value()).is(r0));
+
+  __ mov(r2, Operand(instr->name()));
+  Handle<Code> ic = instr->strict_mode()
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+}
+
+
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2361,12 +2410,14 @@
     __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
     __ cmp(scratch, ip);
     __ b(eq, &done);
-    __ LoadRoot(ip, Heap::kExternalPixelArrayMapRootIndex);
-    __ cmp(scratch, ip);
-    __ b(eq, &done);
     __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
     __ cmp(scratch, ip);
-    __ Check(eq, "Check for fast elements failed.");
+    __ b(eq, &done);
+    __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+    __ sub(scratch, scratch, Operand(FIRST_EXTERNAL_ARRAY_TYPE));
+    __ cmp(scratch, Operand(kExternalArrayTypeCount));
+    __ Check(cc, "Check for fast elements failed.");
     __ bind(&done);
   }
 }
@@ -2419,14 +2470,47 @@
 
 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
     LLoadKeyedSpecializedArrayElement* instr) {
-  ASSERT(instr->array_type() == kExternalPixelArray);
-
   Register external_pointer = ToRegister(instr->external_pointer());
   Register key = ToRegister(instr->key());
-  Register result = ToRegister(instr->result());
-
-  // Load the result.
-  __ ldrb(result, MemOperand(external_pointer, key));
+  ExternalArrayType array_type = instr->array_type();
+  if (array_type == kExternalFloatArray) {
+    CpuFeatures::Scope scope(VFP3);
+    DwVfpRegister result(ToDoubleRegister(instr->result()));
+    __ add(scratch0(), external_pointer, Operand(key, LSL, 2));
+    __ vldr(result.low(), scratch0(), 0);
+    __ vcvt_f64_f32(result, result.low());
+  } else {
+    Register result(ToRegister(instr->result()));
+    switch (array_type) {
+      case kExternalByteArray:
+        __ ldrsb(result, MemOperand(external_pointer, key));
+        break;
+      case kExternalUnsignedByteArray:
+      case kExternalPixelArray:
+        __ ldrb(result, MemOperand(external_pointer, key));
+        break;
+      case kExternalShortArray:
+        __ ldrsh(result, MemOperand(external_pointer, key, LSL, 1));
+        break;
+      case kExternalUnsignedShortArray:
+        __ ldrh(result, MemOperand(external_pointer, key, LSL, 1));
+        break;
+      case kExternalIntArray:
+        __ ldr(result, MemOperand(external_pointer, key, LSL, 2));
+        break;
+      case kExternalUnsignedIntArray:
+        __ ldr(result, MemOperand(external_pointer, key, LSL, 2));
+        __ cmp(result, Operand(0x80000000));
 +        // TODO(danno): we could be more clever here, perhaps having a special
 +        // version of the stub that detects if the overflow case actually
 +        // happens, and generates code that returns a double rather than an int.
+        DeoptimizeIf(cs, instr->environment());
+        break;
+      case kExternalFloatArray:
+        UNREACHABLE();
+        break;
+    }
+  }
 }
 
 
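The switch above picks a load instruction whose scale matches the element width: byte types use a plain register offset, 16-bit types shift the key left by one, 32-bit types by two, while the float case computes the address explicitly and widens f32 to f64 with vcvt. A small helper makes the size/shift relationship explicit (hypothetical helper, not part of the patch):

```cpp
// Hypothetical helper: log2 of the element size is the LSL amount
// used in the MemOperands above.
static int ElementSizeShift(ExternalArrayType type) {
  switch (type) {
    case kExternalByteArray:
    case kExternalUnsignedByteArray:
    case kExternalPixelArray:
      return 0;  // ldrsb/ldrb, no shift
    case kExternalShortArray:
    case kExternalUnsignedShortArray:
      return 1;  // ldrsh/ldrh, LSL #1
    case kExternalIntArray:
    case kExternalUnsignedIntArray:
    case kExternalFloatArray:
      return 2;  // ldr / vldr, LSL #2
  }
  UNREACHABLE();
  return 0;
}
```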
@@ -2617,7 +2701,7 @@
   __ Call(ip);
 
   // Set up deoptimization.
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
 
   // Restore context.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2655,44 +2739,43 @@
 
   // Input is negative. Reverse its sign.
   // Preserve the value of all registers.
-  __ PushSafepointRegisters();
+  {
+    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
 
-  // Registers were saved at the safepoint, so we can use
-  // many scratch registers.
-  Register tmp1 = input.is(r1) ? r0 : r1;
-  Register tmp2 = input.is(r2) ? r0 : r2;
-  Register tmp3 = input.is(r3) ? r0 : r3;
-  Register tmp4 = input.is(r4) ? r0 : r4;
+    // Registers were saved at the safepoint, so we can use
+    // many scratch registers.
+    Register tmp1 = input.is(r1) ? r0 : r1;
+    Register tmp2 = input.is(r2) ? r0 : r2;
+    Register tmp3 = input.is(r3) ? r0 : r3;
+    Register tmp4 = input.is(r4) ? r0 : r4;
 
-  // exponent: floating point exponent value.
+    // exponent: floating point exponent value.
 
-  Label allocated, slow;
-  __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
-  __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
-  __ b(&allocated);
+    Label allocated, slow;
+    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+    __ b(&allocated);
 
-  // Slow case: Call the runtime system to do the number allocation.
-  __ bind(&slow);
+    // Slow case: Call the runtime system to do the number allocation.
+    __ bind(&slow);
 
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  // Set the pointer to the new heap number in tmp.
-  if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
-  // Restore input_reg after call to runtime.
-  __ LoadFromSafepointRegisterSlot(input, input);
-  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+    // Set the pointer to the new heap number in tmp.
+    if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
+    // Restore input_reg after call to runtime.
+    __ LoadFromSafepointRegisterSlot(input, input);
+    __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
 
-  __ bind(&allocated);
-  // exponent: floating point exponent value.
-  // tmp1: allocated heap number.
-  __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
-  __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
-  __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
-  __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+    __ bind(&allocated);
+    // exponent: floating point exponent value.
+    // tmp1: allocated heap number.
+    __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
+    __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+    __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+    __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
 
-  __ StoreToSafepointRegisterSlot(tmp1, input);
-  __ PopSafepointRegisters();
+    __ StoreToSafepointRegisterSlot(tmp1, input);
+  }
 
   __ bind(&done);
 }
@@ -2778,9 +2861,49 @@
 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
   DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
   Register result = ToRegister(instr->result());
-  Register scratch1 = scratch0();
-  Register scratch2 = result;
-  __ EmitVFPTruncate(kRoundToNearest,
+  Register scratch1 = result;
+  Register scratch2 = scratch0();
+  Label done, check_sign_on_zero;
+
+  // Extract exponent bits.
+  __ vmov(scratch1, input.high());
+  __ ubfx(scratch2,
+          scratch1,
+          HeapNumber::kExponentShift,
+          HeapNumber::kExponentBits);
+
+  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
+  __ cmp(scratch2, Operand(HeapNumber::kExponentBias - 2));
+  __ mov(result, Operand(0), LeaveCC, le);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ b(le, &check_sign_on_zero);
+  } else {
+    __ b(le, &done);
+  }
+
+  // The following conversion will not work with numbers
+  // outside of ]-2^32, 2^32[.
+  __ cmp(scratch2, Operand(HeapNumber::kExponentBias + 32));
+  DeoptimizeIf(ge, instr->environment());
+
+  // Save the original sign for later comparison.
+  __ and_(scratch2, scratch1, Operand(HeapNumber::kSignMask));
+
+  __ vmov(double_scratch0(), 0.5);
+  __ vadd(input, input, double_scratch0());
+
 +  // Check sign of the result: if the sign changed, the input
 +  // value was in ]-0.5, 0[ and the result should be -0.
+  __ vmov(scratch1, input.high());
+  __ eor(scratch1, scratch1, Operand(scratch2), SetCC);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(mi, instr->environment());
+  } else {
+    __ mov(result, Operand(0), LeaveCC, mi);
+    __ b(mi, &done);
+  }
+
+  __ EmitVFPTruncate(kRoundToMinusInf,
                      double_scratch0().low(),
                      input,
                      scratch1,
@@ -2790,14 +2913,14 @@
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     // Test for -0.
-    Label done;
     __ cmp(result, Operand(0));
     __ b(ne, &done);
+    __ bind(&check_sign_on_zero);
     __ vmov(scratch1, input.high());
     __ tst(scratch1, Operand(HeapNumber::kSignMask));
     DeoptimizeIf(ne, instr->environment());
-    __ bind(&done);
   }
+  __ bind(&done);
 }
 
 
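The sequence above implements rounding as floor(x + 0.5) with explicit handling of the signed-zero cases and a deoptimization for inputs outside ]-2^32, 2^32[. A C-level model of what the emitted code computes (illustrative only; the real code deoptimizes where the comments say so):

```cpp
#include <cmath>

// Model of the emitted rounding sequence (illustrative sketch).
static double RoundModel(double x) {
  if (std::fabs(x) < 0.5) {
    // Exponent <= bias - 2: result is +/-0; the sign is checked
    // separately when kBailoutOnMinusZero is set.
    return std::copysign(0.0, x);
  }
  // |x| >= 2^32 would deoptimize here instead of continuing.
  double sum = x + 0.5;
  if (std::signbit(sum) != std::signbit(x)) {
    // x was in [-0.5, 0): adding 0.5 flipped the sign, result is -0.
    return -0.0;
  }
  return std::floor(sum);  // kRoundToMinusInf truncation
}
```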
@@ -2942,6 +3065,21 @@
 }
 
 
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(r1));
+  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  SafepointGenerator generator(this, pointers, env->deoptimization_index());
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(r1, count, CALL_FUNCTION, &generator);
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
 
@@ -3049,7 +3187,7 @@
 
   // Name is always in r2.
   __ mov(r2, Operand(instr->name()));
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3090,15 +3228,41 @@
 
 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
     LStoreKeyedSpecializedArrayElement* instr) {
-  ASSERT(instr->array_type() == kExternalPixelArray);
 
   Register external_pointer = ToRegister(instr->external_pointer());
   Register key = ToRegister(instr->key());
-  Register value = ToRegister(instr->value());
-
-  // Clamp the value to [0..255].
-  __ Usat(value, 8, Operand(value));
-  __ strb(value, MemOperand(external_pointer, key, LSL, 0));
+  ExternalArrayType array_type = instr->array_type();
+  if (array_type == kExternalFloatArray) {
+    CpuFeatures::Scope scope(VFP3);
+    DwVfpRegister value(ToDoubleRegister(instr->value()));
+    __ add(scratch0(), external_pointer, Operand(key, LSL, 2));
+    __ vcvt_f32_f64(double_scratch0().low(), value);
+    __ vstr(double_scratch0().low(), scratch0(), 0);
+  } else {
+    Register value(ToRegister(instr->value()));
+    switch (array_type) {
+      case kExternalPixelArray:
+        // Clamp the value to [0..255].
+        __ Usat(value, 8, Operand(value));
+        __ strb(value, MemOperand(external_pointer, key));
+        break;
+      case kExternalByteArray:
+      case kExternalUnsignedByteArray:
+        __ strb(value, MemOperand(external_pointer, key));
+        break;
+      case kExternalShortArray:
+      case kExternalUnsignedShortArray:
+        __ strh(value, MemOperand(external_pointer, key, LSL, 1));
+        break;
+      case kExternalIntArray:
+      case kExternalUnsignedIntArray:
+        __ str(value, MemOperand(external_pointer, key, LSL, 2));
+        break;
+      case kExternalFloatArray:
+        UNREACHABLE();
+        break;
+    }
+  }
 }
 
 
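Only the pixel-array case saturates: usat clamps the value to [0..255] before the byte store, while the remaining integer cases simply store the low 8, 16 or 32 bits. A scalar model of that clamp (hypothetical helper, not part of the patch):

```cpp
#include <stdint.h>

// Scalar equivalent of "usat value, #8, value" as used for pixel arrays.
static uint8_t ClampToUint8(int32_t v) {
  if (v < 0) return 0;      // usat saturates negative inputs to 0
  if (v > 255) return 255;  // and anything above 2^8 - 1 to 255
  return static_cast<uint8_t>(v);
}
```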
@@ -3107,13 +3271,21 @@
   ASSERT(ToRegister(instr->key()).is(r1));
   ASSERT(ToRegister(instr->value()).is(r0));
 
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  __ push(ToRegister(instr->left()));
+  __ push(ToRegister(instr->right()));
+  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   class DeferredStringCharCodeAt: public LDeferredCode {
    public:
@@ -3230,7 +3402,7 @@
   // contained in the register pointer map.
   __ mov(result, Operand(0));
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   __ push(string);
   // Push the index as a smi. This is safe because of the checks in
   // DoStringCharCodeAt above.
@@ -3243,15 +3415,12 @@
     __ SmiTag(index);
     __ push(index);
   }
-  __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
   if (FLAG_debug_code) {
     __ AbortIfNotSmi(r0);
   }
   __ SmiUntag(r0);
   __ StoreToSafepointRegisterSlot(r0, result);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3294,14 +3463,11 @@
   // contained in the register pointer map.
   __ mov(result, Operand(0));
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   __ SmiTag(char_code);
   __ push(char_code);
-  __ CallRuntimeSaveDoubles(Runtime::kCharFromCode);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 1, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
   __ StoreToSafepointRegisterSlot(r0, result);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3357,7 +3523,7 @@
   SwVfpRegister flt_scratch = s0;
 
   // Preserve the value of all registers.
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
 
   // There was overflow, so bits 30 and 31 of the original integer
   // disagree. Try to allocate a heap number in new space and store
@@ -3382,9 +3548,7 @@
   // integer value.
   __ mov(ip, Operand(0));
   __ StoreToSafepointRegisterSlot(ip, reg);
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   if (!reg.is(r0)) __ mov(reg, r0);
 
   // Done. Put the value in dbl_scratch into the value of the allocated heap
@@ -3393,7 +3557,6 @@
   __ sub(ip, reg, Operand(kHeapObjectTag));
   __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
   __ StoreToSafepointRegisterSlot(reg, reg);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3433,12 +3596,9 @@
   Register reg = ToRegister(instr->result());
   __ mov(reg, Operand(0));
 
-  __ PushSafepointRegisters();
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   __ StoreToSafepointRegisterSlot(r0, reg);
-  __ PopSafepointRegisters();
 }
 
 
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index caa85d2..1110ea6 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -57,7 +57,8 @@
         status_(UNUSED),
         deferred_(8),
         osr_pc_offset_(-1),
-        resolver_(this) {
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
   }
 
@@ -137,7 +138,7 @@
   bool is_aborted() const { return status_ == ABORTED; }
 
   int strict_mode_flag() const {
-    return info()->is_strict() ? kStrictMode : kNonStrictMode;
+    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -157,8 +158,8 @@
                        Register temporary,
                        Register temporary2);
 
-  int StackSlotCount() const { return chunk()->spill_slot_count(); }
-  int ParameterCount() const { return scope()->num_parameters(); }
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
 
   void Abort(const char* format, ...);
   void Comment(const char* format, ...);
@@ -172,12 +173,24 @@
   bool GenerateDeferredCode();
   bool GenerateSafepointTable();
 
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
   void CallCode(Handle<Code> code,
                 RelocInfo::Mode mode,
                 LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode);
+
   void CallRuntime(const Runtime::Function* function,
                    int num_arguments,
                    LInstruction* instr);
+
   void CallRuntime(Runtime::FunctionId id,
                    int num_arguments,
                    LInstruction* instr) {
@@ -185,6 +198,10 @@
     CallRuntime(function, num_arguments, instr);
   }
 
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr);
+
   // Generate a direct call to a known function.  Expects the function
   // to be in r1.
   void CallKnownFunction(Handle<JSFunction> function,
@@ -193,7 +210,9 @@
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
-  void RegisterLazyDeoptimization(LInstruction* instr);
+  void RegisterLazyDeoptimization(LInstruction* instr,
+                                  SafepointMode safepoint_mode);
+
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
 
@@ -292,6 +311,48 @@
   // Compiler from a set of parallel moves to a sequential list of moves.
   LGapResolver resolver_;
 
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope BASE_EMBEDDED {
+   public:
+    PushSafepointRegistersScope(LCodeGen* codegen,
+                                Safepoint::Kind kind)
+        : codegen_(codegen) {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = kind;
+
+      switch (codegen_->expected_safepoint_kind_) {
+        case Safepoint::kWithRegisters:
+          codegen_->masm_->PushSafepointRegisters();
+          break;
+        case Safepoint::kWithRegistersAndDoubles:
+          codegen_->masm_->PushSafepointRegistersAndDoubles();
+          break;
+        default:
+          UNREACHABLE();
+      }
+    }
+
+    ~PushSafepointRegistersScope() {
+      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
+      ASSERT((kind & Safepoint::kWithRegisters) != 0);
+      switch (kind) {
+        case Safepoint::kWithRegisters:
+          codegen_->masm_->PopSafepointRegisters();
+          break;
+        case Safepoint::kWithRegistersAndDoubles:
+          codegen_->masm_->PopSafepointRegistersAndDoubles();
+          break;
+        default:
+          UNREACHABLE();
+      }
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
   friend class LDeferredCode;
   friend class LEnvironment;
   friend class SafepointGenerator;
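The scope class above replaces the manual Push/Pop pairs removed throughout lithium-codegen-arm.cc; because the pop happens in the destructor, no path out of a deferred-code block can skip it, and the expected_safepoint_kind_ bookkeeping lets RecordSafepoint assert that each safepoint is recorded with the register state actually on the stack. The pattern in isolation (a generic RAII sketch with a placeholder CodeGenerator type, not V8 code):

```cpp
// Generic sketch of the push/pop pairing the scope enforces.
class RegisterSaveScope {
 public:
  explicit RegisterSaveScope(CodeGenerator* cgen) : cgen_(cgen) {
    cgen_->PushSafepointRegisters();  // constructor pushes...
  }
  ~RegisterSaveScope() {
    cgen_->PopSafepointRegisters();   // ...destructor always pops.
  }
 private:
  CodeGenerator* cgen_;
};
```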
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 3a1a8b6..6a095d3 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -32,18 +32,21 @@
 #if defined(V8_TARGET_ARCH_ARM)
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "runtime.h"
 
 namespace v8 {
 namespace internal {
 
-MacroAssembler::MacroAssembler(void* buffer, int size)
-    : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+    : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true),
-      code_object_(HEAP->undefined_value()) {
+      allow_stub_calls_(true) {
+  if (isolate() != NULL) {
+    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+                                  isolate());
+  }
 }
 
 
@@ -292,7 +295,7 @@
 
   } else if (!src2.is_single_instruction() &&
              !src2.must_use_constant_pool() &&
-             Isolate::Current()->cpu_features()->IsSupported(ARMv7) &&
+             CpuFeatures::IsSupported(ARMv7) &&
              IsPowerOf2(src2.immediate() + 1)) {
     ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);
 
@@ -305,7 +308,7 @@
 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                           Condition cond) {
   ASSERT(lsb < 32);
-  if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7)) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     and_(dst, src1, Operand(mask), LeaveCC, cond);
     if (lsb != 0) {
@@ -320,7 +323,7 @@
 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                           Condition cond) {
   ASSERT(lsb < 32);
-  if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7)) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     and_(dst, src1, Operand(mask), LeaveCC, cond);
     int shift_up = 32 - lsb - width;
@@ -348,7 +351,7 @@
   ASSERT(lsb + width < 32);
   ASSERT(!scratch.is(dst));
   if (width == 0) return;
-  if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7)) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     bic(dst, dst, Operand(mask));
     and_(scratch, src, Operand((1 << width) - 1));
@@ -362,7 +365,7 @@
 
 void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
   ASSERT(lsb < 32);
-  if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7)) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     bic(dst, dst, Operand(mask));
   } else {
@@ -373,7 +376,7 @@
 
 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                           Condition cond) {
-  if (!Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (!CpuFeatures::IsSupported(ARMv7)) {
     ASSERT(!dst.is(pc) && !src.rm().is(pc));
     ASSERT((satpos >= 0) && (satpos <= 31));
 
@@ -619,7 +622,7 @@
   ASSERT_EQ(dst1.code() + 1, dst2.code());
 
   // Generate two ldr instructions if ldrd is not available.
-  if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7)) {
     CpuFeatures::Scope scope(ARMv7);
     ldrd(dst1, dst2, src, cond);
   } else {
@@ -644,7 +647,7 @@
   ASSERT_EQ(src1.code() + 1, src2.code());
 
   // Generate two str instructions if strd is not available.
-  if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7)) {
     CpuFeatures::Scope scope(ARMv7);
     strd(src1, src2, dst, cond);
   } else {
@@ -746,12 +749,10 @@
 
   // Optionally save all double registers.
   if (save_doubles) {
-    sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
-    const int offset = -2 * kPointerSize;
-    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
-      DwVfpRegister reg = DwVfpRegister::from_code(i);
-      vstr(reg, fp, offset - ((i + 1) * kDoubleSize));
-    }
+    DwVfpRegister first = d0;
+    DwVfpRegister last =
+        DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
+    vstm(db_w, sp, first, last);
     // Note that d0 will be accessible at
     //   fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
     // since the sp slot and code slot were pushed after the fp.
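vstm stores the whole block in one instruction, so d0 lands at the lowest address just below the code and sp slots. Spelling out the offset from the comment above, with the ARM values assumed here (4-byte pointers, 8-byte doubles, 16 double registers):

```cpp
// Assumed ARM constants; the result matches the comment above.
const int kPointerSize = 4;
const int kDoubleSize = 8;
const int kNumDoubleRegisters = 16;
const int kD0OffsetFromFp =
    -2 * kPointerSize - kNumDoubleRegisters * kDoubleSize;  // -136 bytes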
@@ -808,11 +809,13 @@
                                     Register argument_count) {
   // Optionally restore all double registers.
   if (save_doubles) {
-    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
-      DwVfpRegister reg = DwVfpRegister::from_code(i);
-      const int offset = -2 * kPointerSize;
-      vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
-    }
+    // Calculate the stack location of the saved doubles and restore them.
+    const int offset = 2 * kPointerSize;
+    sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
+    DwVfpRegister first = d0;
+    DwVfpRegister last =
+        DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
+    vldm(ia, r3, first, last);
   }
 
   // Clear top frame.
@@ -836,11 +839,7 @@
 }
 
 void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
-#if !defined(USE_ARM_EABI)
-  UNREACHABLE();
-#else
   vmov(dst, r0, r1);
-#endif
 }
 
 
@@ -1799,9 +1798,10 @@
   bind(&delete_allocated_handles);
   str(r5, MemOperand(r7, kLimitOffset));
   mov(r4, r0);
-  PrepareCallCFunction(0, r5);
+  PrepareCallCFunction(1, r5);
+  mov(r0, Operand(ExternalReference::isolate_address()));
   CallCFunction(
-      ExternalReference::delete_handle_scope_extensions(isolate()), 0);
+      ExternalReference::delete_handle_scope_extensions(isolate()), 1);
   mov(r0, r4);
   jmp(&leave_exit_frame);
 
@@ -1902,7 +1902,7 @@
                                     Register scratch2,
                                     DwVfpRegister double_scratch,
                                     Label *not_int32) {
-  if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     sub(scratch, source, Operand(kHeapObjectTag));
     vldr(double_scratch, scratch, HeapNumber::kValueOffset);
@@ -1998,7 +1998,7 @@
                                      Register scratch1,
                                      Register scratch2,
                                      CheckForInexactConversion check_inexact) {
-  ASSERT(Isolate::Current()->cpu_features()->IsSupported(VFP3));
+  ASSERT(CpuFeatures::IsSupported(VFP3));
   CpuFeatures::Scope scope(VFP3);
   Register prev_fpscr = scratch1;
   Register scratch = scratch2;
@@ -2156,7 +2156,7 @@
 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                          Register src,
                                          int num_least_bits) {
-  if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
+  if (CpuFeatures::IsSupported(ARMv7)) {
     ubfx(dst, src, kSmiTagSize, num_least_bits);
   } else {
     mov(dst, Operand(src, ASR, kSmiTagSize));
@@ -2797,9 +2797,6 @@
 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
   int frame_alignment = ActivationFrameAlignment();
 
-  // Reserve space for Isolate address which is always passed as last parameter
-  num_arguments += 1;
-
   // Up to four simple arguments are passed in registers r0..r3.
   int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
                                0 : num_arguments - kRegisterPassedArguments;
@@ -2836,19 +2833,6 @@
                                          ExternalReference function_reference,
                                          Register scratch,
                                          int num_arguments) {
-  // Push Isolate address as the last argument.
-  if (num_arguments < kRegisterPassedArguments) {
-    Register arg_to_reg[] = {r0, r1, r2, r3};
-    Register r = arg_to_reg[num_arguments];
-    mov(r, Operand(ExternalReference::isolate_address()));
-  } else {
-    int stack_passed_arguments = num_arguments - kRegisterPassedArguments;
-    // Push Isolate address on the stack after the arguments.
-    mov(scratch, Operand(ExternalReference::isolate_address()));
-    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
-  }
-  num_arguments += 1;
-
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -2911,7 +2895,7 @@
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
-      masm_(address, size_ + Assembler::kGap) {
+      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 2b81c08..ab5efb0 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -90,7 +90,11 @@
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
-  MacroAssembler(void* buffer, int size);
+  // The isolate parameter can be NULL if the macro assembler should
+  // not use isolate-dependent functionality. In this case, it's the
 +  // responsibility of the caller never to invoke any such function on
 +  // the macro assembler.
+  MacroAssembler(Isolate* isolate, void* buffer, int size);
 
   // Jump, Call, and Ret pseudo instructions implementing inter-working.
   void Jump(Register target, Condition cond = al);
@@ -781,7 +785,10 @@
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
 
-  Handle<Object> CodeObject() { return code_object_; }
+  Handle<Object> CodeObject() {
+    ASSERT(!code_object_.is_null());
+    return code_object_;
+  }
 
 
   // ---------------------------------------------------------------------------
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 8d540d4..4bd8c80 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -116,7 +116,7 @@
 RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
     Mode mode,
     int registers_to_save)
-    : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+    : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
       mode_(mode),
       num_registers_(registers_to_save),
       num_saved_registers_(registers_to_save),
@@ -347,7 +347,7 @@
     __ sub(current_input_offset(), r2, end_of_input_address());
   } else {
     ASSERT(mode_ == UC16);
-    int argument_count = 3;
+    int argument_count = 4;
     __ PrepareCallCFunction(argument_count, r2);
 
     // r0 - offset of start of capture
@@ -358,6 +358,7 @@
     //   r0: Address byte_offset1 - Address captured substring's start.
     //   r1: Address byte_offset2 - Address of current character position.
     //   r2: size_t byte_length - length of capture in bytes(!)
+    //   r3: Isolate* isolate
 
     // Address of start of capture.
     __ add(r0, r0, Operand(end_of_input_address()));
@@ -367,6 +368,8 @@
     __ mov(r4, Operand(r1));
     // Address of current input position.
     __ add(r1, current_input_offset(), Operand(end_of_input_address()));
+    // Isolate.
+    __ mov(r3, Operand(ExternalReference::isolate_address()));
 
     ExternalReference function =
         ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
@@ -778,10 +781,11 @@
     Label grow_failed;
 
     // Call GrowStack(backtrack_stackpointer(), &stack_base)
-    static const int num_arguments = 2;
+    static const int num_arguments = 3;
     __ PrepareCallCFunction(num_arguments, r0);
     __ mov(r0, backtrack_stackpointer());
     __ add(r1, frame_pointer(), Operand(kStackHighEnd));
+    __ mov(r2, Operand(ExternalReference::isolate_address()));
     ExternalReference grow_stack =
         ExternalReference::re_grow_stack(masm_->isolate());
     __ CallCFunction(grow_stack, num_arguments);
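Both call sites now reserve one extra argument slot and pass the isolate explicitly, since PrepareCallCFunction no longer appends it implicitly (see the macro-assembler change above). The C side of such a helper would then take the isolate as its final parameter, roughly (signature assumed for illustration):

```cpp
// Assumed shape of the C helper after this change; illustrative only.
static Address GrowStack(Address stack_pointer,
                         Address* stack_base,
                         Isolate* isolate);
```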
diff --git a/src/arm/register-allocator-arm-inl.h b/src/arm/register-allocator-arm-inl.h
deleted file mode 100644
index 945cdeb..0000000
--- a/src/arm/register-allocator-arm-inl.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
-#define V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
-  return reg.is(cp) || reg.is(fp) || reg.is(sp) || reg.is(pc);
-}
-
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers.  The mapping is:
-//
-// r0 <-> 0
-// r1 <-> 1
-// r2 <-> 2
-// r3 <-> 3
-// r4 <-> 4
-// r5 <-> 5
-// r6 <-> 6
-// r7 <-> 7
-// r9 <-> 8
-// r10 <-> 9
-// ip <-> 10
-// lr <-> 11
-
-int RegisterAllocator::ToNumber(Register reg) {
-  ASSERT(reg.is_valid() && !IsReserved(reg));
-  const int kNumbers[] = {
-    0,   // r0
-    1,   // r1
-    2,   // r2
-    3,   // r3
-    4,   // r4
-    5,   // r5
-    6,   // r6
-    7,   // r7
-    -1,  // cp
-    8,   // r9
-    9,   // r10
-    -1,  // fp
-    10,  // ip
-    -1,  // sp
-    11,  // lr
-    -1   // pc
-  };
-  return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
-  ASSERT(num >= 0 && num < kNumRegisters);
-  const Register kRegisters[] =
-      { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr };
-  return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
-  Reset();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
diff --git a/src/arm/register-allocator-arm.cc b/src/arm/register-allocator-arm.cc
deleted file mode 100644
index 3b35574..0000000
--- a/src/arm/register-allocator-arm.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
-  UNIMPLEMENTED();
-}
-
-
-void Result::ToRegister(Register target) {
-  UNIMPLEMENTED();
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
-  // No byte registers on ARM.
-  UNREACHABLE();
-  return Result();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 46797d9..da554c2 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -67,6 +67,7 @@
   Simulator* sim_;
 
   int32_t GetRegisterValue(int regnum);
+  double GetRegisterPairDoubleValue(int regnum);
   double GetVFPDoubleRegisterValue(int regnum);
   bool GetValue(const char* desc, int32_t* value);
   bool GetVFPSingleValue(const char* desc, float* value);
@@ -168,6 +169,11 @@
 }
 
 
+double ArmDebugger::GetRegisterPairDoubleValue(int regnum) {
+  return sim_->get_double_from_register_pair(regnum);
+}
+
+
 double ArmDebugger::GetVFPDoubleRegisterValue(int regnum) {
   return sim_->get_double_from_d_register(regnum);
 }
@@ -305,14 +311,22 @@
         // Leave the debugger shell.
         done = true;
       } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
-        if (argc == 2) {
+        if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
           int32_t value;
           float svalue;
           double dvalue;
           if (strcmp(arg1, "all") == 0) {
             for (int i = 0; i < kNumRegisters; i++) {
               value = GetRegisterValue(i);
-              PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value);
+              PrintF("%3s: 0x%08x %10d", Registers::Name(i), value, value);
+              if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
+                  i < 8 &&
+                  (i % 2) == 0) {
+                dvalue = GetRegisterPairDoubleValue(i);
+                PrintF(" (%f)\n", dvalue);
+              } else {
+                PrintF("\n");
+              }
             }
             for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
               dvalue = GetVFPDoubleRegisterValue(i);
@@ -550,6 +564,7 @@
         PrintF("print <register>\n");
         PrintF("  print register content (alias 'p')\n");
         PrintF("  use register name 'all' to print all registers\n");
+        PrintF("  add argument 'fp' to print register pair double values\n");
         PrintF("printobject <register>\n");
         PrintF("  print an object from a register (alias 'po')\n");
         PrintF("flags\n");
@@ -873,6 +888,19 @@
 }
 
 
+double Simulator::get_double_from_register_pair(int reg) {
+  ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
+
+  double dm_val = 0.0;
 +  // Read the bits from the unsigned integer registers_[] array
 +  // into the double precision floating point value and return it.
 +  char buffer[2 * sizeof(registers_[0])];
 +  memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
 +  memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
 +  return dm_val;
+}
+
+
 void Simulator::set_dw_register(int dreg, const int* dbl) {
   ASSERT((dreg >= 0) && (dreg < num_d_registers));
   registers_[dreg] = dbl[0];
@@ -938,12 +966,7 @@
   // 2*sreg and 2*sreg+1.
   char buffer[2 * sizeof(vfp_register[0])];
   memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
-#ifndef BIG_ENDIAN_FLOATING_POINT
   memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
-#else
-  memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0]));
-  memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0]));
-#endif
 }
 
 
@@ -980,12 +1003,7 @@
   // Read the bits from the unsigned integer vfp_register[] array
   // into the double precision floating point value and return it.
   char buffer[2 * sizeof(vfp_register[0])];
-#ifdef BIG_ENDIAN_FLOATING_POINT
-  memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0]));
-  memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0]));
-#else
   memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
-#endif
   memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
   return(dm_val);
 }
@@ -1504,36 +1522,34 @@
 }
 
 
-// Addressing Mode 4 - Load and Store Multiple
-void Simulator::HandleRList(Instruction* instr, bool load) {
+void Simulator::ProcessPUW(Instruction* instr,
+                           int num_regs,
+                           int reg_size,
+                           intptr_t* start_address,
+                           intptr_t* end_address) {
   int rn = instr->RnValue();
   int32_t rn_val = get_register(rn);
-  int rlist = instr->RlistValue();
-  int num_regs = count_bits(rlist);
-
-  intptr_t start_address = 0;
-  intptr_t end_address = 0;
   switch (instr->PUField()) {
     case da_x: {
       UNIMPLEMENTED();
       break;
     }
     case ia_x: {
-      start_address = rn_val;
-      end_address = rn_val + (num_regs * 4) - 4;
-      rn_val = rn_val + (num_regs * 4);
+      *start_address = rn_val;
+      *end_address = rn_val + (num_regs * reg_size) - reg_size;
+      rn_val = rn_val + (num_regs * reg_size);
       break;
     }
     case db_x: {
-      start_address = rn_val - (num_regs * 4);
-      end_address = rn_val - 4;
-      rn_val = start_address;
+      *start_address = rn_val - (num_regs * reg_size);
+      *end_address = rn_val - reg_size;
+      rn_val = *start_address;
       break;
     }
     case ib_x: {
-      start_address = rn_val + 4;
-      end_address = rn_val + (num_regs * 4);
-      rn_val = end_address;
+      *start_address = rn_val + reg_size;
+      *end_address = rn_val + (num_regs * reg_size);
+      rn_val = *end_address;
       break;
     }
     default: {
@@ -1544,6 +1560,17 @@
   if (instr->HasW()) {
     set_register(rn, rn_val);
   }
+}
+
+// Addressing Mode 4 - Load and Store Multiple
+void Simulator::HandleRList(Instruction* instr, bool load) {
+  int rlist = instr->RlistValue();
+  int num_regs = count_bits(rlist);
+
+  intptr_t start_address = 0;
+  intptr_t end_address = 0;
+  ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
+
   intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
   int reg = 0;
   while (rlist != 0) {
@@ -1562,6 +1589,57 @@
 }
 
 
+// Addressing Mode 6 - Load and Store Multiple Coprocessor registers.
+void Simulator::HandleVList(Instruction* instr) {
+  VFPRegPrecision precision =
+      (instr->SzValue() == 0) ? kSinglePrecision : kDoublePrecision;
+  int operand_size = (precision == kSinglePrecision) ? 4 : 8;
+
+  bool load = (instr->VLValue() == 0x1);
+
+  int vd;
+  int num_regs;
+  vd = instr->VFPDRegValue(precision);
+  if (precision == kSinglePrecision) {
+    num_regs = instr->Immed8Value();
+  } else {
+    num_regs = instr->Immed8Value() / 2;
+  }
+
+  intptr_t start_address = 0;
+  intptr_t end_address = 0;
+  ProcessPUW(instr, num_regs, operand_size, &start_address, &end_address);
+
+  intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+  for (int reg = vd; reg < vd + num_regs; reg++) {
+    if (precision == kSinglePrecision) {
+      if (load) {
+        set_s_register_from_sinteger(
+            reg, ReadW(reinterpret_cast<int32_t>(address), instr));
+      } else {
+        WriteW(reinterpret_cast<int32_t>(address),
+               get_sinteger_from_s_register(reg), instr);
+      }
+      address += 1;
+    } else {
+      if (load) {
+        set_s_register_from_sinteger(
+            2 * reg, ReadW(reinterpret_cast<int32_t>(address), instr));
+        set_s_register_from_sinteger(
+            2 * reg + 1, ReadW(reinterpret_cast<int32_t>(address + 1), instr));
+      } else {
+        WriteW(reinterpret_cast<int32_t>(address),
+               get_sinteger_from_s_register(2 * reg), instr);
+        WriteW(reinterpret_cast<int32_t>(address + 1),
+               get_sinteger_from_s_register(2 * reg + 1), instr);
+      }
+      address += 2;
+    }
+  }
+  ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
+}
+
+
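ProcessPUW factors the addressing-mode-4/6 bookkeeping out of HandleRList so HandleVList can reuse it with an 8-byte register size. Worked through for the db_x (decrement-before) case that `vstm db_w, sp, d0, d15` uses, with an assumed base value:

```cpp
// Standalone version of the db_x arithmetic in ProcessPUW (illustrative).
const intptr_t sp_val = 0x1000;  // assumed initial base register value
const int num_regs = 16, reg_size = 8;
const intptr_t start_address = sp_val - num_regs * reg_size;  // 0xF80: d0 slot
const intptr_t end_address = sp_val - reg_size;               // 0xFF8: d15 slot
// With writeback (W set), the base register becomes start_address.
```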
 // Calls into the V8 runtime are based on this very simple interface.
 // Note: To be able to return two values from some calls the code in runtime.cc
 // uses the ObjectPair which is essentially two 32-bit values stuffed into a
@@ -2945,9 +3023,17 @@
         }
         break;
       }
+      case 0x4:
+      case 0x5:
+      case 0x6:
+      case 0x7:
+      case 0x9:
+      case 0xB:
 +        // Load/store multiple single-precision registers: vldm/vstm.
+        HandleVList(instr);
+        break;
       default:
         UNIMPLEMENTED();  // Not used by V8.
-        break;
     }
   } else if (instr->CoprocessorValue() == 0xB) {
     switch (instr->OpcodeValue()) {
@@ -2994,9 +3080,14 @@
         }
         break;
       }
+      case 0x4:
+      case 0x5:
+      case 0x9:
 +        // Load/store multiple double-precision registers: vldm/vstm.
+        HandleVList(instr);
+        break;
       default:
         UNIMPLEMENTED();  // Not used by V8.
-        break;
     }
   } else {
     UNIMPLEMENTED();  // Not used by V8.
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index b7b1b68..a16cae5 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -155,6 +155,7 @@
   // instruction.
   void set_register(int reg, int32_t value);
   int32_t get_register(int reg) const;
+  double get_double_from_register_pair(int reg);
   void set_dw_register(int dreg, const int* dbl);
 
   // Support for VFP.
@@ -236,7 +237,13 @@
   // Helper functions to decode common "addressing" modes
   int32_t GetShiftRm(Instruction* instr, bool* carry_out);
   int32_t GetImm(Instruction* instr, bool* carry_out);
+  void ProcessPUW(Instruction* instr,
+                  int num_regs,
+                  int operand_size,
+                  intptr_t* start_address,
+                  intptr_t* end_address);
   void HandleRList(Instruction* instr, bool load);
 +  void HandleVList(Instruction* instr);
   void SoftwareInterrupt(Instruction* instr);
 
   // Stop helper functions.
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 9936ac0..47d675b 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_ARM)
 
 #include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "stub-cache.h"
 
 namespace v8 {
@@ -953,7 +953,7 @@
                             Register fval,
                             Register scratch1,
                             Register scratch2) {
-  if (masm->isolate()->cpu_features()->IsSupported(VFP3)) {
+  if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     __ vmov(s0, ival);
     __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
@@ -2048,7 +2048,7 @@
   //  -- sp[argc * 4]           : receiver
   // -----------------------------------
 
-  if (!masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+  if (!CpuFeatures::IsSupported(VFP3)) {
       return heap()->undefined_value();
   }
 
@@ -3509,7 +3509,7 @@
       __ ldr(value, MemOperand(r3, key, LSL, 1));
       break;
     case kExternalFloatArray:
-      if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+      if (CpuFeatures::IsSupported(VFP3)) {
         CpuFeatures::Scope scope(VFP3);
         __ add(r2, r3, Operand(key, LSL, 1));
         __ vldr(s0, r2, 0);
@@ -3548,7 +3548,7 @@
     // Now we can use r0 for the result as key is not needed any more.
     __ mov(r0, r5);
 
-    if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       __ vmov(s0, value);
       __ vcvt_f64_s32(d0, s0);
@@ -3563,7 +3563,7 @@
     // The test is different for unsigned int values. Since we need
     // the value to be in the range of a positive smi, we can't
     // handle either of the top two bits being set in the value.
-    if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       Label box_int, done;
       __ tst(value, Operand(0xC0000000));
@@ -3627,7 +3627,7 @@
   } else if (array_type == kExternalFloatArray) {
     // For the floating-point array type, we need to always allocate a
     // HeapNumber.
-    if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
       // Allocate a HeapNumber for the result. Don't use r0 and r1 as
       // AllocateHeapNumber clobbers all registers - also when jumping due to
@@ -3820,7 +3820,7 @@
     // The WebGL specification leaves the behavior of storing NaN and
     // +/-Infinity into integer arrays basically undefined. For more
     // reproducible behavior, convert these to zero.
-    if (masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+    if (CpuFeatures::IsSupported(VFP3)) {
       CpuFeatures::Scope scope(VFP3);
 
       if (array_type == kExternalFloatArray) {
diff --git a/src/arm/virtual-frame-arm-inl.h b/src/arm/virtual-frame-arm-inl.h
deleted file mode 100644
index 6a7902a..0000000
--- a/src/arm/virtual-frame-arm-inl.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_ARM_INL_H_
-#define V8_VIRTUAL_FRAME_ARM_INL_H_
-
-#include "assembler-arm.h"
-#include "virtual-frame-arm.h"
-
-namespace v8 {
-namespace internal {
-
-// These VirtualFrame methods are defined here, in the -inl.h file, so
-// that callers can inline them.
-MemOperand VirtualFrame::ParameterAt(int index) {
-  // Index -1 corresponds to the receiver.
-  ASSERT(-1 <= index);  // -1 is the receiver.
-  ASSERT(index <= parameter_count());
-  return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
-}
-
-// The receiver frame slot.
-MemOperand VirtualFrame::Receiver() {
-  return ParameterAt(-1);
-}
-
-
-void VirtualFrame::Forget(int count) {
-  SpillAll();
-  LowerHeight(count);
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_VIRTUAL_FRAME_ARM_INL_H_
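
The `ParameterAt()` formula in the file deleted above maps parameter indices to fp-relative slots. A standalone sketch of the arithmetic (not V8 code; it assumes the 32-bit layout set up by `VirtualFrame::Enter()` in the next file, with the saved fp and lr at fp+0 and fp+4):

    // Standalone C++ sketch, not V8 code: the fp-relative slot
    // computation from the deleted ParameterAt().
    #include <cstdio>

    const int kPointerSize = 4;  // 32-bit ARM

    // Index -1 is the receiver; parameters sit above the saved fp/lr pair.
    int ParameterOffset(int parameter_count, int index) {
      return (1 + parameter_count - index) * kPointerSize;
    }

    int main() {
      std::printf("receiver: fp+%d\n", ParameterOffset(2, -1));  // fp+16
      std::printf("param 0:  fp+%d\n", ParameterOffset(2, 0));   // fp+12
      std::printf("param 1:  fp+%d\n", ParameterOffset(2, 1));   // fp+8
      return 0;
    }
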
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
deleted file mode 100644
index a852d6e..0000000
--- a/src/arm/virtual-frame-arm.cc
+++ /dev/null
@@ -1,843 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_ARM)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::PopToR1R0() {
-  // Shuffle things around so the top of stack is in r0 and r1.
-  MergeTOSTo(R0_R1_TOS);
-  // Pop the two registers off the stack so they are detached from the frame.
-  LowerHeight(2);
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::PopToR1() {
-  // Shuffle things around so the top of stack is only in r1.
-  MergeTOSTo(R1_TOS);
-  // Pop the register off the stack so it is detached from the frame.
-  LowerHeight(1);
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::PopToR0() {
-  // Shuffle things around so the top of stack only in r0.
-  MergeTOSTo(R0_TOS);
-  // Pop the register off the stack so it is detached from the frame.
-  LowerHeight(1);
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
-  if (Equals(expected)) return;
-  ASSERT((expected->tos_known_smi_map_ & tos_known_smi_map_) ==
-         expected->tos_known_smi_map_);
-  ASSERT(expected->IsCompatibleWith(this));
-  MergeTOSTo(expected->top_of_stack_state_, cond);
-  ASSERT(register_allocation_map_ == expected->register_allocation_map_);
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) {
-  if (Equals(expected)) return;
-  tos_known_smi_map_ &= expected->tos_known_smi_map_;
-  MergeTOSTo(expected->top_of_stack_state_, cond);
-  ASSERT(register_allocation_map_ == expected->register_allocation_map_);
-}
-
-
-void VirtualFrame::MergeTOSTo(
-    VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) {
-#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
-  switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
-    case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
-      break;
-    case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
-      __ pop(r0, cond);
-      break;
-    case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
-      __ pop(r1, cond);
-      break;
-    case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
-      __ pop(r0, cond);
-      __ pop(r1, cond);
-      break;
-    case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
-      __ pop(r1, cond);
-      __ pop(r0, cond);
-      break;
-    case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
-      __ push(r0, cond);
-      break;
-    case CASE_NUMBER(R0_TOS, R0_TOS):
-      break;
-    case CASE_NUMBER(R0_TOS, R1_TOS):
-      __ mov(r1, r0, LeaveCC, cond);
-      break;
-    case CASE_NUMBER(R0_TOS, R0_R1_TOS):
-      __ pop(r1, cond);
-      break;
-    case CASE_NUMBER(R0_TOS, R1_R0_TOS):
-      __ mov(r1, r0, LeaveCC, cond);
-      __ pop(r0, cond);
-      break;
-    case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
-      __ push(r1, cond);
-      break;
-    case CASE_NUMBER(R1_TOS, R0_TOS):
-      __ mov(r0, r1, LeaveCC, cond);
-      break;
-    case CASE_NUMBER(R1_TOS, R1_TOS):
-      break;
-    case CASE_NUMBER(R1_TOS, R0_R1_TOS):
-      __ mov(r0, r1, LeaveCC, cond);
-      __ pop(r1, cond);
-      break;
-    case CASE_NUMBER(R1_TOS, R1_R0_TOS):
-      __ pop(r0, cond);
-      break;
-    case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
-      __ Push(r1, r0, cond);
-      break;
-    case CASE_NUMBER(R0_R1_TOS, R0_TOS):
-      __ push(r1, cond);
-      break;
-    case CASE_NUMBER(R0_R1_TOS, R1_TOS):
-      __ push(r1, cond);
-      __ mov(r1, r0, LeaveCC, cond);
-      break;
-    case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
-      break;
-    case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
-      __ Swap(r0, r1, ip, cond);
-      break;
-    case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
-      __ Push(r0, r1, cond);
-      break;
-    case CASE_NUMBER(R1_R0_TOS, R0_TOS):
-      __ push(r0, cond);
-      __ mov(r0, r1, LeaveCC, cond);
-      break;
-    case CASE_NUMBER(R1_R0_TOS, R1_TOS):
-      __ push(r0, cond);
-      break;
-    case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
-      __ Swap(r0, r1, ip, cond);
-      break;
-    case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
-      break;
-    default:
-      UNREACHABLE();
-#undef CASE_NUMBER
-  }
-  // A conditional merge will be followed by a conditional branch and the
-  // fall-through code will have an unchanged virtual frame state.  If the
-  // merge is unconditional ('al'ways) then it might be followed by a fall
-  // through.  We need to update the virtual frame state to match the code we
-  // are falling into.  The final case is an unconditional merge followed by an
-  // unconditional branch, in which case it doesn't matter what we do to the
-  // virtual frame state, because the virtual frame will be invalidated.
-  if (cond == al) {
-    top_of_stack_state_ = expected_top_of_stack_state;
-  }
-}
-
-
-void VirtualFrame::Enter() {
-  Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
-  // Verify that r1 contains a JS function.  The following code relies
-  // on r2 being available for use.
-  if (FLAG_debug_code) {
-    Label map_check, done;
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(ne, &map_check);
-    __ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
-    __ bind(&map_check);
-    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-    __ b(eq, &done);
-    __ stop("VirtualFrame::Enter - r1 is not a function (map check).");
-    __ bind(&done);
-  }
-#endif  // DEBUG
-
-  // We are about to push four values to the frame.
-  Adjust(4);
-  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
-  // Adjust FP to point to saved FP.
-  __ add(fp, sp, Operand(2 * kPointerSize));
-}
-
-
-void VirtualFrame::Exit() {
-  Comment cmnt(masm(), "[ Exit JS frame");
-  // Record the location of the JS exit code for patching when setting
-  // break point.
-  __ RecordJSReturn();
-
-  // Drop the execution stack down to the frame pointer and restore the caller
-  // frame pointer and return address.
-  __ mov(sp, fp);
-  __ ldm(ia_w, sp, fp.bit() | lr.bit());
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
-  int count = local_count();
-  if (count > 0) {
-    Comment cmnt(masm(), "[ Allocate space for locals");
-    Adjust(count);
-    // Initialize stack slots with 'undefined' value.
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
-    if (count < kLocalVarBound) {
-      // For fewer locals the unrolled loop is more compact.
-      for (int i = 0; i < count; i++) {
-        __ push(ip);
-      }
-    } else {
-      // For more locals a loop in generated code is more compact.
-      Label alloc_locals_loop;
-      __ mov(r1, Operand(count));
-      __ bind(&alloc_locals_loop);
-      __ push(ip);
-      __ sub(r1, r1, Operand(1), SetCC);
-      __ b(ne, &alloc_locals_loop);
-    }
-  } else {
-    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
-  }
-  // Check the stack for overflow or a break request.
-  masm()->cmp(sp, Operand(r2));
-  StackCheckStub stub;
-  // Call the stub if lower.
-  masm()->mov(ip,
-              Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
-                      RelocInfo::CODE_TARGET),
-              LeaveCC,
-              lo);
-  masm()->Call(ip, lo);
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
-  UNIMPLEMENTED();
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
-  // Grow the expression stack by handler size less one (the return
-  // address in lr is already counted by a call instruction).
-  Adjust(kHandlerSize - 1);
-  __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-void VirtualFrame::CallJSFunction(int arg_count) {
-  // InvokeFunction requires function in r1.
-  PopToR1();
-  SpillAll();
-
-  // +1 for receiver.
-  Forget(arg_count + 1);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ParameterCount count(arg_count);
-  __ InvokeFunction(r1, count, CALL_FUNCTION);
-  // Restore the context.
-  __ ldr(cp, Context());
-}
-
-
-void VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
-  SpillAll();
-  Forget(arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(f, arg_count);
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
-  SpillAll();
-  Forget(arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(id, arg_count);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ DebugBreak();
-}
-#endif
-
-
-void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
-                                 InvokeJSFlags flags,
-                                 int arg_count) {
-  Forget(arg_count);
-  __ InvokeBuiltin(id, flags);
-}
-
-
-void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kLoadIC_Initialize));
-  PopToR0();
-  SpillAll();
-  __ mov(r2, Operand(name));
-  CallCodeObject(ic, mode, 0);
-}
-
-
-void VirtualFrame::CallStoreIC(Handle<String> name,
-                               bool is_contextual,
-                               StrictModeFlag strict_mode) {
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
-                                   : Builtins::kStoreIC_Initialize));
-  PopToR0();
-  RelocInfo::Mode mode;
-  if (is_contextual) {
-    SpillAll();
-    __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    mode = RelocInfo::CODE_TARGET_CONTEXT;
-  } else {
-    EmitPop(r1);
-    SpillAll();
-    mode = RelocInfo::CODE_TARGET;
-  }
-  __ mov(r2, Operand(name));
-  CallCodeObject(ic, mode, 0);
-}
-
-
-void VirtualFrame::CallKeyedLoadIC() {
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_Initialize));
-  PopToR1R0();
-  SpillAll();
-  CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
-}
-
-
-void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
-                                   : Builtins::kKeyedStoreIC_Initialize));
-  PopToR1R0();
-  SpillAll();
-  EmitPop(r2);
-  CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
-}
-
-
-void VirtualFrame::CallCodeObject(Handle<Code> code,
-                                  RelocInfo::Mode rmode,
-                                  int dropped_args) {
-  switch (code->kind()) {
-    case Code::CALL_IC:
-    case Code::KEYED_CALL_IC:
-    case Code::FUNCTION:
-      break;
-    case Code::KEYED_LOAD_IC:
-    case Code::LOAD_IC:
-    case Code::KEYED_STORE_IC:
-    case Code::STORE_IC:
-      ASSERT(dropped_args == 0);
-      break;
-    case Code::BUILTIN:
-      ASSERT(*code == Isolate::Current()->builtins()->builtin(
-          Builtins::kJSConstructCall));
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-  Forget(dropped_args);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ Call(code, rmode);
-}
-
-
-//    NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS.
-const bool VirtualFrame::kR0InUse[TOS_STATES] =
-    { false,            true,   false,  true,      true };
-const bool VirtualFrame::kR1InUse[TOS_STATES] =
-    { false,            false,  true,   true,      true };
-const int VirtualFrame::kVirtualElements[TOS_STATES] =
-    { 0,                1,      1,      2,         2 };
-const Register VirtualFrame::kTopRegister[TOS_STATES] =
-    { r0,               r0,     r1,     r1,        r0 };
-const Register VirtualFrame::kBottomRegister[TOS_STATES] =
-    { r0,               r0,     r1,     r0,        r1 };
-const Register VirtualFrame::kAllocatedRegisters[
-    VirtualFrame::kNumberOfAllocatedRegisters] = { r2, r3, r4, r5, r6 };
-// Popping is done by the transition implied by kStateAfterPop.  Of course if
-// there were no stack slots allocated to registers then the physical SP must
-// be adjusted.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
-    { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };
-// Pushing is done by the transition implied by kStateAfterPush.  Of course if
-// the maximum number of registers was already allocated to the top of stack
-// slots then one register must be physically pushed onto the stack.
-const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
-    { R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };
-
-
-void VirtualFrame::Drop(int count) {
-  ASSERT(count >= 0);
-  ASSERT(height() >= count);
-  // Discard elements from the virtual frame and free any registers.
-  int num_virtual_elements = kVirtualElements[top_of_stack_state_];
-  while (num_virtual_elements > 0) {
-    Pop();
-    num_virtual_elements--;
-    count--;
-    if (count == 0) return;
-  }
-  if (count == 0) return;
-  __ add(sp, sp, Operand(count * kPointerSize));
-  LowerHeight(count);
-}
-
-
-void VirtualFrame::Pop() {
-  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
-    __ add(sp, sp, Operand(kPointerSize));
-  } else {
-    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
-  }
-  LowerHeight(1);
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
-  ASSERT(!is_used(RegisterAllocator::ToNumber(reg)));
-  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
-    __ pop(reg);
-  } else {
-    __ mov(reg, kTopRegister[top_of_stack_state_]);
-    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
-  }
-  LowerHeight(1);
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR0() {
-  switch (top_of_stack_state_) {
-    case NO_TOS_REGISTERS:
-      __ ldr(r0, MemOperand(sp, 0));
-      break;
-    case R0_TOS:
-      __ push(r0);
-      break;
-    case R1_TOS:
-      __ push(r1);
-      __ mov(r0, r1);
-      break;
-    case R0_R1_TOS:
-      __ Push(r1, r0);
-      break;
-    case R1_R0_TOS:
-      __ Push(r0, r1);
-      __ mov(r0, r1);
-      break;
-    default:
-      UNREACHABLE();
-  }
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR1() {
-  switch (top_of_stack_state_) {
-    case NO_TOS_REGISTERS:
-      __ ldr(r1, MemOperand(sp, 0));
-      break;
-    case R0_TOS:
-      __ push(r0);
-      __ mov(r1, r0);
-      break;
-    case R1_TOS:
-      __ push(r1);
-      break;
-    case R0_R1_TOS:
-      __ Push(r1, r0);
-      __ mov(r1, r0);
-      break;
-    case R1_R0_TOS:
-      __ Push(r0, r1);
-      break;
-    default:
-      UNREACHABLE();
-  }
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-void VirtualFrame::SpillAllButCopyTOSToR1R0() {
-  switch (top_of_stack_state_) {
-    case NO_TOS_REGISTERS:
-      __ ldr(r1, MemOperand(sp, 0));
-      __ ldr(r0, MemOperand(sp, kPointerSize));
-      break;
-    case R0_TOS:
-      __ push(r0);
-      __ mov(r1, r0);
-      __ ldr(r0, MemOperand(sp, kPointerSize));
-      break;
-    case R1_TOS:
-      __ push(r1);
-      __ ldr(r0, MemOperand(sp, kPointerSize));
-      break;
-    case R0_R1_TOS:
-      __ Push(r1, r0);
-      __ Swap(r0, r1, ip);
-      break;
-    case R1_R0_TOS:
-      __ Push(r0, r1);
-      break;
-    default:
-      UNREACHABLE();
-  }
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-Register VirtualFrame::Peek() {
-  AssertIsNotSpilled();
-  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
-    top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
-    Register answer = kTopRegister[top_of_stack_state_];
-    __ pop(answer);
-    return answer;
-  } else {
-    return kTopRegister[top_of_stack_state_];
-  }
-}
-
-
-Register VirtualFrame::Peek2() {
-  AssertIsNotSpilled();
-  switch (top_of_stack_state_) {
-    case NO_TOS_REGISTERS:
-    case R0_TOS:
-    case R0_R1_TOS:
-      MergeTOSTo(R0_R1_TOS);
-      return r1;
-    case R1_TOS:
-    case R1_R0_TOS:
-      MergeTOSTo(R1_R0_TOS);
-      return r0;
-    default:
-      UNREACHABLE();
-      return no_reg;
-  }
-}
-
-
-void VirtualFrame::Dup() {
-  if (SpilledScope::is_spilled()) {
-    __ ldr(ip, MemOperand(sp, 0));
-    __ push(ip);
-  } else {
-    switch (top_of_stack_state_) {
-      case NO_TOS_REGISTERS:
-        __ ldr(r0, MemOperand(sp, 0));
-        top_of_stack_state_ = R0_TOS;
-        break;
-      case R0_TOS:
-        __ mov(r1, r0);
-        // r0 and r1 contain the same value. Prefer the state with r0 holding TOS.
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      case R1_TOS:
-        __ mov(r0, r1);
-        // r0 and r1 contain the same value. Prefer the state with r0 holding TOS.
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      case R0_R1_TOS:
-        __ push(r1);
-        __ mov(r1, r0);
-        // r0 and r1 contain the same value. Prefer the state with r0 holding TOS.
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      case R1_R0_TOS:
-        __ push(r0);
-        __ mov(r0, r1);
-        // r0 and r1 contain the same value. Prefer the state with r0 holding TOS.
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-  RaiseHeight(1, tos_known_smi_map_ & 1);
-}
-
-
-void VirtualFrame::Dup2() {
-  if (SpilledScope::is_spilled()) {
-    __ ldr(ip, MemOperand(sp, kPointerSize));
-    __ push(ip);
-    __ ldr(ip, MemOperand(sp, kPointerSize));
-    __ push(ip);
-  } else {
-    switch (top_of_stack_state_) {
-      case NO_TOS_REGISTERS:
-        __ ldr(r0, MemOperand(sp, 0));
-        __ ldr(r1, MemOperand(sp, kPointerSize));
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      case R0_TOS:
-        __ push(r0);
-        __ ldr(r1, MemOperand(sp, kPointerSize));
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      case R1_TOS:
-        __ push(r1);
-        __ ldr(r0, MemOperand(sp, kPointerSize));
-        top_of_stack_state_ = R1_R0_TOS;
-        break;
-      case R0_R1_TOS:
-        __ Push(r1, r0);
-        top_of_stack_state_ = R0_R1_TOS;
-        break;
-      case R1_R0_TOS:
-        __ Push(r0, r1);
-        top_of_stack_state_ = R1_R0_TOS;
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-  RaiseHeight(2, tos_known_smi_map_ & 3);
-}
-
-
-Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
-  ASSERT(but_not_to_this_one.is(r0) ||
-         but_not_to_this_one.is(r1) ||
-         but_not_to_this_one.is(no_reg));
-  LowerHeight(1);
-  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
-    if (but_not_to_this_one.is(r0)) {
-      __ pop(r1);
-      return r1;
-    } else {
-      __ pop(r0);
-      return r0;
-    }
-  } else {
-    Register answer = kTopRegister[top_of_stack_state_];
-    ASSERT(!answer.is(but_not_to_this_one));
-    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
-    return answer;
-  }
-}
-
-
-void VirtualFrame::EnsureOneFreeTOSRegister() {
-  if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) {
-    __ push(kBottomRegister[top_of_stack_state_]);
-    top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
-    top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
-  }
-  ASSERT(kVirtualElements[top_of_stack_state_] != kMaxTOSRegisters);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
-  RaiseHeight(1, info.IsSmi() ? 1 : 0);
-  if (reg.is(cp)) {
-    // If we are pushing cp then we are about to make a call and things have to
-    // be pushed to the physical stack.  There's nothing to be gained by moving
-    // to a TOS register and then pushing that; we might as well push to the
-    // physical stack immediately.
-    MergeTOSTo(NO_TOS_REGISTERS);
-    __ push(reg);
-    return;
-  }
-  if (SpilledScope::is_spilled()) {
-    ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
-    __ push(reg);
-    return;
-  }
-  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
-    if (reg.is(r0)) {
-      top_of_stack_state_ = R0_TOS;
-      return;
-    }
-    if (reg.is(r1)) {
-      top_of_stack_state_ = R1_TOS;
-      return;
-    }
-  }
-  EnsureOneFreeTOSRegister();
-  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
-  Register dest = kTopRegister[top_of_stack_state_];
-  __ Move(dest, reg);
-}
-
-
-void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
-  if (this_far_down < kTOSKnownSmiMapSize) {
-    tos_known_smi_map_ &= ~(1 << this_far_down);
-  }
-  if (this_far_down == 0) {
-    Pop();
-    Register dest = GetTOSRegister();
-    if (dest.is(reg)) {
-      // We already popped one item off the top of the stack.  If the only
-      // free register is the one we were asked to push then we have been
-      // asked to push a register that was already in use, which cannot
-      // happen.  It therefore follows that there are two free TOS registers:
-      ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
-      dest = dest.is(r0) ? r1 : r0;
-    }
-    __ mov(dest, reg);
-    EmitPush(dest);
-  } else if (this_far_down == 1) {
-    int virtual_elements = kVirtualElements[top_of_stack_state_];
-    if (virtual_elements < 2) {
-      __ str(reg, ElementAt(this_far_down));
-    } else {
-      ASSERT(virtual_elements == 2);
-      ASSERT(!reg.is(r0));
-      ASSERT(!reg.is(r1));
-      Register dest = kBottomRegister[top_of_stack_state_];
-      __ mov(dest, reg);
-    }
-  } else {
-    ASSERT(this_far_down >= 2);
-    ASSERT(kVirtualElements[top_of_stack_state_] <= 2);
-    __ str(reg, ElementAt(this_far_down));
-  }
-}
-
-
-Register VirtualFrame::GetTOSRegister() {
-  if (SpilledScope::is_spilled()) return r0;
-
-  EnsureOneFreeTOSRegister();
-  return kTopRegister[kStateAfterPush[top_of_stack_state_]];
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
-  RaiseHeight(1, info.IsSmi() ? 1 : 0);
-  if (SpilledScope::is_spilled()) {
-    __ mov(r0, operand);
-    __ push(r0);
-    return;
-  }
-  EnsureOneFreeTOSRegister();
-  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
-  __ mov(kTopRegister[top_of_stack_state_], operand);
-}
-
-
-void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
-  RaiseHeight(1, info.IsSmi() ? 1 : 0);
-  if (SpilledScope::is_spilled()) {
-    __ ldr(r0, operand);
-    __ push(r0);
-    return;
-  }
-  EnsureOneFreeTOSRegister();
-  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
-  __ ldr(kTopRegister[top_of_stack_state_], operand);
-}
-
-
-void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
-  RaiseHeight(1, 0);
-  if (SpilledScope::is_spilled()) {
-    __ LoadRoot(r0, index);
-    __ push(r0);
-    return;
-  }
-  EnsureOneFreeTOSRegister();
-  top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
-  __ LoadRoot(kTopRegister[top_of_stack_state_], index);
-}
-
-
-void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
-  ASSERT(SpilledScope::is_spilled());
-  Adjust(count);
-  __ stm(db_w, sp, src_regs);
-}
-
-
-void VirtualFrame::SpillAll() {
-  switch (top_of_stack_state_) {
-    case R1_R0_TOS:
-      masm()->push(r0);
-      // Fall through.
-    case R1_TOS:
-      masm()->push(r1);
-      top_of_stack_state_ = NO_TOS_REGISTERS;
-      break;
-    case R0_R1_TOS:
-      masm()->push(r1);
-      // Fall through.
-    case R0_TOS:
-      masm()->push(r0);
-      top_of_stack_state_ = NO_TOS_REGISTERS;
-      // Fall through.
-    case NO_TOS_REGISTERS:
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-  ASSERT(register_allocation_map_ == 0);  // Not yet implemented.
-}
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_ARM
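
The deleted `MergeTOSTo()` above dispatches on a (current, expected) state pair folded into a single case label via `CASE_NUMBER`. A standalone model of that encoding (not V8 code; the printed strings stand in for emitted instructions):

    // Standalone C++ model, not V8 code: folding (current, expected) into
    // one integer lets a single switch cover every TOS transition.
    #include <cstdio>

    enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS,
                      R0_R1_TOS, TOS_STATES };

    #define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))

    void MergeTOSTo(TopOfStack current, TopOfStack expected) {
      switch (CASE_NUMBER(current, expected)) {
        case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
          std::printf("pop r0\n");       // TOS was in memory, wanted in r0.
          break;
        case CASE_NUMBER(R0_TOS, R1_TOS):
          std::printf("mov r1, r0\n");   // Wrong register, move it over.
          break;
        case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
          std::printf("swap r0, r1\n");  // Same registers, swapped roles.
          break;
        default:
          std::printf("identity or other transition\n");
      }
    }

    int main() { MergeTOSTo(R0_TOS, R1_TOS); return 0; }
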
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
deleted file mode 100644
index 6d67e70..0000000
--- a/src/arm/virtual-frame-arm.h
+++ /dev/null
@@ -1,523 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_ARM_VIRTUAL_FRAME_ARM_H_
-#define V8_ARM_VIRTUAL_FRAME_ARM_H_
-
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// This dummy class is only used to create invalid virtual frames.
-extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
-
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame.  It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack.  It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
-  class RegisterAllocationScope;
-  // A utility class to introduce a scope where the virtual frame is
-  // expected to remain spilled.  The constructor spills the code
-  // generator's current frame, and keeps it spilled.
-  class SpilledScope BASE_EMBEDDED {
-   public:
-    explicit SpilledScope(VirtualFrame* frame)
-      : old_is_spilled_(
-          Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
-      if (frame != NULL) {
-        if (!old_is_spilled_) {
-          frame->SpillAll();
-        } else {
-          frame->AssertIsSpilled();
-        }
-      }
-      Isolate::Current()->set_is_virtual_frame_in_spilled_scope(true);
-    }
-    ~SpilledScope() {
-      Isolate::Current()->set_is_virtual_frame_in_spilled_scope(
-          old_is_spilled_);
-    }
-    static bool is_spilled() {
-      return Isolate::Current()->is_virtual_frame_in_spilled_scope();
-    }
-
-   private:
-    int old_is_spilled_;
-
-    SpilledScope() { }
-
-    friend class RegisterAllocationScope;
-  };
-
-  class RegisterAllocationScope BASE_EMBEDDED {
-   public:
-    // A utility class to introduce a scope where the virtual frame
-    // is not spilled, ie. where register allocation occurs.  Eventually
-    // when RegisterAllocationScope is ubiquitous it can be removed
-    // along with the (by then unused) SpilledScope class.
-    inline explicit RegisterAllocationScope(CodeGenerator* cgen);
-    inline ~RegisterAllocationScope();
-
-   private:
-    CodeGenerator* cgen_;
-    bool old_is_spilled_;
-
-    RegisterAllocationScope() { }
-  };
-
-  // An illegal index into the virtual frame.
-  static const int kIllegalIndex = -1;
-
-  // Construct an initial virtual frame on entry to a JS function.
-  inline VirtualFrame();
-
-  // Construct an invalid virtual frame, used by JumpTargets.
-  inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
-
-  // Construct a virtual frame as a clone of an existing one.
-  explicit inline VirtualFrame(VirtualFrame* original);
-
-  inline CodeGenerator* cgen() const;
-  inline MacroAssembler* masm();
-
-  // The number of elements on the virtual frame.
-  int element_count() const { return element_count_; }
-
-  // The height of the virtual expression stack.
-  inline int height() const;
-
-  bool is_used(int num) {
-    switch (num) {
-      case 0: {  // r0.
-        return kR0InUse[top_of_stack_state_];
-      }
-      case 1: {  // r1.
-        return kR1InUse[top_of_stack_state_];
-      }
-      case 2:
-      case 3:
-      case 4:
-      case 5:
-      case 6: {  // r2 to r6.
-        ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
-        ASSERT(num >= kFirstAllocatedRegister);
-        if ((register_allocation_map_ &
-             (1 << (num - kFirstAllocatedRegister))) == 0) {
-          return false;
-        } else {
-          return true;
-        }
-      }
-      default: {
-        ASSERT(num < kFirstAllocatedRegister ||
-               num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
-        return false;
-      }
-    }
-  }
-
-  // Add extra in-memory elements to the top of the frame to match an actual
-  // frame (eg, the frame after an exception handler is pushed).  No code is
-  // emitted.
-  void Adjust(int count);
-
-  // Forget elements from the top of the frame to match an actual frame (eg,
-  // the frame after a runtime call).  No code is emitted except to bring the
-  // frame to a spilled state.
-  void Forget(int count);
-
-  // Spill all values from the frame to memory.
-  void SpillAll();
-
-  void AssertIsSpilled() const {
-    ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
-    ASSERT(register_allocation_map_ == 0);
-  }
-
-  void AssertIsNotSpilled() {
-    ASSERT(!SpilledScope::is_spilled());
-  }
-
-  // Spill all occurrences of a specific register from the frame.
-  void Spill(Register reg) {
-    UNIMPLEMENTED();
-  }
-
-  // Spill all occurrences of an arbitrary register if possible.  Return the
-  // register spilled or no_reg if it was not possible to free any register
-  // (ie, they all have frame-external references).  Unimplemented.
-  Register SpillAnyRegister();
-
-  // Make this virtual frame have a state identical to an expected virtual
-  // frame.  As a side effect, code may be emitted to make this frame match
-  // the expected one.
-  void MergeTo(VirtualFrame* expected, Condition cond = al);
-  void MergeTo(const VirtualFrame* expected, Condition cond = al);
-
-  // Checks whether this frame can be branched to by the other frame.
-  bool IsCompatibleWith(const VirtualFrame* other) const {
-    return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
-  }
-
-  inline void ForgetTypeInfo() {
-    tos_known_smi_map_ = 0;
-  }
-
-  // Detach a frame from its code generator, perhaps temporarily.  This
-  // tells the register allocator that it is free to use frame-internal
-  // registers.  Used when the code generator's frame is switched from this
-  // one to NULL by an unconditional jump.
-  void DetachFromCodeGenerator() {
-  }
-
-  // (Re)attach a frame to its code generator.  This informs the register
-  // allocator that the frame-internal register references are active again.
-  // Used when a code generator's frame is switched from NULL to this one by
-  // binding a label.
-  void AttachToCodeGenerator() {
-  }
-
-  // Emit code for the physical JS entry and exit frame sequences.  After
-  // calling Enter, the virtual frame is ready for use; and after calling
-  // Exit it should not be used.  Note that Enter does not allocate space in
-  // the physical frame for storing frame-allocated locals.
-  void Enter();
-  void Exit();
-
-  // Prepare for returning from the frame by forgetting elements in the
-  // virtual frame. This avoids generating unnecessary merge code when
-  // jumping to the shared return site. No spill code emitted. Value to
-  // return should be in r0.
-  inline void PrepareForReturn();
-
-  // Number of local variables at or above which we use a loop for allocating.
-  static const int kLocalVarBound = 5;
-
-  // Allocate and initialize the frame-allocated locals.
-  void AllocateStackSlots();
-
-  // The current top of the expression stack as an assembly operand.
-  MemOperand Top() {
-    AssertIsSpilled();
-    return MemOperand(sp, 0);
-  }
-
-  // An element of the expression stack as an assembly operand.
-  MemOperand ElementAt(int index) {
-    int adjusted_index = index - kVirtualElements[top_of_stack_state_];
-    ASSERT(adjusted_index >= 0);
-    return MemOperand(sp, adjusted_index * kPointerSize);
-  }
-
-  bool KnownSmiAt(int index) {
-    if (index >= kTOSKnownSmiMapSize) return false;
-    return (tos_known_smi_map_ & (1 << index)) != 0;
-  }
-
-  // A frame-allocated local as an assembly operand.
-  inline MemOperand LocalAt(int index);
-
-  // Push the address of the receiver slot on the frame.
-  void PushReceiverSlotAddress();
-
-  // The function frame slot.
-  MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
-
-  // The context frame slot.
-  MemOperand Context() { return MemOperand(fp, kContextOffset); }
-
-  // A parameter as an assembly operand.
-  inline MemOperand ParameterAt(int index);
-
-  // The receiver frame slot.
-  inline MemOperand Receiver();
-
-  // Push a try-catch or try-finally handler on top of the virtual frame.
-  void PushTryHandler(HandlerType type);
-
-  // Call stub given the number of arguments it expects on (and
-  // removes from) the stack.
-  inline void CallStub(CodeStub* stub, int arg_count);
-
-  // Call JS function from top of the stack with arguments
-  // taken from the stack.
-  void CallJSFunction(int arg_count);
-
-  // Call runtime given the number of arguments expected on (and
-  // removed from) the stack.
-  void CallRuntime(const Runtime::Function* f, int arg_count);
-  void CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  void DebugBreak();
-#endif
-
-  // Invoke builtin given the number of arguments it expects on (and
-  // removes from) the stack.
-  void InvokeBuiltin(Builtins::JavaScript id,
-                     InvokeJSFlags flag,
-                     int arg_count);
-
-  // Call load IC. Receiver is on the stack and is consumed. Result is returned
-  // in r0.
-  void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
-
-  // Call store IC. If the store is contextual, value is found on top of the
-  // frame. If not, value and receiver are on the frame. Both are consumed.
-  // Result is returned in r0.
-  void CallStoreIC(Handle<String> name, bool is_contextual,
-                   StrictModeFlag strict_mode);
-
-  // Call keyed load IC. Key and receiver are on the stack. Both are consumed.
-  // Result is returned in r0.
-  void CallKeyedLoadIC();
-
-  // Call keyed store IC. Value, key and receiver are on the stack. All three
-  // are consumed. Result is returned in r0.
-  void CallKeyedStoreIC(StrictModeFlag strict_mode);
-
-  // Call into an IC stub given the number of arguments it removes
-  // from the stack.  Register arguments to the IC stub are implicit,
-  // and depend on the type of IC stub.
-  void CallCodeObject(Handle<Code> ic,
-                      RelocInfo::Mode rmode,
-                      int dropped_args);
-
-  // Drop a number of elements from the top of the expression stack.  May
-  // emit code to affect the physical frame.  Does not clobber any registers
-  // excepting possibly the stack pointer.
-  void Drop(int count);
-
-  // Drop one element.
-  void Drop() { Drop(1); }
-
-  // Pop an element from the top of the expression stack.  Discards
-  // the result.
-  void Pop();
-
-  // Pop an element from the top of the expression stack.  The register
-  // will be one normally used for the top of stack register allocation
-  // so you can't hold on to it if you push on the stack.
-  Register PopToRegister(Register but_not_to_this_one = no_reg);
-
-  // Look at the top of the stack.  The register returned is aliased and
-  // must be copied to a scratch register before modification.
-  Register Peek();
-
-  // Look at the value beneath the top of the stack.  The register returned is
-  // aliased and must be copied to a scratch register before modification.
-  Register Peek2();
-
-  // Duplicate the top of stack.
-  void Dup();
-
-  // Duplicate the two elements on top of stack.
-  void Dup2();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in r0.
-  void SpillAllButCopyTOSToR0();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in r1.
-  void SpillAllButCopyTOSToR1();
-
-  // Flushes all registers, but it puts a copy of the top-of-stack in r1
-  // and the next value on the stack in r0.
-  void SpillAllButCopyTOSToR1R0();
-
-  // Pop and save an element from the top of the expression stack and
-  // emit a corresponding pop instruction.
-  void EmitPop(Register reg);
-
-  // Takes the top two elements and puts them in r0 (top element) and r1
-  // (second element).
-  void PopToR1R0();
-
-  // Takes the top element and puts it in r1.
-  void PopToR1();
-
-  // Takes the top element and puts it in r0.
-  void PopToR0();
-
-  // Push an element on top of the expression stack and emit a
-  // corresponding push instruction.
-  void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
-  void EmitPushRoot(Heap::RootListIndex index);
-
-  // Overwrite the nth thing on the stack.  If the nth position is in a
-  // register then this turns into a mov, otherwise an str.  Afterwards
-  // you can still use the register even if it is a register that can be
-  // used for TOS (r0 or r1).
-  void SetElementAt(Register reg, int this_far_down);
-
-  // Get a register which is free and which must be immediately used to
-  // push on the top of the stack.
-  Register GetTOSRegister();
-
-  // Push multiple registers on the stack and the virtual frame.
-  // Registers are selected by setting bits in src_regs and
-  // are pushed in decreasing order: r15 .. r0.
-  void EmitPushMultiple(int count, int src_regs);
-
-  static Register scratch0() { return r7; }
-  static Register scratch1() { return r9; }
-
- private:
-  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
-  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
-  static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
-  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
-  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
-
-  // 5 states for the top of stack, which can be in memory or in r0 and r1.
-  enum TopOfStack {
-    NO_TOS_REGISTERS,
-    R0_TOS,
-    R1_TOS,
-    R1_R0_TOS,
-    R0_R1_TOS,
-    TOS_STATES
-  };
-
-  static const int kMaxTOSRegisters = 2;
-
-  static const bool kR0InUse[TOS_STATES];
-  static const bool kR1InUse[TOS_STATES];
-  static const int kVirtualElements[TOS_STATES];
-  static const TopOfStack kStateAfterPop[TOS_STATES];
-  static const TopOfStack kStateAfterPush[TOS_STATES];
-  static const Register kTopRegister[TOS_STATES];
-  static const Register kBottomRegister[TOS_STATES];
-
-  // We allocate up to 5 locals in registers.
-  static const int kNumberOfAllocatedRegisters = 5;
-  // r2 to r6 are allocated to locals.
-  static const int kFirstAllocatedRegister = 2;
-
-  static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
-
-  static Register AllocatedRegister(int r) {
-    ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
-    return kAllocatedRegisters[r];
-  }
-
-  // The number of elements on the stack frame.
-  int element_count_;
-  TopOfStack top_of_stack_state_:3;
-  int register_allocation_map_:kNumberOfAllocatedRegisters;
-  static const int kTOSKnownSmiMapSize = 4;
-  unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
-
-  // The index of the element that is at the processor's stack pointer
-  // (the sp register).  For now since everything is in memory it is given
-  // by the number of elements on the not-very-virtual stack frame.
-  int stack_pointer() { return element_count_ - 1; }
-
-  // The number of frame-allocated locals and parameters respectively.
-  inline int parameter_count() const;
-  inline int local_count() const;
-
-  // The index of the element that is at the processor's frame pointer
-  // (the fp register).  The parameters, receiver, function, and context
-  // are below the frame pointer.
-  inline int frame_pointer() const;
-
-  // The index of the first parameter.  The receiver lies below the first
-  // parameter.
-  int param0_index() { return 1; }
-
-  // The index of the context slot in the frame.  It is immediately
-  // below the frame pointer.
-  inline int context_index();
-
-  // The index of the function slot in the frame.  It is below the frame
-  // pointer and context slot.
-  inline int function_index();
-
-  // The index of the first local.  Between the frame pointer and the
-  // locals lies the return address.
-  inline int local0_index() const;
-
-  // The index of the base of the expression stack.
-  inline int expression_base_index() const;
-
-  // Convert a frame index into a frame pointer relative offset into the
-  // actual stack.
-  inline int fp_relative(int index);
-
-  // Spill all elements in registers. Spill the top spilled_args elements
-  // on the frame.  Sync all other frame elements.
-  // Then drop dropped_args elements from the virtual frame, to match
-  // the effect of an upcoming call that will drop them from the stack.
-  void PrepareForCall(int spilled_args, int dropped_args);
-
-  // If all top-of-stack registers are in use then the lowest one is pushed
-  // onto the physical stack and made free.
-  void EnsureOneFreeTOSRegister();
-
-  // Emit instructions to get the top of stack state from where we are to where
-  // we want to be.
-  void MergeTOSTo(TopOfStack expected_state, Condition cond = al);
-
-  inline bool Equals(const VirtualFrame* other);
-
-  inline void LowerHeight(int count) {
-    element_count_ -= count;
-    if (count >= kTOSKnownSmiMapSize) {
-      tos_known_smi_map_ = 0;
-    } else {
-      tos_known_smi_map_ >>= count;
-    }
-  }
-
-  inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
-    ASSERT(count >= 32 || known_smi_map < (1u << count));
-    element_count_ += count;
-    if (count >= kTOSKnownSmiMapSize) {
-      tos_known_smi_map_ = known_smi_map;
-    } else {
-      tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
-    }
-  }
-
-  friend class JumpTarget;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_ARM_VIRTUAL_FRAME_ARM_H_
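
The deleted header tracks which top-of-stack elements are known smis in a small bitmap that `RaiseHeight()`/`LowerHeight()` shift on push and pop. A standalone model of that bookkeeping (not V8 code):

    // Standalone C++ model, not V8 code: bit i of the map records whether
    // the element i slots below TOS is known to be a smi.
    #include <cassert>
    #include <cstdio>

    const int kTOSKnownSmiMapSize = 4;
    unsigned tos_known_smi_map = 0;

    void RaiseHeight(int count, unsigned known_smi_map) {
      assert(count >= 32 || known_smi_map < (1u << count));
      tos_known_smi_map = (count >= kTOSKnownSmiMapSize)
          ? known_smi_map
          : ((tos_known_smi_map << count) | known_smi_map);
    }

    void LowerHeight(int count) {
      tos_known_smi_map =
          (count >= kTOSKnownSmiMapSize) ? 0 : (tos_known_smi_map >> count);
    }

    int main() {
      RaiseHeight(1, 1);  // Push a known smi: map = 0b01.
      RaiseHeight(1, 0);  // Push an unknown value: map = 0b10.
      LowerHeight(1);     // Pop it: the smi bit is back at position 0.
      std::printf("map=%u\n", tos_known_smi_map);  // Prints map=1.
      return 0;
    }
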
diff --git a/src/assembler.cc b/src/assembler.cc
index 0322747..ca30e19 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -492,7 +492,8 @@
            target_address());
   } else if (IsPosition(rmode_)) {
     PrintF(out, "  (%" V8_PTR_PREFIX "d)", data());
-  } else if (rmode_ == RelocInfo::RUNTIME_ENTRY) {
+  } else if (rmode_ == RelocInfo::RUNTIME_ENTRY &&
+             Isolate::Current()->deoptimizer_data() != NULL) {
     // Depotimization bailouts are stored as runtime entries.
     int id = Deoptimizer::GetDeoptimizationId(
         target_address(), Deoptimizer::EAGER);
@@ -1002,7 +1003,7 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 ExternalReference ExternalReference::debug_break(Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug::Break)));
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));
 }
 
 
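
The assembler.cc hunk above adds a null check so that RelocInfo printing does not consult the deoptimizer's lookup table before it has been created. A minimal model of the guard (hypothetical stand-in types, not V8's):

    // Standalone C++ model, not V8 code: guard a diagnostic lookup behind
    // a null check so it is safe before the table exists.
    #include <cstddef>
    #include <cstdio>

    struct DeoptimizerData {
      int GetDeoptimizationId(void* target) { return 42; }  // stand-in
    };

    DeoptimizerData* deoptimizer_data = NULL;  // created lazily elsewhere

    void PrintRuntimeEntry(void* target) {
      if (deoptimizer_data != NULL) {
        std::printf("  (deoptimization bailout %d)\n",
                    deoptimizer_data->GetDeoptimizationId(target));
      } else {
        std::printf("  (runtime entry)\n");  // No table yet: stay generic.
      }
    }

    int main() {
      PrintRuntimeEntry(NULL);  // Before the deoptimizer data exists.
      DeoptimizerData data;
      deoptimizer_data = &data;
      PrintRuntimeEntry(NULL);  // After: prints the bailout id.
      return 0;
    }
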
diff --git a/src/assembler.h b/src/assembler.h
index 62fe04d..e8cecc3 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 #ifndef V8_ASSEMBLER_H_
 #define V8_ASSEMBLER_H_
@@ -111,7 +111,6 @@
   friend class Assembler;
   friend class RegexpAssembler;
   friend class Displacement;
-  friend class ShadowTarget;
   friend class RegExpMacroAssemblerIrregexp;
 };
 
diff --git a/src/ast-inl.h b/src/ast-inl.h
index 6021fd9..d80684a 100644
--- a/src/ast-inl.h
+++ b/src/ast-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,6 @@
 #include "v8.h"
 
 #include "ast.h"
-#include "jump-target-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -62,7 +61,7 @@
 IterationStatement::IterationStatement(ZoneStringList* labels)
     : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
       body_(NULL),
-      continue_target_(JumpTarget::BIDIRECTIONAL),
+      continue_target_(),
       osr_entry_id_(GetNextId()) {
 }
 
diff --git a/src/ast.cc b/src/ast.cc
index 8434357..303189d 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,10 +28,10 @@
 #include "v8.h"
 
 #include "ast.h"
-#include "jump-target-inl.h"
 #include "parser.h"
 #include "scopes.h"
 #include "string-stream.h"
+#include "type-info.h"
 
 namespace v8 {
 namespace internal {
@@ -77,20 +77,23 @@
       var_(NULL),  // Will be set by the call to BindTo.
       is_this_(var->is_this()),
       inside_with_(false),
-      is_trivial_(false) {
+      is_trivial_(false),
+      position_(RelocInfo::kNoPosition) {
   BindTo(var);
 }
 
 
 VariableProxy::VariableProxy(Handle<String> name,
                              bool is_this,
-                             bool inside_with)
+                             bool inside_with,
+                             int position)
   : name_(name),
     var_(NULL),
     is_this_(is_this),
     inside_with_(inside_with),
-    is_trivial_(false) {
-  // names must be canonicalized for fast equality checks
+    is_trivial_(false),
+    position_(position) {
+  // Names must be canonicalized for fast equality checks.
   ASSERT(name->IsSymbol());
 }
 
@@ -288,7 +291,7 @@
 }
 
 
-void TargetCollector::AddTarget(BreakTarget* target) {
+void TargetCollector::AddTarget(Label* target) {
   // Add the label to the collector, but discard duplicates.
   int length = targets_->length();
   for (int i = 0; i < length; i++) {
@@ -298,79 +301,6 @@
 }
 
 
-bool Expression::GuaranteedSmiResult() {
-  BinaryOperation* node = AsBinaryOperation();
-  if (node == NULL) return false;
-  Token::Value op = node->op();
-  switch (op) {
-    case Token::COMMA:
-    case Token::OR:
-    case Token::AND:
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-    case Token::BIT_XOR:
-    case Token::SHL:
-      return false;
-      break;
-    case Token::BIT_OR:
-    case Token::BIT_AND: {
-      Literal* left = node->left()->AsLiteral();
-      Literal* right = node->right()->AsLiteral();
-      if (left != NULL && left->handle()->IsSmi()) {
-        int value = Smi::cast(*left->handle())->value();
-        if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
-          // Result of bitwise or is always a negative Smi.
-          return true;
-        }
-        if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
-          // Result of bitwise and is always a positive Smi.
-          return true;
-        }
-      }
-      if (right != NULL && right->handle()->IsSmi()) {
-        int value = Smi::cast(*right->handle())->value();
-        if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
-          // Result of bitwise or is always a negative Smi.
-          return true;
-        }
-        if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
-          // Result of bitwise and is always a positive Smi.
-          return true;
-        }
-      }
-      return false;
-      break;
-    }
-    case Token::SAR:
-    case Token::SHR: {
-      Literal* right = node->right()->AsLiteral();
-      if (right != NULL && right->handle()->IsSmi()) {
-        int value = Smi::cast(*right->handle())->value();
-        if ((value & 0x1F) > 1 ||
-            (op == Token::SAR && (value & 0x1F) == 1)) {
-          return true;
-        }
-      }
-      return false;
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-  return false;
-}
-
-
-void Expression::CopyAnalysisResultsFrom(Expression* other) {
-  bitfields_ = other->bitfields_;
-  type_ = other->type_;
-}
-
-
 bool UnaryOperation::ResultOverwriteAllowed() {
   switch (op_) {
     case Token::BIT_NOT:
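
The deleted `GuaranteedSmiResult()` above relies on 31-bit smi range arithmetic: OR-ing with a constant whose top two bits are set pins the result in negative smi range, AND-ing with one whose top two bits are clear pins it in positive smi range, and logical shifts right by two or more leave at most 30 significant bits. A standalone check of those claims (not V8 code; two's-complement casts assumed):

    // Standalone C++ check, not V8 code; assumes two's-complement casts.
    #include <cstdint>
    #include <cstdio>

    // The 31-bit signed smi range used by 32-bit V8.
    bool FitsInSmi(int32_t v) { return v >= -(1 << 30) && v < (1 << 30); }

    int main() {
      int32_t x = 0x7fffffff;  // Not itself a smi.
      // Top two bits forced on: result is in [-2^30, -1], a negative smi.
      int32_t a = int32_t(uint32_t(x) | 0xc0000000u);
      // Top two bits forced off: result is in [0, 2^30 - 1], a positive smi.
      int32_t b = int32_t(uint32_t(x) & 0x3fffffffu);
      // SHR by 2 leaves at most 30 significant bits.
      int32_t c = int32_t(uint32_t(x) >> 2);
      std::printf("%d %d %d\n", FitsInSmi(a), FitsInSmi(b), FitsInSmi(c));
      return 0;  // Prints: 1 1 1
    }
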
@@ -413,13 +343,148 @@
   left_ = assignment->target();
   right_ = assignment->value();
   pos_ = assignment->position();
-  CopyAnalysisResultsFrom(assignment);
 }
 
 
 // ----------------------------------------------------------------------------
 // Inlining support
 
+bool Declaration::IsInlineable() const {
+  UNREACHABLE();
+  return false;
+}
+
+
+bool TargetCollector::IsInlineable() const {
+  UNREACHABLE();
+  return false;
+}
+
+
+bool Slot::IsInlineable() const {
+  UNREACHABLE();
+  return false;
+}
+
+
+bool ForInStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool WithEnterStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool WithExitStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool SwitchStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool TryStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool TryCatchStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool TryFinallyStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool CatchExtensionObject::IsInlineable() const {
+  return false;
+}
+
+
+bool DebuggerStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool Throw::IsInlineable() const {
+  return true;
+}
+
+
+bool MaterializedLiteral::IsInlineable() const {
+  // TODO(1322): Allow materialized literals.
+  return false;
+}
+
+
+bool FunctionLiteral::IsInlineable() const {
+  // TODO(1322): Allow materialized literals.
+  return false;
+}
+
+
+bool ThisFunction::IsInlineable() const {
+  return false;
+}
+
+
+bool SharedFunctionInfoLiteral::IsInlineable() const {
+  return false;
+}
+
+
+bool ValidLeftHandSideSentinel::IsInlineable() const {
+  UNREACHABLE();
+  return false;
+}
+
+
+bool ForStatement::IsInlineable() const {
+  return (init() == NULL || init()->IsInlineable())
+      && (cond() == NULL || cond()->IsInlineable())
+      && (next() == NULL || next()->IsInlineable())
+      && body()->IsInlineable();
+}
+
+
+bool WhileStatement::IsInlineable() const {
+  return cond()->IsInlineable()
+      && body()->IsInlineable();
+}
+
+
+bool DoWhileStatement::IsInlineable() const {
+  return cond()->IsInlineable()
+      && body()->IsInlineable();
+}
+
+
+bool ContinueStatement::IsInlineable() const {
+  return true;
+}
+
+
+bool BreakStatement::IsInlineable() const {
+  return true;
+}
+
+
+bool EmptyStatement::IsInlineable() const {
+  return true;
+}
+
+
+bool Literal::IsInlineable() const {
+  return true;
+}
+
+
 bool Block::IsInlineable() const {
   const int count = statements_.length();
   for (int i = 0; i < count; ++i) {
@@ -435,8 +500,9 @@
 
 
 bool IfStatement::IsInlineable() const {
-  return condition()->IsInlineable() && then_statement()->IsInlineable() &&
-      else_statement()->IsInlineable();
+  return condition()->IsInlineable()
+      && then_statement()->IsInlineable()
+      && else_statement()->IsInlineable();
 }
 
 
@@ -546,7 +612,7 @@
   } else if (is_monomorphic_) {
     monomorphic_receiver_type_ = oracle->LoadMonomorphicReceiverType(this);
     if (monomorphic_receiver_type_->has_external_array_elements()) {
-      SetExternalArrayType(oracle->GetKeyedLoadExternalArrayType(this));
+      set_external_array_type(oracle->GetKeyedLoadExternalArrayType(this));
     }
   }
 }
@@ -566,7 +632,19 @@
     // Record receiver type for monomorphic keyed loads.
     monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
     if (monomorphic_receiver_type_->has_external_array_elements()) {
-      SetExternalArrayType(oracle->GetKeyedStoreExternalArrayType(this));
+      set_external_array_type(oracle->GetKeyedStoreExternalArrayType(this));
+    }
+  }
+}
+
+
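+// A count operation (x++ / x--) writes back to its target, so it collects
+// the same type feedback as a keyed store.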
+void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+  is_monomorphic_ = oracle->StoreIsMonomorphic(this);
+  if (is_monomorphic_) {
+    // Record receiver type for monomorphic keyed stores.
+    monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
+    if (monomorphic_receiver_type_->has_external_array_elements()) {
+      set_external_array_type(oracle->GetKeyedStoreExternalArrayType(this));
     }
   }
 }
@@ -622,24 +700,21 @@
 
 
 bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
-                               Handle<String> name) {
+                               LookupResult* lookup) {
   target_ = Handle<JSFunction>::null();
   cell_ = Handle<JSGlobalPropertyCell>::null();
-  LookupResult lookup;
-  global->Lookup(*name, &lookup);
-  if (lookup.IsProperty() &&
-      lookup.type() == NORMAL &&
-      lookup.holder() == *global) {
-    cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(&lookup));
-    if (cell_->value()->IsJSFunction()) {
-      Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
-      // If the function is in new space we assume it's more likely to
-      // change and thus prefer the general IC code.
-      if (!HEAP->InNewSpace(*candidate) &&
-          CanCallWithoutIC(candidate, arguments()->length())) {
-        target_ = candidate;
-        return true;
-      }
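+  // The global-object lookup is now performed by the caller and passed in;
+  // here we only assert that it found a normal property on the global
+  // object itself.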
+  ASSERT(lookup->IsProperty() &&
+         lookup->type() == NORMAL &&
+         lookup->holder() == *global);
+  cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(lookup));
+  if (cell_->value()->IsJSFunction()) {
+    Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
+    // If the function is in new space we assume it's more likely to
+    // change and thus prefer the general IC code.
+    if (!HEAP->InNewSpace(*candidate) &&
+        CanCallWithoutIC(candidate, arguments()->length())) {
+      target_ = candidate;
+      return true;
     }
   }
   return false;
diff --git a/src/ast.h b/src/ast.h
index e9a06ec..65a25a9 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,6 @@
 #include "execution.h"
 #include "factory.h"
 #include "jsregexp.h"
-#include "jump-target.h"
 #include "runtime.h"
 #include "token.h"
 #include "variables.h"
@@ -88,7 +87,6 @@
   V(CallNew)                                    \
   V(CallRuntime)                                \
   V(UnaryOperation)                             \
-  V(IncrementOperation)                         \
   V(CountOperation)                             \
   V(BinaryOperation)                            \
   V(CompareOperation)                           \
@@ -134,6 +132,7 @@
 #undef DECLARE_TYPE_ENUM
 
   static const int kNoNumber = -1;
+  static const int kFunctionEntryId = 2;  // Using 0 could disguise errors.
 
   AstNode() : id_(GetNextId()) {
     Isolate* isolate = Isolate::Current();
@@ -160,7 +159,7 @@
   virtual Slot* AsSlot() { return NULL; }
 
   // True if the node is simple enough for us to inline calls containing it.
-  virtual bool IsInlineable() const { return false; }
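+  // Now pure virtual: each concrete node must state its inlineability
+  // explicitly instead of silently inheriting a default of false.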
+  virtual bool IsInlineable() const = 0;
 
   static int Count() { return Isolate::Current()->ast_node_count(); }
   static void ResetIds() { Isolate::Current()->set_ast_node_id(0); }
@@ -220,7 +219,12 @@
     kTest
   };
 
-  Expression() : bitfields_(0) {}
+  Expression() {}
+
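+  // Overridden by the expression types that track a source position; the
+  // default should never be called.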
+  virtual int position() const {
+    UNREACHABLE();
+    return 0;
+  }
 
   virtual Expression* AsExpression()  { return this; }
 
@@ -266,70 +270,15 @@
     return Handle<Map>();
   }
 
-  // Static type information for this expression.
-  StaticType* type() { return &type_; }
-
-  // True if the expression is a loop condition.
-  bool is_loop_condition() const {
-    return LoopConditionField::decode(bitfields_);
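+  // For keyed accesses to external (typed) arrays, type feedback records
+  // the array type here; see the RecordTypeFeedback methods in ast.cc.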
+  ExternalArrayType external_array_type() const {
+    return external_array_type_;
   }
-  void set_is_loop_condition(bool flag) {
-    bitfields_ = (bitfields_ & ~LoopConditionField::mask()) |
-        LoopConditionField::encode(flag);
-  }
-
-  // The value of the expression is guaranteed to be a smi, because the
-  // top operation is a bit operation with a mask, or a shift.
-  bool GuaranteedSmiResult();
-
-  // AST analysis results.
-  void CopyAnalysisResultsFrom(Expression* other);
-
-  // True if the expression rooted at this node can be compiled by the
-  // side-effect free compiler.
-  bool side_effect_free() { return SideEffectFreeField::decode(bitfields_); }
-  void set_side_effect_free(bool is_side_effect_free) {
-    bitfields_ &= ~SideEffectFreeField::mask();
-    bitfields_ |= SideEffectFreeField::encode(is_side_effect_free);
-  }
-
-  // Will the use of this expression treat -0 the same as 0 in all cases?
-  // If so, we can return 0 instead of -0 if we want to, to optimize code.
-  bool no_negative_zero() { return NoNegativeZeroField::decode(bitfields_); }
-  void set_no_negative_zero(bool no_negative_zero) {
-    bitfields_ &= ~NoNegativeZeroField::mask();
-    bitfields_ |= NoNegativeZeroField::encode(no_negative_zero);
-  }
-
-  // Will ToInt32 (ECMA 262-3 9.5) or ToUint32 (ECMA 262-3 9.6)
-  // be applied to the value of this expression?
-  // If so, we may be able to optimize the calculation of the value.
-  bool to_int32() { return ToInt32Field::decode(bitfields_); }
-  void set_to_int32(bool to_int32) {
-    bitfields_ &= ~ToInt32Field::mask();
-    bitfields_ |= ToInt32Field::encode(to_int32);
-  }
-
-  // How many bitwise logical or shift operators are used in this expression?
-  int num_bit_ops() { return NumBitOpsField::decode(bitfields_); }
-  void set_num_bit_ops(int num_bit_ops) {
-    bitfields_ &= ~NumBitOpsField::mask();
-    num_bit_ops = Min(num_bit_ops, kMaxNumBitOps);
-    bitfields_ |= NumBitOpsField::encode(num_bit_ops);
+  void set_external_array_type(ExternalArrayType array_type) {
+    external_array_type_ = array_type;
   }
 
  private:
-  static const int kMaxNumBitOps = (1 << 5) - 1;
-
-  uint32_t bitfields_;
-  StaticType type_;
-
-  // Using template BitField<type, start, size>.
-  class SideEffectFreeField : public BitField<bool, 0, 1> {};
-  class NoNegativeZeroField : public BitField<bool, 1, 1> {};
-  class ToInt32Field : public BitField<bool, 2, 1> {};
-  class NumBitOpsField : public BitField<int, 3, 5> {};
-  class LoopConditionField: public BitField<bool, 8, 1> {};
+  ExternalArrayType external_array_type_;
 };
 
 
@@ -342,6 +291,7 @@
  public:
   virtual bool IsValidLeftHandSide() { return true; }
   virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
+  virtual bool IsInlineable() const;
 };
 
 
@@ -360,7 +310,7 @@
   virtual BreakableStatement* AsBreakableStatement() { return this; }
 
   // Code generation
-  BreakTarget* break_target() { return &break_target_; }
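+  // Plain assembler Labels replace the old BreakTarget jump targets.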
+  Label* break_target() { return &break_target_; }
 
   // Testers.
   bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
@@ -375,7 +325,7 @@
  private:
   ZoneStringList* labels_;
   Type type_;
-  BreakTarget break_target_;
+  Label break_target_;
   int entry_id_;
   int exit_id_;
 };
@@ -426,6 +376,7 @@
   VariableProxy* proxy() const { return proxy_; }
   Variable::Mode mode() const { return mode_; }
   FunctionLiteral* fun() const { return fun_; }  // may be NULL
+  virtual bool IsInlineable() const;
 
  private:
   VariableProxy* proxy_;
@@ -446,7 +397,7 @@
   virtual int ContinueId() const = 0;
 
   // Code generation
-  BreakTarget* continue_target()  { return &continue_target_; }
+  Label* continue_target()  { return &continue_target_; }
 
  protected:
   explicit inline IterationStatement(ZoneStringList* labels);
@@ -457,7 +408,7 @@
 
  private:
   Statement* body_;
-  BreakTarget continue_target_;
+  Label continue_target_;
   int osr_entry_id_;
 };
 
@@ -484,6 +435,8 @@
   virtual int ContinueId() const { return continue_id_; }
   int BackEdgeId() const { return back_edge_id_; }
 
+  virtual bool IsInlineable() const;
+
  private:
   Expression* cond_;
   int condition_position_;
@@ -510,6 +463,7 @@
   void set_may_have_function_literal(bool value) {
     may_have_function_literal_ = value;
   }
+  virtual bool IsInlineable() const;
 
   // Bailout support.
   virtual int ContinueId() const { return EntryId(); }
@@ -557,6 +511,7 @@
   bool is_fast_smi_loop() { return loop_variable_ != NULL; }
   Variable* loop_variable() { return loop_variable_; }
   void set_loop_variable(Variable* var) { loop_variable_ = var; }
+  virtual bool IsInlineable() const;
 
  private:
   Statement* init_;
@@ -584,6 +539,7 @@
 
   Expression* each() const { return each_; }
   Expression* enumerable() const { return enumerable_; }
+  virtual bool IsInlineable() const;
 
   // Bailout support.
   int AssignmentId() const { return assignment_id_; }
@@ -624,6 +580,7 @@
   DECLARE_NODE_TYPE(ContinueStatement)
 
   IterationStatement* target() const { return target_; }
+  virtual bool IsInlineable() const;
 
  private:
   IterationStatement* target_;
@@ -638,6 +595,7 @@
   DECLARE_NODE_TYPE(BreakStatement)
 
   BreakableStatement* target() const { return target_; }
+  virtual bool IsInlineable() const;
 
  private:
   BreakableStatement* target_;
@@ -669,6 +627,7 @@
   Expression* expression() const { return expression_; }
 
   bool is_catch_block() const { return is_catch_block_; }
+  virtual bool IsInlineable() const;
 
  private:
   Expression* expression_;
@@ -680,6 +639,8 @@
  public:
   WithExitStatement() { }
 
+  virtual bool IsInlineable() const;
+
   DECLARE_NODE_TYPE(WithExitStatement)
 };
 
@@ -693,10 +654,10 @@
     CHECK(!is_default());
     return label_;
   }
-  JumpTarget* body_target() { return &body_target_; }
+  Label* body_target() { return &body_target_; }
   ZoneList<Statement*>* statements() const { return statements_; }
 
-  int position() { return position_; }
+  int position() const { return position_; }
   void set_position(int pos) { position_ = pos; }
 
   int EntryId() { return entry_id_; }
@@ -708,7 +669,7 @@
 
  private:
   Expression* label_;
-  JumpTarget body_target_;
+  Label body_target_;
   ZoneList<Statement*>* statements_;
   int position_;
   enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
@@ -730,6 +691,7 @@
 
   Expression* tag() const { return tag_; }
   ZoneList<CaseClause*>* cases() const { return cases_; }
+  virtual bool IsInlineable() const;
 
  private:
   Expression* tag_;
@@ -781,23 +743,24 @@
 // stack in the compiler; this should probably be reworked.
 class TargetCollector: public AstNode {
  public:
-  explicit TargetCollector(ZoneList<BreakTarget*>* targets)
+  explicit TargetCollector(ZoneList<Label*>* targets)
       : targets_(targets) {
   }
 
   // Adds a jump target to the collector. The collector stores a pointer,
   // not a copy, of the target to make binding work, so make sure not to
   // pass in references to something on the stack.
-  void AddTarget(BreakTarget* target);
+  void AddTarget(Label* target);
 
   // Virtual behaviour. TargetCollectors are never part of the AST.
   virtual void Accept(AstVisitor* v) { UNREACHABLE(); }
   virtual TargetCollector* AsTargetCollector() { return this; }
 
-  ZoneList<BreakTarget*>* targets() { return targets_; }
+  ZoneList<Label*>* targets() { return targets_; }
+  virtual bool IsInlineable() const;
 
  private:
-  ZoneList<BreakTarget*>* targets_;
+  ZoneList<Label*>* targets_;
 };
 
 
@@ -806,16 +769,17 @@
   explicit TryStatement(Block* try_block)
       : try_block_(try_block), escaping_targets_(NULL) { }
 
-  void set_escaping_targets(ZoneList<BreakTarget*>* targets) {
+  void set_escaping_targets(ZoneList<Label*>* targets) {
     escaping_targets_ = targets;
   }
 
   Block* try_block() const { return try_block_; }
-  ZoneList<BreakTarget*>* escaping_targets() const { return escaping_targets_; }
+  ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
+  virtual bool IsInlineable() const;
 
  private:
   Block* try_block_;
-  ZoneList<BreakTarget*>* escaping_targets_;
+  ZoneList<Label*>* escaping_targets_;
 };
 
 
@@ -833,6 +797,7 @@
 
   VariableProxy* catch_var() const { return catch_var_; }
   Block* catch_block() const { return catch_block_; }
+  virtual bool IsInlineable() const;
 
  private:
   VariableProxy* catch_var_;
@@ -849,6 +814,7 @@
   DECLARE_NODE_TYPE(TryFinallyStatement)
 
   Block* finally_block() const { return finally_block_; }
+  virtual bool IsInlineable() const;
 
  private:
   Block* finally_block_;
@@ -858,6 +824,7 @@
 class DebuggerStatement: public Statement {
  public:
   DECLARE_NODE_TYPE(DebuggerStatement)
+  virtual bool IsInlineable() const;
 };
 
 
@@ -865,7 +832,7 @@
  public:
   DECLARE_NODE_TYPE(EmptyStatement)
 
-  virtual bool IsInlineable() const { return true; }
+  virtual bool IsInlineable() const;
 };
 
 
@@ -876,7 +843,6 @@
   DECLARE_NODE_TYPE(Literal)
 
   virtual bool IsTrivial() { return true; }
-  virtual bool IsInlineable() const { return true; }
   virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
 
   // Check if this literal is identical to the other literal.
@@ -915,6 +881,7 @@
   }
 
   Handle<Object> handle() const { return handle_; }
+  virtual bool IsInlineable() const;
 
  private:
   Handle<Object> handle_;
@@ -936,6 +903,7 @@
   bool is_simple() const { return is_simple_; }
 
   int depth() const { return depth_; }
+  virtual bool IsInlineable() const;
 
  private:
   int literal_index_;
@@ -1085,6 +1053,7 @@
 
   Literal* key() const { return key_; }
   VariableProxy* value() const { return value_; }
+  virtual bool IsInlineable() const;
 
  private:
   Literal* key_;
@@ -1135,6 +1104,7 @@
   Variable* var() const { return var_; }
   bool is_this() const { return is_this_; }
   bool inside_with() const { return inside_with_; }
+  virtual int position() const { return position_; }
 
   void MarkAsTrivial() { is_trivial_ = true; }
 
@@ -1147,8 +1117,12 @@
   bool is_this_;
   bool inside_with_;
   bool is_trivial_;
+  int position_;
 
-  VariableProxy(Handle<String> name, bool is_this, bool inside_with);
+  VariableProxy(Handle<String> name,
+                bool is_this,
+                bool inside_with,
+                int position = RelocInfo::kNoPosition);
   explicit VariableProxy(bool is_this);
 
   friend class Scope;
@@ -1206,6 +1180,7 @@
   Type type() const { return type_; }
   int index() const { return index_; }
   bool is_arguments() const { return var_->is_arguments(); }
+  virtual bool IsInlineable() const;
 
  private:
   Variable* var_;
@@ -1241,7 +1216,7 @@
 
   Expression* obj() const { return obj_; }
   Expression* key() const { return key_; }
-  int position() const { return pos_; }
+  virtual int position() const { return pos_; }
   bool is_synthetic() const { return type_ == SYNTHETIC; }
 
   bool IsStringLength() const { return is_string_length_; }
@@ -1255,11 +1230,6 @@
   }
   bool is_arguments_access() const { return is_arguments_access_; }
 
-  ExternalArrayType GetExternalArrayType() const { return array_type_; }
-  void SetExternalArrayType(ExternalArrayType array_type) {
-    array_type_ = array_type;
-  }
-
   // Type feedback information.
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
@@ -1283,7 +1253,6 @@
   bool is_function_prototype_ : 1;
   bool is_arguments_access_ : 1;
   Handle<Map> monomorphic_receiver_type_;
-  ExternalArrayType array_type_;
 };
 
 
@@ -1305,7 +1274,7 @@
 
   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
-  int position() { return pos_; }
+  virtual int position() const { return pos_; }
 
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual ZoneMapList* GetReceiverTypes() { return receiver_types_; }
@@ -1316,7 +1285,7 @@
   Handle<JSGlobalPropertyCell> cell() { return cell_; }
 
   bool ComputeTarget(Handle<Map> type, Handle<String> name);
-  bool ComputeGlobalTarget(Handle<GlobalObject> global, Handle<String> name);
+  bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
 
   // Bailout support.
   int ReturnId() const { return return_id_; }
@@ -1383,7 +1352,7 @@
 
   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
-  int position() { return pos_; }
+  virtual int position() const { return pos_; }
 
  private:
   Expression* expression_;
@@ -1466,7 +1435,7 @@
   Token::Value op() const { return op_; }
   Expression* left() const { return left_; }
   Expression* right() const { return right_; }
-  int position() const { return pos_; }
+  virtual int position() const { return pos_; }
 
   // Bailout support.
   int RightId() const { return right_id_; }
@@ -1482,59 +1451,52 @@
 };
 
 
-class IncrementOperation: public Expression {
- public:
-  IncrementOperation(Token::Value op, Expression* expr)
-      : op_(op), expression_(expr) {
-    ASSERT(Token::IsCountOp(op));
-  }
-
-  DECLARE_NODE_TYPE(IncrementOperation)
-
-  Token::Value op() const { return op_; }
-  bool is_increment() { return op_ == Token::INC; }
-  Expression* expression() const { return expression_; }
-
- private:
-  Token::Value op_;
-  Expression* expression_;
-  int pos_;
-};
-
-
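+// CountOperation now carries its token and operand directly, so e.g. "x++"
+// becomes CountOperation(Token::INC, false, proxy, pos) rather than a
+// CountOperation wrapping a separate IncrementOperation node.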
 class CountOperation: public Expression {
  public:
-  CountOperation(bool is_prefix, IncrementOperation* increment, int pos)
-      : is_prefix_(is_prefix), increment_(increment), pos_(pos),
-        assignment_id_(GetNextId()) {
-  }
+  CountOperation(Token::Value op, bool is_prefix, Expression* expr, int pos)
+      : op_(op),
+        is_prefix_(is_prefix),
+        is_monomorphic_(false),
+        expression_(expr),
+        pos_(pos),
+        assignment_id_(GetNextId()),
+        count_id_(GetNextId()) { }
 
   DECLARE_NODE_TYPE(CountOperation)
 
   bool is_prefix() const { return is_prefix_; }
   bool is_postfix() const { return !is_prefix_; }
 
-  Token::Value op() const { return increment_->op(); }
+  Token::Value op() const { return op_; }
   Token::Value binary_op() {
     return (op() == Token::INC) ? Token::ADD : Token::SUB;
   }
 
-  Expression* expression() const { return increment_->expression(); }
-  IncrementOperation* increment() const { return increment_; }
-  int position() const { return pos_; }
+  Expression* expression() const { return expression_; }
+  virtual int position() const { return pos_; }
 
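+  // In statement position the value is unused, so a postfix operation can
+  // be treated as the equivalent prefix one.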
   virtual void MarkAsStatement() { is_prefix_ = true; }
 
   virtual bool IsInlineable() const;
 
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  virtual bool IsMonomorphic() { return is_monomorphic_; }
+  virtual Handle<Map> GetMonomorphicReceiverType() {
+    return monomorphic_receiver_type_;
+  }
+
   // Bailout support.
   int AssignmentId() const { return assignment_id_; }
+  int CountId() const { return count_id_; }
 
  private:
+  Token::Value op_;
   bool is_prefix_;
-  IncrementOperation* increment_;
+  bool is_monomorphic_;
+  Expression* expression_;
   int pos_;
   int assignment_id_;
+  int count_id_;
+  Handle<Map> monomorphic_receiver_type_;
 };
 
 
@@ -1553,7 +1515,7 @@
   Token::Value op() const { return op_; }
   Expression* left() const { return left_; }
   Expression* right() const { return right_; }
-  int position() const { return pos_; }
+  virtual int position() const { return pos_; }
 
   virtual bool IsInlineable() const;
 
@@ -1648,7 +1610,7 @@
   Token::Value op() const { return op_; }
   Expression* target() const { return target_; }
   Expression* value() const { return value_; }
-  int position() { return pos_; }
+  virtual int position() const { return pos_; }
   BinaryOperation* binary_operation() const { return binary_operation_; }
 
   // This check relies on the definition order of token in token.h.
@@ -1670,10 +1632,6 @@
   virtual Handle<Map> GetMonomorphicReceiverType() {
     return monomorphic_receiver_type_;
   }
-  ExternalArrayType GetExternalArrayType() const { return array_type_; }
-  void SetExternalArrayType(ExternalArrayType array_type) {
-    array_type_ = array_type;
-  }
 
   // Bailout support.
   int CompoundLoadId() const { return compound_load_id_; }
@@ -1694,7 +1652,6 @@
   bool is_monomorphic_;
   ZoneMapList* receiver_types_;
   Handle<Map> monomorphic_receiver_type_;
-  ExternalArrayType array_type_;
 };
 
 
@@ -1706,7 +1663,8 @@
   DECLARE_NODE_TYPE(Throw)
 
   Expression* exception() const { return exception_; }
-  int position() const { return pos_; }
+  virtual int position() const { return pos_; }
+  virtual bool IsInlineable() const;
 
  private:
   Expression* exception_;
@@ -1726,8 +1684,7 @@
                   int num_parameters,
                   int start_position,
                   int end_position,
-                  bool is_expression,
-                  bool contains_loops)
+                  bool is_expression)
       : name_(name),
         scope_(scope),
         body_(body),
@@ -1740,10 +1697,8 @@
         start_position_(start_position),
         end_position_(end_position),
         is_expression_(is_expression),
-        contains_loops_(contains_loops),
         function_token_position_(RelocInfo::kNoPosition),
         inferred_name_(HEAP->empty_string()),
-        try_full_codegen_(false),
         pretenure_(false) { }
 
   DECLARE_NODE_TYPE(FunctionLiteral)
@@ -1756,7 +1711,6 @@
   int start_position() const { return start_position_; }
   int end_position() const { return end_position_; }
   bool is_expression() const { return is_expression_; }
-  bool contains_loops() const { return contains_loops_; }
   bool strict_mode() const;
 
   int materialized_literal_count() { return materialized_literal_count_; }
@@ -1781,11 +1735,9 @@
     inferred_name_ = inferred_name;
   }
 
-  bool try_full_codegen() { return try_full_codegen_; }
-  void set_try_full_codegen(bool flag) { try_full_codegen_ = flag; }
-
   bool pretenure() { return pretenure_; }
   void set_pretenure(bool value) { pretenure_ = value; }
+  virtual bool IsInlineable() const;
 
  private:
   Handle<String> name_;
@@ -1799,11 +1751,8 @@
   int start_position_;
   int end_position_;
   bool is_expression_;
-  bool contains_loops_;
-  bool strict_mode_;
   int function_token_position_;
   Handle<String> inferred_name_;
-  bool try_full_codegen_;
   bool pretenure_;
 };
 
@@ -1819,6 +1768,7 @@
   Handle<SharedFunctionInfo> shared_function_info() const {
     return shared_function_info_;
   }
+  virtual bool IsInlineable() const;
 
  private:
   Handle<SharedFunctionInfo> shared_function_info_;
@@ -1828,6 +1778,7 @@
 class ThisFunction: public Expression {
  public:
   DECLARE_NODE_TYPE(ThisFunction)
+  virtual bool IsInlineable() const;
 };
 
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 9c9bac7..0800714 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -141,7 +141,8 @@
 
 class Genesis BASE_EMBEDDED {
  public:
-  Genesis(Handle<Object> global_object,
+  Genesis(Isolate* isolate,
+          Handle<Object> global_object,
           v8::Handle<v8::ObjectTemplate> global_template,
           v8::ExtensionConfiguration* extensions);
   ~Genesis() { }
@@ -150,8 +151,13 @@
 
   Genesis* previous() { return previous_; }
 
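+  // Convenience accessors for the isolate being bootstrapped, so Genesis
+  // code need not go through Isolate::Current().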
+  Isolate* isolate() const { return isolate_; }
+  Factory* factory() const { return isolate_->factory(); }
+  Heap* heap() const { return isolate_->heap(); }
+
  private:
   Handle<Context> global_context_;
+  Isolate* isolate_;
 
   // There may be more than one active genesis object: When GC is
   // triggered during environment creation there may be weak handle
@@ -163,7 +169,7 @@
   // Creates some basic objects. Used for creating a context from scratch.
   void CreateRoots();
   // Creates the empty function.  Used for creating a context from scratch.
-  Handle<JSFunction> CreateEmptyFunction();
+  Handle<JSFunction> CreateEmptyFunction(Isolate* isolate);
   // Creates the ThrowTypeError function. ECMA 5th Ed. 13.2.3
   Handle<JSFunction> CreateThrowTypeErrorFunction(Builtins::Name builtin);
 
@@ -194,6 +200,7 @@
   // Used for creating a context from scratch.
   void InstallNativeFunctions();
   bool InstallNatives();
+  bool InstallExperimentalNatives();
   void InstallBuiltinFunctionIds();
   void InstallJSFunctionResultCaches();
   void InitializeNormalizedMapCaches();
@@ -239,7 +246,8 @@
       Handle<FixedArray> arguments,
       Handle<FixedArray> caller);
 
-  static bool CompileBuiltin(int index);
+  static bool CompileBuiltin(Isolate* isolate, int index);
+  static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
   static bool CompileNative(Vector<const char> name, Handle<String> source);
   static bool CompileScriptCached(Vector<const char> name,
                                   Handle<String> source,
@@ -269,12 +277,13 @@
 
 
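+// The isolate is now threaded through explicitly rather than being read
+// from Isolate::Current() inside Genesis.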
 Handle<Context> Bootstrapper::CreateEnvironment(
+    Isolate* isolate,
     Handle<Object> global_object,
     v8::Handle<v8::ObjectTemplate> global_template,
     v8::ExtensionConfiguration* extensions) {
   HandleScope scope;
   Handle<Context> env;
-  Genesis genesis(global_object, global_template, extensions);
+  Genesis genesis(isolate, global_object, global_template, extensions);
   env = genesis.result();
   if (!env.is_null()) {
     if (InstallExtensions(env, extensions)) {
@@ -287,15 +296,16 @@
 
 static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
   // object.__proto__ = proto;
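+  // Use the isolate that owns |object| rather than Isolate::Current().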
+  Factory* factory = object->GetIsolate()->factory();
   Handle<Map> old_to_map = Handle<Map>(object->map());
-  Handle<Map> new_to_map = FACTORY->CopyMapDropTransitions(old_to_map);
+  Handle<Map> new_to_map = factory->CopyMapDropTransitions(old_to_map);
   new_to_map->set_prototype(*proto);
   object->set_map(*new_to_map);
 }
 
 
 void Bootstrapper::DetachGlobal(Handle<Context> env) {
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = env->GetIsolate()->factory();
   JSGlobalProxy::cast(env->global_proxy())->set_context(*factory->null_value());
   SetObjectPrototype(Handle<JSObject>(env->global_proxy()),
                      factory->null_value());
@@ -322,7 +332,7 @@
                                           Handle<JSObject> prototype,
                                           Builtins::Name call,
                                           bool is_ecma_native) {
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = target->GetIsolate();
   Factory* factory = isolate->factory();
   Handle<String> symbol = factory->LookupAsciiSymbol(name);
   Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
@@ -344,30 +354,31 @@
 
 Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
     PrototypePropertyMode prototypeMode) {
-  Factory* factory = Isolate::Current()->factory();
   Handle<DescriptorArray> descriptors =
-      factory->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
+      factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
+                                    ? 4
+                                    : 5);
   PropertyAttributes attributes =
       static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   {  // Add length.
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionLength);
-    CallbacksDescriptor d(*factory->length_symbol(), *proxy, attributes);
+    Handle<Proxy> proxy = factory()->NewProxy(&Accessors::FunctionLength);
+    CallbacksDescriptor d(*factory()->length_symbol(), *proxy, attributes);
     descriptors->Set(0, &d);
   }
   {  // Add name.
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionName);
-    CallbacksDescriptor d(*factory->name_symbol(), *proxy, attributes);
+    Handle<Proxy> proxy = factory()->NewProxy(&Accessors::FunctionName);
+    CallbacksDescriptor d(*factory()->name_symbol(), *proxy, attributes);
     descriptors->Set(1, &d);
   }
   {  // Add arguments.
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionArguments);
-    CallbacksDescriptor d(*factory->arguments_symbol(), *proxy, attributes);
+    Handle<Proxy> proxy = factory()->NewProxy(&Accessors::FunctionArguments);
+    CallbacksDescriptor d(*factory()->arguments_symbol(), *proxy, attributes);
     descriptors->Set(2, &d);
   }
   {  // Add caller.
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionCaller);
-    CallbacksDescriptor d(*factory->caller_symbol(), *proxy, attributes);
+    Handle<Proxy> proxy = factory()->NewProxy(&Accessors::FunctionCaller);
+    CallbacksDescriptor d(*factory()->caller_symbol(), *proxy, attributes);
     descriptors->Set(3, &d);
   }
   if (prototypeMode != DONT_ADD_PROTOTYPE) {
@@ -375,8 +386,8 @@
     if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
       attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
     }
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionPrototype);
-    CallbacksDescriptor d(*factory->prototype_symbol(), *proxy, attributes);
+    Handle<Proxy> proxy = factory()->NewProxy(&Accessors::FunctionPrototype);
+    CallbacksDescriptor d(*factory()->prototype_symbol(), *proxy, attributes);
     descriptors->Set(4, &d);
   }
   descriptors->Sort();
@@ -385,7 +396,7 @@
 
 
 Handle<Map> Genesis::CreateFunctionMap(PrototypePropertyMode prototype_mode) {
-  Handle<Map> map = FACTORY->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+  Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   Handle<DescriptorArray> descriptors =
       ComputeFunctionInstanceDescriptor(prototype_mode);
   map->set_instance_descriptors(*descriptors);
@@ -394,32 +405,34 @@
 }
 
 
-Handle<JSFunction> Genesis::CreateEmptyFunction() {
+Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
   // Allocate the map for function instances. Maps are allocated first and
   // their prototypes patched later, once the empty function is created.
 
   // Please note that the prototype property for function instances must be
   // writable.
-  global_context()->set_function_instance_map(
-      *CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE));
+  Handle<Map> function_instance_map =
+      CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
+  global_context()->set_function_instance_map(*function_instance_map);
 
   // Functions with this map will not have a 'prototype' property, and
   // cannot be used as constructors.
+  Handle<Map> function_without_prototype_map =
+      CreateFunctionMap(DONT_ADD_PROTOTYPE);
   global_context()->set_function_without_prototype_map(
-      *CreateFunctionMap(DONT_ADD_PROTOTYPE));
+      *function_without_prototype_map);
 
   // Allocate the function map. This map is temporary, used only for
   // processing builtins.
   // Later the map is replaced with writable prototype map, allocated below.
-  global_context()->set_function_map(
-      *CreateFunctionMap(ADD_READONLY_PROTOTYPE));
+  Handle<Map> function_map = CreateFunctionMap(ADD_READONLY_PROTOTYPE);
+  global_context()->set_function_map(*function_map);
 
   // The final map for functions. Writeable prototype.
   // This map is installed in MakeFunctionInstancePrototypeWritable.
   function_instance_map_writable_prototype_ =
       CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
 
-  Isolate* isolate = Isolate::Current();
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
 
@@ -474,8 +487,6 @@
   function_instance_map_writable_prototype_->set_prototype(*empty_function);
 
   // Allocate the function map first and then patch the prototype later
-  Handle<Map> function_without_prototype_map(
-      global_context()->function_without_prototype_map());
   Handle<Map> empty_fm = factory->CopyMapDropDescriptors(
       function_without_prototype_map);
   empty_fm->set_instance_descriptors(
@@ -490,28 +501,31 @@
     PrototypePropertyMode prototypeMode,
     Handle<FixedArray> arguments,
     Handle<FixedArray> caller) {
-  Factory* factory = Isolate::Current()->factory();
   Handle<DescriptorArray> descriptors =
-      factory->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
+      factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
+                                    ? 4
+                                    : 5);
   PropertyAttributes attributes = static_cast<PropertyAttributes>(
       DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   {  // length
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionLength);
-    CallbacksDescriptor d(*factory->length_symbol(), *proxy, attributes);
+    Handle<Proxy> proxy = factory()->NewProxy(&Accessors::FunctionLength);
+    CallbacksDescriptor d(*factory()->length_symbol(), *proxy, attributes);
     descriptors->Set(0, &d);
   }
   {  // name
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionName);
-    CallbacksDescriptor d(*factory->name_symbol(), *proxy, attributes);
+    Handle<Proxy> proxy = factory()->NewProxy(&Accessors::FunctionName);
+    CallbacksDescriptor d(*factory()->name_symbol(), *proxy, attributes);
     descriptors->Set(1, &d);
   }
   {  // arguments
-    CallbacksDescriptor d(*factory->arguments_symbol(), *arguments, attributes);
+    CallbacksDescriptor d(*factory()->arguments_symbol(),
+                          *arguments,
+                          attributes);
     descriptors->Set(2, &d);
   }
   {  // caller
-    CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
+    CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attributes);
     descriptors->Set(3, &d);
   }
 
@@ -520,8 +534,8 @@
     if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
       attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
     }
-    Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionPrototype);
-    CallbacksDescriptor d(*factory->prototype_symbol(), *proxy, attributes);
+    Handle<Proxy> proxy = factory()->NewProxy(&Accessors::FunctionPrototype);
+    CallbacksDescriptor d(*factory()->prototype_symbol(), *proxy, attributes);
     descriptors->Set(4, &d);
   }
 
@@ -533,14 +547,11 @@
 // ECMAScript 5th Edition, 13.2.3
 Handle<JSFunction> Genesis::CreateThrowTypeErrorFunction(
     Builtins::Name builtin) {
-  Isolate* isolate = Isolate::Current();
-  Factory* factory = isolate->factory();
-
-  Handle<String> name = factory->LookupAsciiSymbol("ThrowTypeError");
+  Handle<String> name = factory()->LookupAsciiSymbol("ThrowTypeError");
   Handle<JSFunction> throw_type_error =
-      factory->NewFunctionWithoutPrototype(name, kStrictMode);
+      factory()->NewFunctionWithoutPrototype(name, kStrictMode);
   Handle<Code> code = Handle<Code>(
-      isolate->builtins()->builtin(builtin));
+      isolate()->builtins()->builtin(builtin));
 
   throw_type_error->set_map(global_context()->strict_mode_function_map());
   throw_type_error->set_code(*code);
@@ -558,7 +569,7 @@
     Handle<JSFunction> empty_function,
     Handle<FixedArray> arguments_callbacks,
     Handle<FixedArray> caller_callbacks) {
-  Handle<Map> map = FACTORY->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+  Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   Handle<DescriptorArray> descriptors =
       ComputeStrictFunctionInstanceDescriptor(prototype_mode,
                                               arguments_callbacks,
@@ -573,26 +584,32 @@
 void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
   // Create the callbacks arrays for ThrowTypeError functions.
   // The get/set callbacks are filled in after the maps are created below.
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = empty->GetIsolate()->factory();
   Handle<FixedArray> arguments = factory->NewFixedArray(2, TENURED);
   Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
 
   // Allocate map for the strict mode function instances.
+  Handle<Map> strict_mode_function_instance_map =
+      CreateStrictModeFunctionMap(
+          ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
   global_context()->set_strict_mode_function_instance_map(
-      *CreateStrictModeFunctionMap(
-          ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller));
+      *strict_mode_function_instance_map);
 
   // Allocate map for the prototype-less strict mode instances.
+  Handle<Map> strict_mode_function_without_prototype_map =
+      CreateStrictModeFunctionMap(
+          DONT_ADD_PROTOTYPE, empty, arguments, caller);
   global_context()->set_strict_mode_function_without_prototype_map(
-      *CreateStrictModeFunctionMap(
-          DONT_ADD_PROTOTYPE, empty, arguments, caller));
+      *strict_mode_function_without_prototype_map);
 
   // Allocate map for the strict mode functions. This map is temporary, used
   // only for processing builtins.
   // Later the map is replaced with writable prototype map, allocated below.
+  Handle<Map> strict_mode_function_map =
+      CreateStrictModeFunctionMap(
+          ADD_READONLY_PROTOTYPE, empty, arguments, caller);
   global_context()->set_strict_mode_function_map(
-      *CreateStrictModeFunctionMap(
-          ADD_READONLY_PROTOTYPE, empty, arguments, caller));
+      *strict_mode_function_map);
 
   // The final map for the strict mode functions. Writeable prototype.
   // This map is installed in MakeFunctionInstancePrototypeWritable.
@@ -616,7 +633,7 @@
 
 static void AddToWeakGlobalContextList(Context* context) {
   ASSERT(context->IsGlobalContext());
-  Heap* heap = Isolate::Current()->heap();
+  Heap* heap = context->GetIsolate()->heap();
 #ifdef DEBUG
   { // NOLINT
     ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
@@ -634,15 +651,14 @@
 
 
 void Genesis::CreateRoots() {
-  Isolate* isolate = Isolate::Current();
   // Allocate the global context FixedArray first and then patch the
   // closure and extension object later (we need the empty function
   // and the global object, but in order to create those, we need the
   // global context).
-  global_context_ = Handle<Context>::cast(isolate->global_handles()->Create(
-              *isolate->factory()->NewGlobalContext()));
+  global_context_ = Handle<Context>::cast(isolate()->global_handles()->Create(
+              *factory()->NewGlobalContext()));
   AddToWeakGlobalContextList(*global_context_);
-  isolate->set_context(*global_context());
+  isolate()->set_context(*global_context());
 
   // Allocate the message listeners object.
   {
@@ -685,17 +701,13 @@
     }
   }
 
-  Isolate* isolate = Isolate::Current();
-  Factory* factory = isolate->factory();
-  Heap* heap = isolate->heap();
-
   if (js_global_template.is_null()) {
-    Handle<String> name = Handle<String>(heap->empty_symbol());
-    Handle<Code> code = Handle<Code>(isolate->builtins()->builtin(
+    Handle<String> name = Handle<String>(heap()->empty_symbol());
+    Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
         Builtins::kIllegal));
     js_global_function =
-        factory->NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
-                             JSGlobalObject::kSize, code, true);
+        factory()->NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
+                               JSGlobalObject::kSize, code, true);
     // Change the constructor property of the prototype of the
     // hidden global function to refer to the Object function.
     Handle<JSObject> prototype =
@@ -703,20 +715,20 @@
             JSObject::cast(js_global_function->instance_prototype()));
     SetLocalPropertyNoThrow(
         prototype,
-        factory->constructor_symbol(),
-        isolate->object_function(),
+        factory()->constructor_symbol(),
+        isolate()->object_function(),
         NONE);
   } else {
     Handle<FunctionTemplateInfo> js_global_constructor(
         FunctionTemplateInfo::cast(js_global_template->constructor()));
     js_global_function =
-        factory->CreateApiFunction(js_global_constructor,
-                                   factory->InnerGlobalObject);
+        factory()->CreateApiFunction(js_global_constructor,
+                                     factory()->InnerGlobalObject);
   }
 
   js_global_function->initial_map()->set_is_hidden_prototype();
   Handle<GlobalObject> inner_global =
-      factory->NewGlobalObject(js_global_function);
+      factory()->NewGlobalObject(js_global_function);
   if (inner_global_out != NULL) {
     *inner_global_out = inner_global;
   }
@@ -724,23 +736,23 @@
   // Step 2: create or re-initialize the global proxy object.
   Handle<JSFunction> global_proxy_function;
   if (global_template.IsEmpty()) {
-    Handle<String> name = Handle<String>(heap->empty_symbol());
-    Handle<Code> code = Handle<Code>(isolate->builtins()->builtin(
+    Handle<String> name = Handle<String>(heap()->empty_symbol());
+    Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
         Builtins::kIllegal));
     global_proxy_function =
-        factory->NewFunction(name, JS_GLOBAL_PROXY_TYPE,
-                             JSGlobalProxy::kSize, code, true);
+        factory()->NewFunction(name, JS_GLOBAL_PROXY_TYPE,
+                               JSGlobalProxy::kSize, code, true);
   } else {
     Handle<ObjectTemplateInfo> data =
         v8::Utils::OpenHandle(*global_template);
     Handle<FunctionTemplateInfo> global_constructor(
             FunctionTemplateInfo::cast(data->constructor()));
     global_proxy_function =
-        factory->CreateApiFunction(global_constructor,
-                                   factory->OuterGlobalObject);
+        factory()->CreateApiFunction(global_constructor,
+                                     factory()->OuterGlobalObject);
   }
 
-  Handle<String> global_name = factory->LookupAsciiSymbol("global");
+  Handle<String> global_name = factory()->LookupAsciiSymbol("global");
   global_proxy_function->shared()->set_instance_class_name(*global_name);
   global_proxy_function->initial_map()->set_is_access_check_needed(true);
 
@@ -754,7 +766,7 @@
         Handle<JSGlobalProxy>::cast(global_object));
   } else {
     return Handle<JSGlobalProxy>::cast(
-        factory->NewJSObject(global_proxy_function, TENURED));
+        factory()->NewJSObject(global_proxy_function, TENURED));
   }
 }
 
@@ -779,7 +791,7 @@
   static const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
   ForceSetProperty(builtins_global,
-                   FACTORY->LookupAsciiSymbol("global"),
+                   factory()->LookupAsciiSymbol("global"),
                    inner_global,
                    attributes);
   // Set up the reference from the global object to the builtins object.
@@ -807,7 +819,7 @@
   // object reinitialization.
   global_context()->set_security_token(*inner_global);
 
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = inner_global->GetIsolate();
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
 
@@ -1157,17 +1169,26 @@
 }
 
 
-bool Genesis::CompileBuiltin(int index) {
+bool Genesis::CompileBuiltin(Isolate* isolate, int index) {
   Vector<const char> name = Natives::GetScriptName(index);
   Handle<String> source_code =
-      Isolate::Current()->bootstrapper()->NativesSourceLookup(index);
+      isolate->bootstrapper()->NativesSourceLookup(index);
+  return CompileNative(name, source_code);
+}
+
+
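+// Unlike the regular natives, experimental sources are not pre-cached by
+// the bootstrapper; a fresh source string is created for each compile.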
+bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
+  Vector<const char> name = ExperimentalNatives::GetScriptName(index);
+  Factory* factory = isolate->factory();
+  Handle<String> source_code =
+      factory->NewStringFromAscii(ExperimentalNatives::GetScriptSource(index));
   return CompileNative(name, source_code);
 }
 
 
 bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
   HandleScope scope;
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = source->GetIsolate();
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate->debugger()->set_compiling_natives(true);
 #endif
@@ -1192,7 +1213,7 @@
                                   v8::Extension* extension,
                                   Handle<Context> top_context,
                                   bool use_runtime_context) {
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = source->GetIsolate()->factory();
   HandleScope scope;
   Handle<SharedFunctionInfo> function_info;
 
@@ -1239,14 +1260,15 @@
 }
 
 
-#define INSTALL_NATIVE(Type, name, var)                                     \
-  Handle<String> var##_name = factory->LookupAsciiSymbol(name);             \
-  global_context()->set_##var(Type::cast(                                   \
-      global_context()->builtins()->GetPropertyNoExceptionThrown(*var##_name)));
+#define INSTALL_NATIVE(Type, name, var)                                       \
+  Handle<String> var##_name = factory()->LookupAsciiSymbol(name);             \
+  Object* var##_native =                                                      \
+      global_context()->builtins()->GetPropertyNoExceptionThrown(             \
+           *var##_name);                                                      \
+  global_context()->set_##var(Type::cast(var##_native));
 
 
 void Genesis::InstallNativeFunctions() {
-  Factory* factory = Isolate::Current()->factory();
   HandleScope scope;
   INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
   INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
@@ -1269,25 +1291,23 @@
 
 bool Genesis::InstallNatives() {
   HandleScope scope;
-  Isolate* isolate = Isolate::Current();
-  Factory* factory = isolate->factory();
-  Heap* heap = isolate->heap();
 
   // Create a function for the builtins object. Allocate space for the
   // JavaScript builtins, a reference to the builtins object
   // (itself) and a reference to the global_context directly in the object.
   Handle<Code> code = Handle<Code>(
-      isolate->builtins()->builtin(Builtins::kIllegal));
+      isolate()->builtins()->builtin(Builtins::kIllegal));
   Handle<JSFunction> builtins_fun =
-      factory->NewFunction(factory->empty_symbol(), JS_BUILTINS_OBJECT_TYPE,
-                           JSBuiltinsObject::kSize, code, true);
+      factory()->NewFunction(factory()->empty_symbol(),
+                             JS_BUILTINS_OBJECT_TYPE,
+                             JSBuiltinsObject::kSize, code, true);
 
-  Handle<String> name = factory->LookupAsciiSymbol("builtins");
+  Handle<String> name = factory()->LookupAsciiSymbol("builtins");
   builtins_fun->shared()->set_instance_class_name(*name);
 
   // Allocate the builtins object.
   Handle<JSBuiltinsObject> builtins =
-      Handle<JSBuiltinsObject>::cast(factory->NewGlobalObject(builtins_fun));
+      Handle<JSBuiltinsObject>::cast(factory()->NewGlobalObject(builtins_fun));
   builtins->set_builtins(*builtins);
   builtins->set_global_context(*global_context());
   builtins->set_global_receiver(*builtins);
@@ -1298,7 +1318,7 @@
   // global object.
   static const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
-  Handle<String> global_symbol = factory->LookupAsciiSymbol("global");
+  Handle<String> global_symbol = factory()->LookupAsciiSymbol("global");
   Handle<Object> global_obj(global_context()->global());
   SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes);
 
@@ -1307,12 +1327,13 @@
 
   // Create a bridge function that has context in the global context.
   Handle<JSFunction> bridge =
-      factory->NewFunction(factory->empty_symbol(), factory->undefined_value());
-  ASSERT(bridge->context() == *isolate->global_context());
+      factory()->NewFunction(factory()->empty_symbol(),
+                             factory()->undefined_value());
+  ASSERT(bridge->context() == *isolate()->global_context());
 
   // Allocate the builtins context.
   Handle<Context> context =
-    factory->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
+    factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
   context->set_global(*builtins);  // override builtins global object
 
   global_context()->set_runtime_context(*context);
@@ -1321,113 +1342,113 @@
     // Builtin functions for Script.
     Handle<JSFunction> script_fun =
         InstallFunction(builtins, "Script", JS_VALUE_TYPE, JSValue::kSize,
-                        isolate->initial_object_prototype(),
+                        isolate()->initial_object_prototype(),
                         Builtins::kIllegal, false);
     Handle<JSObject> prototype =
-        factory->NewJSObject(isolate->object_function(), TENURED);
+        factory()->NewJSObject(isolate()->object_function(), TENURED);
     SetPrototype(script_fun, prototype);
     global_context()->set_script_function(*script_fun);
 
     // Add 'source' and 'data' property to scripts.
     PropertyAttributes common_attributes =
         static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-    Handle<Proxy> proxy_source = factory->NewProxy(&Accessors::ScriptSource);
+    Handle<Proxy> proxy_source = factory()->NewProxy(&Accessors::ScriptSource);
     Handle<DescriptorArray> script_descriptors =
-        factory->CopyAppendProxyDescriptor(
-            factory->empty_descriptor_array(),
-            factory->LookupAsciiSymbol("source"),
+        factory()->CopyAppendProxyDescriptor(
+            factory()->empty_descriptor_array(),
+            factory()->LookupAsciiSymbol("source"),
             proxy_source,
             common_attributes);
-    Handle<Proxy> proxy_name = factory->NewProxy(&Accessors::ScriptName);
+    Handle<Proxy> proxy_name = factory()->NewProxy(&Accessors::ScriptName);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendProxyDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("name"),
+            factory()->LookupAsciiSymbol("name"),
             proxy_name,
             common_attributes);
-    Handle<Proxy> proxy_id = factory->NewProxy(&Accessors::ScriptId);
+    Handle<Proxy> proxy_id = factory()->NewProxy(&Accessors::ScriptId);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendProxyDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("id"),
+            factory()->LookupAsciiSymbol("id"),
             proxy_id,
             common_attributes);
     Handle<Proxy> proxy_line_offset =
-        factory->NewProxy(&Accessors::ScriptLineOffset);
+        factory()->NewProxy(&Accessors::ScriptLineOffset);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendProxyDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("line_offset"),
+            factory()->LookupAsciiSymbol("line_offset"),
             proxy_line_offset,
             common_attributes);
     Handle<Proxy> proxy_column_offset =
-        factory->NewProxy(&Accessors::ScriptColumnOffset);
+        factory()->NewProxy(&Accessors::ScriptColumnOffset);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendProxyDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("column_offset"),
+            factory()->LookupAsciiSymbol("column_offset"),
             proxy_column_offset,
             common_attributes);
-    Handle<Proxy> proxy_data = factory->NewProxy(&Accessors::ScriptData);
+    Handle<Proxy> proxy_data = factory()->NewProxy(&Accessors::ScriptData);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendProxyDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("data"),
+            factory()->LookupAsciiSymbol("data"),
             proxy_data,
             common_attributes);
-    Handle<Proxy> proxy_type = factory->NewProxy(&Accessors::ScriptType);
+    Handle<Proxy> proxy_type = factory()->NewProxy(&Accessors::ScriptType);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendProxyDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("type"),
+            factory()->LookupAsciiSymbol("type"),
             proxy_type,
             common_attributes);
     Handle<Proxy> proxy_compilation_type =
-        factory->NewProxy(&Accessors::ScriptCompilationType);
+        factory()->NewProxy(&Accessors::ScriptCompilationType);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendProxyDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("compilation_type"),
+            factory()->LookupAsciiSymbol("compilation_type"),
             proxy_compilation_type,
             common_attributes);
     Handle<Proxy> proxy_line_ends =
-        factory->NewProxy(&Accessors::ScriptLineEnds);
+        factory()->NewProxy(&Accessors::ScriptLineEnds);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendProxyDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("line_ends"),
+            factory()->LookupAsciiSymbol("line_ends"),
             proxy_line_ends,
             common_attributes);
     Handle<Proxy> proxy_context_data =
-        factory->NewProxy(&Accessors::ScriptContextData);
+        factory()->NewProxy(&Accessors::ScriptContextData);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendProxyDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("context_data"),
+            factory()->LookupAsciiSymbol("context_data"),
             proxy_context_data,
             common_attributes);
     Handle<Proxy> proxy_eval_from_script =
-        factory->NewProxy(&Accessors::ScriptEvalFromScript);
+        factory()->NewProxy(&Accessors::ScriptEvalFromScript);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendProxyDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("eval_from_script"),
+            factory()->LookupAsciiSymbol("eval_from_script"),
             proxy_eval_from_script,
             common_attributes);
     Handle<Proxy> proxy_eval_from_script_position =
-        factory->NewProxy(&Accessors::ScriptEvalFromScriptPosition);
+        factory()->NewProxy(&Accessors::ScriptEvalFromScriptPosition);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendProxyDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("eval_from_script_position"),
+            factory()->LookupAsciiSymbol("eval_from_script_position"),
             proxy_eval_from_script_position,
             common_attributes);
     Handle<Proxy> proxy_eval_from_function_name =
-        factory->NewProxy(&Accessors::ScriptEvalFromFunctionName);
+        factory()->NewProxy(&Accessors::ScriptEvalFromFunctionName);
     script_descriptors =
-        factory->CopyAppendProxyDescriptor(
+        factory()->CopyAppendProxyDescriptor(
             script_descriptors,
-            factory->LookupAsciiSymbol("eval_from_function_name"),
+            factory()->LookupAsciiSymbol("eval_from_function_name"),
             proxy_eval_from_function_name,
             common_attributes);
 
@@ -1435,9 +1456,9 @@
     script_map->set_instance_descriptors(*script_descriptors);
 
     // Allocate the empty script.
-    Handle<Script> script = factory->NewScript(factory->empty_string());
+    Handle<Script> script = factory()->NewScript(factory()->empty_string());
     script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
-    heap->public_set_empty_script(*script);
+    heap()->public_set_empty_script(*script);
   }
   {
     // Builtin function for OpaqueReference -- a JSValue-based object,
@@ -1446,10 +1467,10 @@
     Handle<JSFunction> opaque_reference_fun =
         InstallFunction(builtins, "OpaqueReference", JS_VALUE_TYPE,
                         JSValue::kSize,
-                        isolate->initial_object_prototype(),
+                        isolate()->initial_object_prototype(),
                         Builtins::kIllegal, false);
     Handle<JSObject> prototype =
-        factory->NewJSObject(isolate->object_function(), TENURED);
+        factory()->NewJSObject(isolate()->object_function(), TENURED);
     SetPrototype(opaque_reference_fun, prototype);
     global_context()->set_opaque_reference_function(*opaque_reference_fun);
   }
@@ -1468,23 +1489,23 @@
                         "InternalArray",
                         JS_ARRAY_TYPE,
                         JSArray::kSize,
-                        isolate->initial_object_prototype(),
+                        isolate()->initial_object_prototype(),
                         Builtins::kArrayCode,
                         true);
     Handle<JSObject> prototype =
-        factory->NewJSObject(isolate->object_function(), TENURED);
+        factory()->NewJSObject(isolate()->object_function(), TENURED);
     SetPrototype(array_function, prototype);
 
     array_function->shared()->set_construct_stub(
-        isolate->builtins()->builtin(Builtins::kArrayConstructCode));
+        isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
     array_function->shared()->DontAdaptArguments();
 
     // Make "length" magic on instances.
     Handle<DescriptorArray> array_descriptors =
-        factory->CopyAppendProxyDescriptor(
-            factory->empty_descriptor_array(),
-            factory->length_symbol(),
-            factory->NewProxy(&Accessors::ArrayLength),
+        factory()->CopyAppendProxyDescriptor(
+            factory()->empty_descriptor_array(),
+            factory()->length_symbol(),
+            factory()->NewProxy(&Accessors::ArrayLength),
             static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
 
     array_function->initial_map()->set_instance_descriptors(
@@ -1500,8 +1521,7 @@
   for (int i = Natives::GetDebuggerCount();
        i < Natives::GetBuiltinsCount();
        i++) {
-    Vector<const char> name = Natives::GetScriptName(i);
-    if (!CompileBuiltin(i)) return false;
+    if (!CompileBuiltin(isolate(), i)) return false;
     // TODO(ager): We really only need to install the JS builtin
     // functions on the builtins object after compiling and running
     // runtime.js.
@@ -1521,9 +1541,9 @@
   InstallBuiltinFunctionIds();
 
   // Install Function.prototype.call and apply.
-  { Handle<String> key = factory->function_class_symbol();
+  { Handle<String> key = factory()->function_class_symbol();
     Handle<JSFunction> function =
-        Handle<JSFunction>::cast(GetProperty(isolate->global(), key));
+        Handle<JSFunction>::cast(GetProperty(isolate()->global(), key));
     Handle<JSObject> proto =
         Handle<JSObject>(JSObject::cast(function->instance_prototype()));
 
@@ -1565,7 +1585,7 @@
 
     // Add initial map.
     Handle<Map> initial_map =
-        factory->NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
+        factory()->NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
     initial_map->set_constructor(*array_constructor);
 
     // Set prototype on map.
@@ -1579,13 +1599,13 @@
     ASSERT_EQ(1, array_descriptors->number_of_descriptors());
 
     Handle<DescriptorArray> reresult_descriptors =
-        factory->NewDescriptorArray(3);
+        factory()->NewDescriptorArray(3);
 
     reresult_descriptors->CopyFrom(0, *array_descriptors, 0);
 
     int enum_index = 0;
     {
-      FieldDescriptor index_field(heap->index_symbol(),
+      FieldDescriptor index_field(heap()->index_symbol(),
                                   JSRegExpResult::kIndexIndex,
                                   NONE,
                                   enum_index++);
@@ -1593,7 +1613,7 @@
     }
 
     {
-      FieldDescriptor input_field(heap->input_symbol(),
+      FieldDescriptor input_field(heap()->input_symbol(),
                                   JSRegExpResult::kInputIndex,
                                   NONE,
                                   enum_index++);
@@ -1618,10 +1638,22 @@
 }
 
 
+bool Genesis::InstallExperimentalNatives() {
+  if (FLAG_harmony_proxies) {
+    for (int i = ExperimentalNatives::GetDebuggerCount();
+         i < ExperimentalNatives::GetBuiltinsCount();
+         i++) {
+      if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+    }
+  }
+  return true;
+}
+
+
 static Handle<JSObject> ResolveBuiltinIdHolder(
     Handle<Context> global_context,
     const char* holder_expr) {
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = global_context->GetIsolate()->factory();
   Handle<GlobalObject> global(global_context->global());
   const char* period_pos = strchr(holder_expr, '.');
   if (period_pos == NULL) {
@@ -1640,7 +1672,8 @@
 static void InstallBuiltinFunctionId(Handle<JSObject> holder,
                                      const char* function_name,
                                      BuiltinFunctionId id) {
-  Handle<String> name = FACTORY->LookupAsciiSymbol(function_name);
+  Factory* factory = holder->GetIsolate()->factory();
+  Handle<String> name = factory->LookupAsciiSymbol(function_name);
   Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
   Handle<JSFunction> function(JSFunction::cast(function_object));
   function->shared()->set_function_data(Smi::FromInt(id));
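This hunk is typical of the isolate-threading cleanup that runs through the whole merge: thread-local lookups (Isolate::Current(), the FACTORY macro) give way to asking an object already in hand for its isolate. A minimal sketch of the pattern; InstallNamedFunction is an illustrative name, not part of the diff:

    // Derive the isolate from a handle instead of thread-local state.
    void InstallNamedFunction(Handle<JSObject> holder, const char* name) {
      Isolate* isolate = holder->GetIsolate();  // no Isolate::Current()
      Factory* factory = isolate->factory();    // no FACTORY macro
      Handle<String> key = factory->LookupAsciiSymbol(name);
      // ... look up and install the function on 'holder' as above ...
    }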
@@ -1667,13 +1700,14 @@
   F(16, global_context()->regexp_function())
 
 
-static FixedArray* CreateCache(int size, JSFunction* factory) {
+static FixedArray* CreateCache(int size, JSFunction* factory_function) {
+  Factory* factory = factory_function->GetIsolate()->factory();
   // Caches are supposed to live for a long time, allocate in old space.
   int array_size = JSFunctionResultCache::kEntriesIndex + 2 * size;
   // Cannot use cast as object is not fully initialized yet.
   JSFunctionResultCache* cache = reinterpret_cast<JSFunctionResultCache*>(
-      *FACTORY->NewFixedArrayWithHoles(array_size, TENURED));
-  cache->set(JSFunctionResultCache::kFactoryIndex, factory);
+      *factory->NewFixedArrayWithHoles(array_size, TENURED));
+  cache->set(JSFunctionResultCache::kFactoryIndex, factory_function);
   cache->MakeZeroSize();
   return cache;
 }
@@ -1712,7 +1746,7 @@
 
 bool Bootstrapper::InstallExtensions(Handle<Context> global_context,
                                      v8::ExtensionConfiguration* extensions) {
-  Isolate* isolate = Isolate::Current();
+  Isolate* isolate = global_context->GetIsolate();
   BootstrapperActive active;
   SaveContext saved_context(isolate);
   isolate->set_context(*global_context);
@@ -1723,7 +1757,7 @@
 
 
 void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
-  Factory* factory = Isolate::Current()->factory();
+  Factory* factory = global_context->GetIsolate()->factory();
   HandleScope scope;
   Handle<JSGlobalObject> js_global(
       JSGlobalObject::cast(global_context->global()));
@@ -1859,9 +1893,10 @@
 
 bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
   HandleScope scope;
+  Factory* factory = builtins->GetIsolate()->factory();
   for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
     Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
-    Handle<String> name = FACTORY->LookupAsciiSymbol(Builtins::GetName(id));
+    Handle<String> name = factory->LookupAsciiSymbol(Builtins::GetName(id));
     Object* function_object = builtins->GetPropertyNoExceptionThrown(*name);
     Handle<JSFunction> function
         = Handle<JSFunction>(JSFunction::cast(function_object));
@@ -1910,13 +1945,12 @@
   ASSERT(object->IsInstanceOf(
       FunctionTemplateInfo::cast(object_template->constructor())));
 
-  Isolate* isolate = Isolate::Current();
   bool pending_exception = false;
   Handle<JSObject> obj =
       Execution::InstantiateObject(object_template, &pending_exception);
   if (pending_exception) {
-    ASSERT(isolate->has_pending_exception());
-    isolate->clear_pending_exception();
+    ASSERT(isolate()->has_pending_exception());
+    isolate()->clear_pending_exception();
     return false;
   }
   TransferObject(obj, object);
@@ -2015,6 +2049,7 @@
 
 void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
   HandleScope outer;
+  Factory* factory = from->GetIsolate()->factory();
 
   ASSERT(!from->IsJSArray());
   ASSERT(!to->IsJSArray());
@@ -2024,7 +2059,7 @@
 
   // Transfer the prototype (new map is needed).
   Handle<Map> old_to_map = Handle<Map>(to->map());
-  Handle<Map> new_to_map = FACTORY->CopyMapDropTransitions(old_to_map);
+  Handle<Map> new_to_map = factory->CopyMapDropTransitions(old_to_map);
   new_to_map->set_prototype(from->map()->prototype());
   to->set_map(*new_to_map);
 }
@@ -2045,10 +2080,10 @@
 }
 
 
-Genesis::Genesis(Handle<Object> global_object,
+Genesis::Genesis(Isolate* isolate,
+                 Handle<Object> global_object,
                  v8::Handle<v8::ObjectTemplate> global_template,
-                 v8::ExtensionConfiguration* extensions) {
-  Isolate* isolate = Isolate::Current();
+                 v8::ExtensionConfiguration* extensions) : isolate_(isolate) {
   result_ = Handle<Context>::null();
   // If V8 isn't running and cannot be initialized, just return.
   if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
@@ -2078,7 +2113,7 @@
   } else {
     // We get here if there was no context snapshot.
     CreateRoots();
-    Handle<JSFunction> empty_function = CreateEmptyFunction();
+    Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
     CreateStrictModeFunctionMaps(empty_function);
     Handle<GlobalObject> inner_global;
     Handle<JSGlobalProxy> global_proxy =
@@ -2095,6 +2130,9 @@
     isolate->counters()->contexts_created_from_scratch()->Increment();
   }
 
+  // Install experimental natives.
+  if (!InstallExperimentalNatives()) return;
+
   result_ = global_context_;
 }
 
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 3e158d6..018ceef 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -93,6 +93,7 @@
   // Creates a JavaScript Global Context with an initial object graph.
   // The returned value is a global handle cast to V8Environment*.
   Handle<Context> CreateEnvironment(
+      Isolate* isolate,
       Handle<Object> global_object,
       v8::Handle<v8::ObjectTemplate> global_template,
       v8::ExtensionConfiguration* extensions);
diff --git a/src/builtins.cc b/src/builtins.cc
index 72f9d57..ae3dab4 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1594,10 +1594,11 @@
 
 void Builtins::Setup(bool create_heap_objects) {
   ASSERT(!initialized_);
-  Heap* heap = Isolate::Current()->heap();
+  Isolate* isolate = Isolate::Current();
+  Heap* heap = isolate->heap();
 
   // Create a scope for the handles in the builtins.
-  HandleScope scope;
+  HandleScope scope(isolate);
 
   const BuiltinDesc* functions = BuiltinFunctionTable::functions();
 
@@ -1609,7 +1610,7 @@
   // separate code object for each one.
   for (int i = 0; i < builtin_count; i++) {
     if (create_heap_objects) {
-      MacroAssembler masm(buffer, sizeof buffer);
+      MacroAssembler masm(isolate, buffer, sizeof buffer);
       // Generate the code/adaptor.
       typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
       Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
@@ -1634,7 +1635,7 @@
         }
       }
       // Log the event and add the code to the builtins array.
-      PROFILE(ISOLATE,
+      PROFILE(isolate,
               CodeCreateEvent(Logger::BUILTIN_TAG,
                               Code::cast(code),
                               functions[i].s_name));
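The same calling-convention change recurs in code-stubs.cc below: MacroAssembler now takes the isolate as its first constructor argument instead of fetching it internally. A sketch of the new shape; GenerateInto is an illustrative wrapper name:

    // Isolate is passed explicitly; masm.isolate() can then hand back
    // the heap for allocating the resulting Code object.
    void GenerateInto(Isolate* isolate, byte* buffer, int size) {
      MacroAssembler masm(isolate, buffer, size);
      // ... emit code, then allocate via masm.isolate()->heap() ...
    }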
diff --git a/src/checks.h b/src/checks.h
index 2bb94bb..a560b2f 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -271,6 +271,8 @@
 #define ASSERT_EQ(v1, v2)    CHECK_EQ(v1, v2)
 #define ASSERT_NE(v1, v2)    CHECK_NE(v1, v2)
 #define ASSERT_GE(v1, v2)    CHECK_GE(v1, v2)
+#define ASSERT_LT(v1, v2)    CHECK_LT(v1, v2)
+#define ASSERT_LE(v1, v2)    CHECK_LE(v1, v2)
 #define SLOW_ASSERT(condition) if (EnableSlowAsserts()) CHECK(condition)
 #else
 #define ASSERT_RESULT(expr)     (expr)
@@ -278,6 +280,8 @@
 #define ASSERT_EQ(v1, v2)      ((void) 0)
 #define ASSERT_NE(v1, v2)      ((void) 0)
 #define ASSERT_GE(v1, v2)      ((void) 0)
+#define ASSERT_LT(v1, v2)      ((void) 0)
+#define ASSERT_LE(v1, v2)      ((void) 0)
 #define SLOW_ASSERT(condition) ((void) 0)
 #endif
 // Static asserts have no impact on runtime performance, so they can be
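The new macros round out the comparison family alongside ASSERT_EQ/ASSERT_NE/ASSERT_GE: in debug builds they forward to the corresponding CHECK_* macros, in release builds they compile to ((void) 0). A short usage sketch; CopyRange is an illustrative name:

    // Debug-only bounds checks using the new macros.
    void CopyRange(int from, int to, int length) {
      ASSERT_LE(0, from);     // from may equal zero
      ASSERT_LT(to, length);  // to must be strictly below length
      // ... perform the copy ...
    }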
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 2ecd336..f680c60 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -95,7 +95,7 @@
     HandleScope scope(isolate);
 
     // Generate the new code.
-    MacroAssembler masm(NULL, 256);
+    MacroAssembler masm(isolate, NULL, 256);
     GenerateCode(&masm);
 
     // Create the code object.
@@ -132,7 +132,7 @@
   Code* code;
   if (!FindCodeInCache(&code)) {
     // Generate the new code.
-    MacroAssembler masm(NULL, 256);
+    MacroAssembler masm(Isolate::Current(), NULL, 256);
     GenerateCode(&masm);
     Heap* heap = masm.isolate()->heap();
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index d408034..56ef072 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -37,7 +37,6 @@
 // as only the stubs up to and including Instanceof allow nested stub calls.
 #define CODE_STUB_LIST_ALL_PLATFORMS(V)  \
   V(CallFunction)                        \
-  V(GenericBinaryOp)                     \
   V(TypeRecordingBinaryOp)               \
   V(StringAdd)                           \
   V(SubString)                           \
@@ -50,7 +49,6 @@
   V(Instanceof)                          \
   V(ConvertToDouble)                     \
   V(WriteInt32ToHeapNumber)              \
-  V(IntegerMod)                          \
   V(StackCheck)                          \
   V(FastNewClosure)                      \
   V(FastNewContext)                      \
@@ -164,10 +162,10 @@
   // lazily generated function should be fully optimized or not.
   virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
 
-  // GenericBinaryOpStub needs to override this.
+  // TypeRecordingBinaryOpStub needs to override this.
   virtual int GetCodeKind();
 
-  // GenericBinaryOpStub needs to override this.
+  // TypeRecordingBinaryOpStub needs to override this.
   virtual InlineCacheState GetICState() {
     return UNINITIALIZED;
   }
diff --git a/src/codegen-inl.h b/src/codegen-inl.h
deleted file mode 100644
index f7da54a..0000000
--- a/src/codegen-inl.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_CODEGEN_INL_H_
-#define V8_CODEGEN_INL_H_
-
-#include "codegen.h"
-#include "compiler.h"
-#include "register-allocator-inl.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/codegen-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/codegen-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/codegen-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/codegen-mips-inl.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-
-namespace v8 {
-namespace internal {
-
-Handle<Script> CodeGenerator::script() { return info_->script(); }
-
-bool CodeGenerator::is_eval() { return info_->is_eval(); }
-
-Scope* CodeGenerator::scope() { return info_->function()->scope(); }
-
-bool CodeGenerator::is_strict_mode() {
-  return info_->function()->strict_mode();
-}
-
-StrictModeFlag CodeGenerator::strict_mode_flag() {
-  return is_strict_mode() ? kStrictMode : kNonStrictMode;
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_CODEGEN_INL_H_
diff --git a/src/codegen.cc b/src/codegen.cc
index 03f64a1..4bbe6ae 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,16 +28,14 @@
 #include "v8.h"
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "prettyprinter.h"
-#include "register-allocator-inl.h"
 #include "rewriter.h"
 #include "runtime.h"
 #include "scopeinfo.h"
 #include "stub-cache.h"
-#include "virtual-frame-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -61,64 +59,6 @@
 #undef __
 
 
-void CodeGenerator::ProcessDeferred() {
-  while (!deferred_.is_empty()) {
-    DeferredCode* code = deferred_.RemoveLast();
-    ASSERT(masm_ == code->masm());
-    // Record position of deferred code stub.
-    masm_->positions_recorder()->RecordStatementPosition(
-        code->statement_position());
-    if (code->position() != RelocInfo::kNoPosition) {
-      masm_->positions_recorder()->RecordPosition(code->position());
-    }
-    // Generate the code.
-    Comment cmnt(masm_, code->comment());
-    masm_->bind(code->entry_label());
-    if (code->AutoSaveAndRestore()) {
-      code->SaveRegisters();
-    }
-    code->Generate();
-    if (code->AutoSaveAndRestore()) {
-      code->RestoreRegisters();
-      code->Exit();
-    }
-  }
-}
-
-
-void DeferredCode::Exit() {
-  masm_->jmp(exit_label());
-}
-
-
-void CodeGenerator::SetFrame(VirtualFrame* new_frame,
-                             RegisterFile* non_frame_registers) {
-  RegisterFile saved_counts;
-  if (has_valid_frame()) {
-    frame_->DetachFromCodeGenerator();
-    // The remaining register reference counts are the non-frame ones.
-    allocator_->SaveTo(&saved_counts);
-  }
-
-  if (new_frame != NULL) {
-    // Restore the non-frame register references that go with the new frame.
-    allocator_->RestoreFrom(non_frame_registers);
-    new_frame->AttachToCodeGenerator();
-  }
-
-  frame_ = new_frame;
-  saved_counts.CopyTo(non_frame_registers);
-}
-
-
-void CodeGenerator::DeleteFrame() {
-  if (has_valid_frame()) {
-    frame_->DetachFromCodeGenerator();
-    frame_ = NULL;
-  }
-}
-
-
 void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
 #ifdef DEBUG
   bool print_source = false;
@@ -230,61 +170,10 @@
 #endif  // ENABLE_DISASSEMBLER
 }
 
-
-// Generate the code.  Compile the AST and assemble all the pieces into a
-// Code object.
-bool CodeGenerator::MakeCode(CompilationInfo* info) {
-  // When using Crankshaft the classic backend should never be used.
-  ASSERT(!V8::UseCrankshaft());
-  Handle<Script> script = info->script();
-  if (!script->IsUndefined() && !script->source()->IsUndefined()) {
-    int len = String::cast(script->source())->length();
-    Counters* counters = info->isolate()->counters();
-    counters->total_old_codegen_source_size()->Increment(len);
-  }
-  if (FLAG_trace_codegen) {
-    PrintF("Classic Compiler - ");
-  }
-  MakeCodePrologue(info);
-  // Generate code.
-  const int kInitialBufferSize = 4 * KB;
-  MacroAssembler masm(NULL, kInitialBufferSize);
-#ifdef ENABLE_GDB_JIT_INTERFACE
-  masm.positions_recorder()->StartGDBJITLineInfoRecording();
-#endif
-  CodeGenerator cgen(&masm);
-  CodeGeneratorScope scope(Isolate::Current(), &cgen);
-  cgen.Generate(info);
-  if (cgen.HasStackOverflow()) {
-    ASSERT(!Isolate::Current()->has_pending_exception());
-    return false;
-  }
-
-  InLoopFlag in_loop = info->is_in_loop() ? IN_LOOP : NOT_IN_LOOP;
-  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
-  Handle<Code> code = MakeCodeEpilogue(cgen.masm(), flags, info);
-  // There is no stack check table in code generated by the classic backend.
-  code->SetNoStackCheckTable();
-  CodeGenerator::PrintCode(code, info);
-  info->SetCode(code);  // May be an empty handle.
-#ifdef ENABLE_GDB_JIT_INTERFACE
-  if (FLAG_gdbjit && !code.is_null()) {
-    GDBJITLineInfo* lineinfo =
-        masm.positions_recorder()->DetachGDBJITLineInfo();
-
-    GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
-  }
-#endif
-  return !code.is_null();
-}
-
-
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
-
 static Vector<const char> kRegexp = CStrVector("regexp");
 
-
 bool CodeGenerator::ShouldGenerateLog(Expression* type) {
   ASSERT(type != NULL);
   if (!LOGGER->is_logging() && !CpuProfiler::is_profiling()) return false;
@@ -299,120 +188,6 @@
 #endif
 
 
-void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
-  int length = declarations->length();
-  int globals = 0;
-  for (int i = 0; i < length; i++) {
-    Declaration* node = declarations->at(i);
-    Variable* var = node->proxy()->var();
-    Slot* slot = var->AsSlot();
-
-    // If it was not possible to allocate the variable at compile
-    // time, we need to "declare" it at runtime to make sure it
-    // actually exists in the local context.
-    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
-      VisitDeclaration(node);
-    } else {
-      // Count global variables and functions for later processing
-      globals++;
-    }
-  }
-
-  // Return in case of no declared global functions or variables.
-  if (globals == 0) return;
-
-  // Compute array of global variable and function declarations.
-  Handle<FixedArray> array = FACTORY->NewFixedArray(2 * globals, TENURED);
-  for (int j = 0, i = 0; i < length; i++) {
-    Declaration* node = declarations->at(i);
-    Variable* var = node->proxy()->var();
-    Slot* slot = var->AsSlot();
-
-    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
-      // Skip - already processed.
-    } else {
-      array->set(j++, *(var->name()));
-      if (node->fun() == NULL) {
-        if (var->mode() == Variable::CONST) {
-          // In case this is const property use the hole.
-          array->set_the_hole(j++);
-        } else {
-          array->set_undefined(j++);
-        }
-      } else {
-        Handle<SharedFunctionInfo> function =
-            Compiler::BuildFunctionInfo(node->fun(), script());
-        // Check for stack-overflow exception.
-        if (function.is_null()) {
-          SetStackOverflow();
-          return;
-        }
-        array->set(j++, *function);
-      }
-    }
-  }
-
-  // Invoke the platform-dependent code generator to do the actual
-  // declaration of the global variables and functions.
-  DeclareGlobals(array);
-}
-
-
-void CodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
-  UNREACHABLE();
-}
-
-
-// Lookup table for code generators for special runtime calls which are
-// generated inline.
-#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize)          \
-    &CodeGenerator::Generate##Name,
-
-const CodeGenerator::InlineFunctionGenerator
-    CodeGenerator::kInlineFunctionGenerators[] = {
-        INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
-        INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
-};
-#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-
-
-bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
-  ZoneList<Expression*>* args = node->arguments();
-  Handle<String> name = node->name();
-  const Runtime::Function* function = node->function();
-  if (function != NULL && function->intrinsic_type == Runtime::INLINE) {
-    int lookup_index = static_cast<int>(function->function_id) -
-        static_cast<int>(Runtime::kFirstInlineFunction);
-    ASSERT(lookup_index >= 0);
-    ASSERT(static_cast<size_t>(lookup_index) <
-           ARRAY_SIZE(kInlineFunctionGenerators));
-    InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
-    (this->*generator)(args);
-    return true;
-  }
-  return false;
-}
-
-
-// Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
-// known result for the test expression, with no side effects.
-CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
-    Expression* cond) {
-  if (cond == NULL) return ALWAYS_TRUE;
-
-  Literal* lit = cond->AsLiteral();
-  if (lit == NULL) return DONT_KNOW;
-
-  if (lit->IsTrue()) {
-    return ALWAYS_TRUE;
-  } else if (lit->IsFalse()) {
-    return ALWAYS_FALSE;
-  }
-
-  return DONT_KNOW;
-}
-
-
 bool CodeGenerator::RecordPositions(MacroAssembler* masm,
                                     int pos,
                                     bool right_here) {
@@ -427,34 +202,6 @@
 }
 
 
-void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) RecordPositions(masm(), fun->start_position(), false);
-}
-
-
-void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) RecordPositions(masm(), fun->end_position() - 1, false);
-}
-
-
-void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
-  if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos(), false);
-}
-
-
-void CodeGenerator::CodeForDoWhileConditionPosition(DoWhileStatement* stmt) {
-  if (FLAG_debug_info)
-    RecordPositions(masm(), stmt->condition_position(), false);
-}
-
-
-void CodeGenerator::CodeForSourcePosition(int pos) {
-  if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
-    masm()->positions_recorder()->RecordPosition(pos);
-  }
-}
-
-
 const char* GenericUnaryOpStub::GetName() {
   switch (op_) {
     case Token::SUB:
diff --git a/src/codegen.h b/src/codegen.h
index aa31999..e551abf 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -54,7 +54,6 @@
 // shared code:
 //   CodeGenerator
 //   ~CodeGenerator
-//   ProcessDeferred
 //   Generate
 //   ComputeLazyCompile
 //   BuildFunctionInfo
@@ -68,7 +67,6 @@
 //   CodeForDoWhileConditionPosition
 //   CodeForSourcePosition
 
-enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 
 #if V8_TARGET_ARCH_IA32
@@ -83,163 +81,4 @@
 #error Unsupported target architecture.
 #endif
 
-#include "register-allocator.h"
-
-namespace v8 {
-namespace internal {
-
-// Code generation can be nested.  Code generation scopes form a stack
-// of active code generators.
-class CodeGeneratorScope BASE_EMBEDDED {
- public:
-  explicit CodeGeneratorScope(Isolate* isolate, CodeGenerator* cgen)
-      : isolate_(isolate) {
-    previous_ = isolate->current_code_generator();
-    isolate->set_current_code_generator(cgen);
-  }
-
-  ~CodeGeneratorScope() {
-    isolate_->set_current_code_generator(previous_);
-  }
-
-  static CodeGenerator* Current(Isolate* isolate) {
-    ASSERT(isolate->current_code_generator() != NULL);
-    return isolate->current_code_generator();
-  }
-
- private:
-  CodeGenerator* previous_;
-  Isolate* isolate_;
-};
-
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-
-// State of used registers in a virtual frame.
-class FrameRegisterState {
- public:
-  // Captures the current state of the given frame.
-  explicit FrameRegisterState(VirtualFrame* frame);
-
-  // Saves the state in the stack.
-  void Save(MacroAssembler* masm) const;
-
-  // Restores the state from the stack.
-  void Restore(MacroAssembler* masm) const;
-
- private:
-  // Constants indicating special actions.  They should not be multiples
-  // of kPointerSize so they will not collide with valid offsets from
-  // the frame pointer.
-  static const int kIgnore = -1;
-  static const int kPush = 1;
-
-  // This flag is ored with a valid offset from the frame pointer, so
-  // it should fit in the low zero bits of a valid offset.
-  static const int kSyncedFlag = 2;
-
-  int registers_[RegisterAllocator::kNumRegisters];
-};
-
-#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
-
-
-class FrameRegisterState {
- public:
-  inline FrameRegisterState(VirtualFrame frame) : frame_(frame) { }
-
-  inline const VirtualFrame* frame() const { return &frame_; }
-
- private:
-  VirtualFrame frame_;
-};
-
-#else
-
-#error Unsupported target architecture.
-
-#endif
-
-
-// RuntimeCallHelper implementation that saves/restores state of a
-// virtual frame.
-class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
- public:
-  // Does not take ownership of |frame_state|.
-  explicit VirtualFrameRuntimeCallHelper(const FrameRegisterState* frame_state)
-      : frame_state_(frame_state) {}
-
-  virtual void BeforeCall(MacroAssembler* masm) const;
-
-  virtual void AfterCall(MacroAssembler* masm) const;
-
- private:
-  const FrameRegisterState* frame_state_;
-};
-
-
-// Deferred code objects are small pieces of code that are compiled
-// out of line. They are used to defer the compilation of uncommon
-// paths, thereby avoiding expensive jumps around uncommon code parts.
-class DeferredCode: public ZoneObject {
- public:
-  DeferredCode();
-  virtual ~DeferredCode() { }
-
-  virtual void Generate() = 0;
-
-  MacroAssembler* masm() { return masm_; }
-
-  int statement_position() const { return statement_position_; }
-  int position() const { return position_; }
-
-  Label* entry_label() { return &entry_label_; }
-  Label* exit_label() { return &exit_label_; }
-
-#ifdef DEBUG
-  void set_comment(const char* comment) { comment_ = comment; }
-  const char* comment() const { return comment_; }
-#else
-  void set_comment(const char* comment) { }
-  const char* comment() const { return ""; }
-#endif
-
-  inline void Jump();
-  inline void Branch(Condition cc);
-  void BindExit() { masm_->bind(&exit_label_); }
-
-  const FrameRegisterState* frame_state() const { return &frame_state_; }
-
-  void SaveRegisters();
-  void RestoreRegisters();
-  void Exit();
-
-  // If this returns true then all registers will be saved for the duration
-  // of the Generate() call.  Otherwise the registers are not saved and the
-  // Generate() call must bracket any runtime calls with calls to
-  // SaveRegisters() and RestoreRegisters().  In this case the Generate
-  // method must also call Exit() in order to return to the non-deferred
-  // code.
-  virtual bool AutoSaveAndRestore() { return true; }
-
- protected:
-  MacroAssembler* masm_;
-
- private:
-  int statement_position_;
-  int position_;
-
-  Label entry_label_;
-  Label exit_label_;
-
-  FrameRegisterState frame_state_;
-
-#ifdef DEBUG
-  const char* comment_;
-#endif
-  DISALLOW_COPY_AND_ASSIGN(DeferredCode);
-};
-
-
-} }  // namespace v8::internal
-
 #endif  // V8_CODEGEN_H_
diff --git a/src/compiler.cc b/src/compiler.cc
index 1ec4414..86d5de3 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,7 @@
 #include "compiler.h"
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compilation-cache.h"
 #include "data-flow.h"
 #include "debug.h"
@@ -326,30 +326,9 @@
 
   if (Rewriter::Rewrite(info) && Scope::Analyze(info)) {
     if (V8::UseCrankshaft()) return MakeCrankshaftCode(info);
-
-    // Generate code and return it.  Code generator selection is governed by
-    // which backends are enabled and whether the function is considered
-    // run-once code or not.
-    //
-    // --full-compiler enables the dedicated backend for code we expect to
-    // be run once
-    //
-    // The normal choice of backend can be overridden with the flags
-    // --always-full-compiler.
-    if (Rewriter::Analyze(info)) {
-      Handle<SharedFunctionInfo> shared = info->shared_info();
-      bool is_run_once = (shared.is_null())
-          ? info->scope()->is_global_scope()
-          : (shared->is_toplevel() || shared->try_full_codegen());
-      bool can_use_full =
-          FLAG_full_compiler && !info->function()->contains_loops();
-      if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
-        return FullCodeGenerator::MakeCode(info);
-      } else {
-        return AssignedVariablesAnalyzer::Analyze(info) &&
-            CodeGenerator::MakeCode(info);
-      }
-    }
+    // If Crankshaft is not supported, fall back to the full code
+    // generator for all compilation.
+    return FullCodeGenerator::MakeCode(info);
   }
 
   return false;
@@ -388,11 +367,11 @@
     // For eval scripts add information on the function from which eval was
     // called.
     if (info->is_eval()) {
-      StackTraceFrameIterator it;
+      StackTraceFrameIterator it(isolate);
       if (!it.done()) {
         script->set_eval_from_shared(
             JSFunction::cast(it.frame()->function())->shared());
-        Code* code = it.frame()->LookupCode(isolate);
+        Code* code = it.frame()->LookupCode();
         int offset = static_cast<int>(
             it.frame()->pc() - code->instruction_start());
         script->set_eval_from_instructions_offset(Smi::FromInt(offset));
@@ -588,7 +567,7 @@
     CompilationInfo info(script);
     info.MarkAsEval();
     if (is_global) info.MarkAsGlobal();
-    if (strict_mode == kStrictMode) info.MarkAsStrict();
+    if (strict_mode == kStrictMode) info.MarkAsStrictMode();
     info.SetCallingContext(context);
     result = MakeFunctionInfo(&info);
     if (!result.is_null()) {
@@ -625,6 +604,12 @@
     // parsing statistics.
     HistogramTimerScope timer(isolate->counters()->compile_lazy());
 
+    // After parsing we know the function's strict mode. Remember it.
+    if (info->function()->strict_mode()) {
+      shared->set_strict_mode(true);
+      info->MarkAsStrictMode();
+    }
+
     // Compile the code.
     if (!MakeCode(info)) {
       if (!isolate->has_pending_exception()) {
@@ -721,35 +706,12 @@
   if (FLAG_lazy && allow_lazy) {
     Handle<Code> code = info.isolate()->builtins()->LazyCompile();
     info.SetCode(code);
-  } else {
-    if (V8::UseCrankshaft()) {
-      if (!MakeCrankshaftCode(&info)) {
-        return Handle<SharedFunctionInfo>::null();
-      }
-    } else {
-      // The bodies of function literals have not yet been visited by the
-      // AST optimizer/analyzer.
-      if (!Rewriter::Analyze(&info)) return Handle<SharedFunctionInfo>::null();
-
-      bool is_run_once = literal->try_full_codegen();
-      bool can_use_full = FLAG_full_compiler && !literal->contains_loops();
-
-      if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
-        if (!FullCodeGenerator::MakeCode(&info)) {
-          return Handle<SharedFunctionInfo>::null();
-        }
-      } else {
-        // We fall back to the classic V8 code generator.
-        if (!AssignedVariablesAnalyzer::Analyze(&info) ||
-            !CodeGenerator::MakeCode(&info)) {
-          return Handle<SharedFunctionInfo>::null();
-        }
-      }
-    }
+  } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
+             (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
     ASSERT(!info.code().is_null());
-
-    // Function compilation complete.
     scope_info = SerializedScopeInfo::Create(info.scope());
+  } else {
+    return Handle<SharedFunctionInfo>::null();
   }
 
   // Create a shared function info object.
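The combined condition in the hunk above selects the backend and tests for success in a single expression. Unrolled for readability (a sketch, names exactly as in the diff), the non-lazy branch is equivalent to:

    bool ok = V8::UseCrankshaft() ? MakeCrankshaftCode(&info)
                                  : FullCodeGenerator::MakeCode(&info);
    if (!ok) return Handle<SharedFunctionInfo>::null();
    ASSERT(!info.code().is_null());
    scope_info = SerializedScopeInfo::Create(info.scope());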
@@ -791,7 +753,6 @@
   function_info->SetThisPropertyAssignmentsInfo(
       lit->has_only_simple_this_property_assignments(),
       *lit->this_property_assignments());
-  function_info->set_try_full_codegen(lit->try_full_codegen());
   function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
   function_info->set_strict_mode(lit->strict_mode());
 }
@@ -829,7 +790,7 @@
     }
   }
 
-  GDBJIT(AddCode(name,
+  GDBJIT(AddCode(Handle<String>(shared->DebugName()),
                  Handle<Script>(info->script()),
                  Handle<Code>(info->code())));
 }
diff --git a/src/compiler.h b/src/compiler.h
index a66c540..e75e869 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,6 @@
 
 #include "ast.h"
 #include "frame-element.h"
-#include "register-allocator.h"
 #include "zone.h"
 
 namespace v8 {
@@ -53,7 +52,7 @@
   bool is_lazy() const { return (flags_ & IsLazy::mask()) != 0; }
   bool is_eval() const { return (flags_ & IsEval::mask()) != 0; }
   bool is_global() const { return (flags_ & IsGlobal::mask()) != 0; }
-  bool is_strict() const { return (flags_ & IsStrict::mask()) != 0; }
+  bool is_strict_mode() const { return (flags_ & IsStrictMode::mask()) != 0; }
   bool is_in_loop() const { return (flags_ & IsInLoop::mask()) != 0; }
   FunctionLiteral* function() const { return function_; }
   Scope* scope() const { return scope_; }
@@ -74,11 +73,11 @@
     ASSERT(!is_lazy());
     flags_ |= IsGlobal::encode(true);
   }
-  void MarkAsStrict() {
-    flags_ |= IsStrict::encode(true);
+  void MarkAsStrictMode() {
+    flags_ |= IsStrictMode::encode(true);
   }
   StrictModeFlag StrictMode() {
-    return is_strict() ? kStrictMode : kNonStrictMode;
+    return is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
   void MarkAsInLoop() {
     ASSERT(is_lazy());
@@ -165,7 +164,7 @@
   void Initialize(Mode mode) {
     mode_ = V8::UseCrankshaft() ? mode : NONOPT;
     if (!shared_info_.is_null() && shared_info_->strict_mode()) {
-      MarkAsStrict();
+      MarkAsStrictMode();
     }
   }
 
@@ -185,7 +184,7 @@
   // Flags that can be set for lazy compilation.
   class IsInLoop: public BitField<bool, 3, 1> {};
   // Strict mode - used in eager compilation.
-  class IsStrict: public BitField<bool, 4, 1> {};
+  class IsStrictMode: public BitField<bool, 4, 1> {};
   // Native syntax (%-stuff) allowed?
   class IsNativesSyntaxAllowed: public BitField<bool, 5, 1> {};
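The IsStrict to IsStrictMode rename (with the matching is_strict_mode()/MarkAsStrictMode() accessors above) leaves the encoding untouched: the flag still occupies bit 4 of the flags word. A minimal sketch of the BitField arithmetic, using the names from the diff:

    // For BitField<bool, 4, 1>, encode(true) and mask() are both 1 << 4,
    // so only the identifier changes, not any stored bits.
    int flags = 0;
    flags |= IsStrictMode::encode(true);                // set bit 4
    bool strict = (flags & IsStrictMode::mask()) != 0;  // read bit 4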
 
@@ -239,6 +238,8 @@
   // give up.
   static const int kDefaultMaxOptCount = 10;
 
+  static const int kMaxInliningLevels = 3;
+
   // All routines return a SharedFunctionInfo.
   // If an error occurs an exception is raised and the return handle
   // contains NULL.
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index bf02947..cb7dbf8 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -60,11 +60,7 @@
   if (x < k2Pow52) {
     x += k2Pow52;
     uint32_t result;
-#ifdef BIG_ENDIAN_FLOATING_POINT
-    Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
-#else
     Address mantissa_ptr = reinterpret_cast<Address>(&x);
-#endif
     // Copy least significant 32 bits of mantissa.
     memcpy(&result, mantissa_ptr, sizeof(result));
     return negative ? ~result + 1 : result;
diff --git a/src/conversions.cc b/src/conversions.cc
index c3d7bdf..1458584 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -109,11 +109,11 @@
 
 // Returns true if a nonspace was found and false if the end has been reached.
 template <class Iterator, class EndMark>
-static inline bool AdvanceToNonspace(ScannerConstants* scanner_constants,
+static inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
                                      Iterator* current,
                                      EndMark end) {
   while (*current != end) {
-    if (!scanner_constants->IsWhiteSpace(**current)) return true;
+    if (!unicode_cache->IsWhiteSpace(**current)) return true;
     ++*current;
   }
   return false;
@@ -134,7 +134,7 @@
 
 // Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
 template <int radix_log_2, class Iterator, class EndMark>
-static double InternalStringToIntDouble(ScannerConstants* scanner_constants,
+static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
                                         Iterator current,
                                         EndMark end,
                                         bool negative,
@@ -161,7 +161,7 @@
       digit = static_cast<char>(*current) - 'A' + 10;
     } else {
       if (allow_trailing_junk ||
-          !AdvanceToNonspace(scanner_constants, &current, end)) {
+          !AdvanceToNonspace(unicode_cache, &current, end)) {
         break;
       } else {
         return JUNK_STRING_VALUE;
@@ -193,7 +193,7 @@
       }
 
       if (!allow_trailing_junk &&
-          AdvanceToNonspace(scanner_constants, &current, end)) {
+          AdvanceToNonspace(unicode_cache, &current, end)) {
         return JUNK_STRING_VALUE;
       }
 
@@ -237,14 +237,14 @@
 
 
 template <class Iterator, class EndMark>
-static double InternalStringToInt(ScannerConstants* scanner_constants,
+static double InternalStringToInt(UnicodeCache* unicode_cache,
                                   Iterator current,
                                   EndMark end,
                                   int radix) {
   const bool allow_trailing_junk = true;
   const double empty_string_val = JUNK_STRING_VALUE;
 
-  if (!AdvanceToNonspace(scanner_constants, &current, end)) {
+  if (!AdvanceToNonspace(unicode_cache, &current, end)) {
     return empty_string_val;
   }
 
@@ -254,12 +254,12 @@
   if (*current == '+') {
     // Ignore leading sign; skip following spaces.
     ++current;
-    if (!AdvanceToNonspace(scanner_constants, &current, end)) {
+    if (!AdvanceToNonspace(unicode_cache, &current, end)) {
       return JUNK_STRING_VALUE;
     }
   } else if (*current == '-') {
     ++current;
-    if (!AdvanceToNonspace(scanner_constants, &current, end)) {
+    if (!AdvanceToNonspace(unicode_cache, &current, end)) {
       return JUNK_STRING_VALUE;
     }
     negative = true;
@@ -312,21 +312,21 @@
     switch (radix) {
       case 2:
         return InternalStringToIntDouble<1>(
-            scanner_constants, current, end, negative, allow_trailing_junk);
+            unicode_cache, current, end, negative, allow_trailing_junk);
       case 4:
         return InternalStringToIntDouble<2>(
-            scanner_constants, current, end, negative, allow_trailing_junk);
+            unicode_cache, current, end, negative, allow_trailing_junk);
       case 8:
         return InternalStringToIntDouble<3>(
-            scanner_constants, current, end, negative, allow_trailing_junk);
+            unicode_cache, current, end, negative, allow_trailing_junk);
 
       case 16:
         return InternalStringToIntDouble<4>(
-            scanner_constants, current, end, negative, allow_trailing_junk);
+            unicode_cache, current, end, negative, allow_trailing_junk);
 
       case 32:
         return InternalStringToIntDouble<5>(
-            scanner_constants, current, end, negative, allow_trailing_junk);
+            unicode_cache, current, end, negative, allow_trailing_junk);
       default:
         UNREACHABLE();
     }
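The template argument in the dispatch above is log2 of the radix, which is what lets InternalStringToIntDouble fold each digit in with a shift instead of a multiply. In sketch form:

    // For power-of-two radixes, value * radix + digit becomes a shift;
    // e.g. radix 16 has radix_log_2 == 4: (value << 4) | digit. This is
    // why only radixes 2, 4, 8, 16, and 32 take this fast path.
    template <int radix_log_2>
    uint64_t FoldDigit(uint64_t value, int digit) {
      return (value << radix_log_2) | static_cast<uint64_t>(digit);
    }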
@@ -352,7 +352,7 @@
     }
 
     if (!allow_trailing_junk &&
-        AdvanceToNonspace(scanner_constants, &current, end)) {
+        AdvanceToNonspace(unicode_cache, &current, end)) {
       return JUNK_STRING_VALUE;
     }
 
@@ -418,7 +418,7 @@
   } while (!done);
 
   if (!allow_trailing_junk &&
-      AdvanceToNonspace(scanner_constants, &current, end)) {
+      AdvanceToNonspace(unicode_cache, &current, end)) {
     return JUNK_STRING_VALUE;
   }
 
@@ -432,7 +432,7 @@
 // 2. *current - gets the current character in the sequence.
 // 3. ++current (advances the position).
 template <class Iterator, class EndMark>
-static double InternalStringToDouble(ScannerConstants* scanner_constants,
+static double InternalStringToDouble(UnicodeCache* unicode_cache,
                                      Iterator current,
                                      EndMark end,
                                      int flags,
@@ -445,7 +445,7 @@
   // 'parsing_done'.
   // 4. 'current' is not dereferenced after the 'parsing_done' label.
   // 5. Code before 'parsing_done' may rely on 'current != end'.
-  if (!AdvanceToNonspace(scanner_constants, &current, end)) {
+  if (!AdvanceToNonspace(unicode_cache, &current, end)) {
     return empty_string_val;
   }
 
@@ -483,7 +483,7 @@
     }
 
     if (!allow_trailing_junk &&
-        AdvanceToNonspace(scanner_constants, &current, end)) {
+        AdvanceToNonspace(unicode_cache, &current, end)) {
       return JUNK_STRING_VALUE;
     }
 
@@ -505,7 +505,7 @@
         return JUNK_STRING_VALUE;  // "0x".
       }
 
-      return InternalStringToIntDouble<4>(scanner_constants,
+      return InternalStringToIntDouble<4>(unicode_cache,
                                           current,
                                           end,
                                           negative,
@@ -643,7 +643,7 @@
   }
 
   if (!allow_trailing_junk &&
-      AdvanceToNonspace(scanner_constants, &current, end)) {
+      AdvanceToNonspace(unicode_cache, &current, end)) {
     return JUNK_STRING_VALUE;
   }
 
@@ -651,7 +651,7 @@
   exponent += insignificant_digits;
 
   if (octal) {
-    return InternalStringToIntDouble<3>(scanner_constants,
+    return InternalStringToIntDouble<3>(unicode_cache,
                                         buffer,
                                         buffer + buffer_pos,
                                         negative,
@@ -671,23 +671,22 @@
 }
 
 
-double StringToDouble(String* str, int flags, double empty_string_val) {
-  ScannerConstants* scanner_constants =
-      Isolate::Current()->scanner_constants();
+double StringToDouble(UnicodeCache* unicode_cache,
+                      String* str, int flags, double empty_string_val) {
   StringShape shape(str);
   if (shape.IsSequentialAscii()) {
     const char* begin = SeqAsciiString::cast(str)->GetChars();
     const char* end = begin + str->length();
-    return InternalStringToDouble(scanner_constants, begin, end, flags,
+    return InternalStringToDouble(unicode_cache, begin, end, flags,
                                   empty_string_val);
   } else if (shape.IsSequentialTwoByte()) {
     const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
     const uc16* end = begin + str->length();
-    return InternalStringToDouble(scanner_constants, begin, end, flags,
+    return InternalStringToDouble(unicode_cache, begin, end, flags,
                                   empty_string_val);
   } else {
     StringInputBuffer buffer(str);
-    return InternalStringToDouble(scanner_constants,
+    return InternalStringToDouble(unicode_cache,
                                   StringInputBufferIterator(&buffer),
                                   StringInputBufferIterator::EndMarker(),
                                   flags,
@@ -696,21 +695,21 @@
 }
 
 
-double StringToInt(String* str, int radix) {
-  ScannerConstants* scanner_constants =
-      Isolate::Current()->scanner_constants();
+double StringToInt(UnicodeCache* unicode_cache,
+                   String* str,
+                   int radix) {
   StringShape shape(str);
   if (shape.IsSequentialAscii()) {
     const char* begin = SeqAsciiString::cast(str)->GetChars();
     const char* end = begin + str->length();
-    return InternalStringToInt(scanner_constants, begin, end, radix);
+    return InternalStringToInt(unicode_cache, begin, end, radix);
   } else if (shape.IsSequentialTwoByte()) {
     const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
     const uc16* end = begin + str->length();
-    return InternalStringToInt(scanner_constants, begin, end, radix);
+    return InternalStringToInt(unicode_cache, begin, end, radix);
   } else {
     StringInputBuffer buffer(str);
-    return InternalStringToInt(scanner_constants,
+    return InternalStringToInt(unicode_cache,
                                StringInputBufferIterator(&buffer),
                                StringInputBufferIterator::EndMarker(),
                                radix);
@@ -718,22 +717,20 @@
 }
 
 
-double StringToDouble(const char* str, int flags, double empty_string_val) {
-  ScannerConstants* scanner_constants =
-      Isolate::Current()->scanner_constants();
+double StringToDouble(UnicodeCache* unicode_cache,
+                      const char* str, int flags, double empty_string_val) {
   const char* end = str + StrLength(str);
-  return InternalStringToDouble(scanner_constants, str, end, flags,
+  return InternalStringToDouble(unicode_cache, str, end, flags,
                                 empty_string_val);
 }
 
 
-double StringToDouble(Vector<const char> str,
+double StringToDouble(UnicodeCache* unicode_cache,
+                      Vector<const char> str,
                       int flags,
                       double empty_string_val) {
-  ScannerConstants* scanner_constants =
-      Isolate::Current()->scanner_constants();
   const char* end = str.start() + str.length();
-  return InternalStringToDouble(scanner_constants, str.start(), end, flags,
+  return InternalStringToDouble(unicode_cache, str.start(), end, flags,
                                 empty_string_val);
 }
 
diff --git a/src/conversions.h b/src/conversions.h
index 312e6ae..a14dc9a 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,8 @@
 #ifndef V8_CONVERSIONS_H_
 #define V8_CONVERSIONS_H_
 
+#include "scanner-base.h"
+
 namespace v8 {
 namespace internal {
 
@@ -91,15 +93,22 @@
 
 
 // Converts a string into a double value according to ECMA-262 9.3.1
-double StringToDouble(String* str, int flags, double empty_string_val = 0);
-double StringToDouble(Vector<const char> str,
+double StringToDouble(UnicodeCache* unicode_cache,
+                      String* str,
+                      int flags,
+                      double empty_string_val = 0);
+double StringToDouble(UnicodeCache* unicode_cache,
+                      Vector<const char> str,
                       int flags,
                       double empty_string_val = 0);
 // This version expects a zero-terminated character array.
-double StringToDouble(const char* str, int flags, double empty_string_val = 0);
+double StringToDouble(UnicodeCache* unicode_cache,
+                      const char* str,
+                      int flags,
+                      double empty_string_val = 0);
 
 // Converts a string into an integer.
-double StringToInt(String* str, int radix);
+double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
 
 // Converts a double to a string value according to ECMA-262 9.8.1.
 // The buffer should be large enough for any floating point number.
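With the reworked signatures above, callers now supply the UnicodeCache (the renamed ScannerConstants) explicitly. A caller-side sketch; the unicode_cache() accessor on Isolate is an assumption based on the rename, and ParseNumber is an illustrative name:

    double ParseNumber(Isolate* isolate, Vector<const char> text) {
      UnicodeCache* cache = isolate->unicode_cache();  // assumed accessor
      return StringToDouble(cache, text, NO_FLAGS);    // NO_FLAGS: default conversion flags
    }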
diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h
index a7fffe0..b704417 100644
--- a/src/cpu-profiler-inl.h
+++ b/src/cpu-profiler-inl.h
@@ -70,6 +70,7 @@
   // Init the required fields only.
   result->sample.pc = NULL;
   result->sample.frames_count = 0;
+  result->sample.has_external_callback = false;
   return result;
 }
 
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index ef51950..3894748 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -184,11 +184,13 @@
 void ProfilerEventsProcessor::AddCurrentStack() {
   TickSampleEventRecord record;
   TickSample* sample = &record.sample;
-  sample->state = Isolate::Current()->current_vm_state();
+  Isolate* isolate = Isolate::Current();
+  sample->state = isolate->current_vm_state();
   sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
   sample->tos = NULL;
+  sample->has_external_callback = false;
   sample->frames_count = 0;
-  for (StackTraceFrameIterator it;
+  for (StackTraceFrameIterator it(isolate);
        !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
        it.Advance()) {
     sample->stack[sample->frames_count++] = it.frame()->pc();
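Both profiler changes above establish the same invariant for the new TickSample field: a freshly initialized sample starts with has_external_callback cleared, so the tick processor never consumes a stale callback address. Gathered into one sketch (InitTickSample is an illustrative name):

    void InitTickSample(TickSampleEventRecord* record) {
      record->sample.pc = NULL;
      record->sample.tos = NULL;
      record->sample.frames_count = 0;
      record->sample.has_external_callback = false;  // new field, must be reset
    }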
diff --git a/src/cpu.h b/src/cpu.h
index ddc402f..e307302 100644
--- a/src/cpu.h
+++ b/src/cpu.h
@@ -53,6 +53,8 @@
   // Initializes the cpu architecture support. Called once at VM startup.
   static void Setup();
 
+  static bool SupportsCrankshaft();
+
   // Flush instruction cache.
   static void FlushICache(void* start, size_t size);
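SupportsCrankshaft() is a new capability probe. This diff does not show its caller, but presumably it feeds the V8::UseCrankshaft() decision seen in compiler.cc, along these lines (an assumption, for illustration only):

    // Assumed wiring, not shown in this diff:
    bool use_crankshaft = FLAG_crankshaft && CPU::SupportsCrankshaft();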
 
diff --git a/src/d8.gyp b/src/d8.gyp
index 901fd65..29212dd 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -61,6 +61,7 @@
       'variables': {
         'js_files': [
           'd8.js',
+          'macros.py',
         ],
       },
       'actions': [
@@ -72,7 +73,6 @@
           ],
           'outputs': [
             '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
-            '<(SHARED_INTERMEDIATE_DIR)/d8-js-empty.cc',
           ],
           'action': [
             'python',
diff --git a/src/data-flow.cc b/src/data-flow.cc
index 9c02ff4..6a3b05c 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -63,483 +63,4 @@
   current_value_ = val >> 1;
 }
 
-
-bool AssignedVariablesAnalyzer::Analyze(CompilationInfo* info) {
-  Scope* scope = info->scope();
-  int size = scope->num_parameters() + scope->num_stack_slots();
-  if (size == 0) return true;
-  AssignedVariablesAnalyzer analyzer(info, size);
-  return analyzer.Analyze();
-}
-
-
-AssignedVariablesAnalyzer::AssignedVariablesAnalyzer(CompilationInfo* info,
-                                                     int size)
-    : info_(info), av_(size) {
-}
-
-
-bool AssignedVariablesAnalyzer::Analyze() {
-  ASSERT(av_.length() > 0);
-  VisitStatements(info_->function()->body());
-  return !HasStackOverflow();
-}
-
-
-Variable* AssignedVariablesAnalyzer::FindSmiLoopVariable(ForStatement* stmt) {
-  // The loop must have all necessary parts.
-  if (stmt->init() == NULL || stmt->cond() == NULL || stmt->next() == NULL) {
-    return NULL;
-  }
-  // The initialization statement has to be a simple assignment.
-  Assignment* init = stmt->init()->StatementAsSimpleAssignment();
-  if (init == NULL) return NULL;
-
-  // We only deal with local variables.
-  Variable* loop_var = init->target()->AsVariableProxy()->AsVariable();
-  if (loop_var == NULL || !loop_var->IsStackAllocated()) return NULL;
-
-  // Don't try to get clever with const or dynamic variables.
-  if (loop_var->mode() != Variable::VAR) return NULL;
-
-  // The initial value has to be a smi.
-  Literal* init_lit = init->value()->AsLiteral();
-  if (init_lit == NULL || !init_lit->handle()->IsSmi()) return NULL;
-  int init_value = Smi::cast(*init_lit->handle())->value();
-
-  // The condition must be a compare of variable with <, <=, >, or >=.
-  CompareOperation* cond = stmt->cond()->AsCompareOperation();
-  if (cond == NULL) return NULL;
-  if (cond->op() != Token::LT
-      && cond->op() != Token::LTE
-      && cond->op() != Token::GT
-      && cond->op() != Token::GTE) return NULL;
-
-  // The lhs must be the same variable as in the init expression.
-  if (cond->left()->AsVariableProxy()->AsVariable() != loop_var) return NULL;
-
-  // The rhs must be a smi.
-  Literal* term_lit = cond->right()->AsLiteral();
-  if (term_lit == NULL || !term_lit->handle()->IsSmi()) return NULL;
-  int term_value = Smi::cast(*term_lit->handle())->value();
-
-  // The count operation updates the same variable as in the init expression.
-  CountOperation* update = stmt->next()->StatementAsCountOperation();
-  if (update == NULL) return NULL;
-  if (update->expression()->AsVariableProxy()->AsVariable() != loop_var) {
-    return NULL;
-  }
-
-  // The direction of the count operation must agree with the start and the end
-  // value. We currently do not allow the initial value to be the same as the
-  // terminal value. This _would_ be ok as long as the loop body never executes
-  // or executes exactly one time.
-  if (init_value == term_value) return NULL;
-  if (init_value < term_value && update->op() != Token::INC) return NULL;
-  if (init_value > term_value && update->op() != Token::DEC) return NULL;
-
-  // Check that the update operation cannot overflow the smi range. This can
-  // occur in the two cases where the loop bound is equal to the largest or
-  // smallest smi.
-  if (update->op() == Token::INC && term_value == Smi::kMaxValue) return NULL;
-  if (update->op() == Token::DEC && term_value == Smi::kMinValue) return NULL;
-
-  // Found a smi loop variable.
-  return loop_var;
-}
-
-int AssignedVariablesAnalyzer::BitIndex(Variable* var) {
-  ASSERT(var != NULL);
-  ASSERT(var->IsStackAllocated());
-  Slot* slot = var->AsSlot();
-  if (slot->type() == Slot::PARAMETER) {
-    return slot->index();
-  } else {
-    return info_->scope()->num_parameters() + slot->index();
-  }
-}
-
-
-void AssignedVariablesAnalyzer::RecordAssignedVar(Variable* var) {
-  ASSERT(var != NULL);
-  if (var->IsStackAllocated()) {
-    av_.Add(BitIndex(var));
-  }
-}
-
-
-void AssignedVariablesAnalyzer::MarkIfTrivial(Expression* expr) {
-  Variable* var = expr->AsVariableProxy()->AsVariable();
-  if (var != NULL &&
-      var->IsStackAllocated() &&
-      !var->is_arguments() &&
-      var->mode() != Variable::CONST &&
-      (var->is_this() || !av_.Contains(BitIndex(var)))) {
-    expr->AsVariableProxy()->MarkAsTrivial();
-  }
-}
-
-
-void AssignedVariablesAnalyzer::ProcessExpression(Expression* expr) {
-  BitVector saved_av(av_);
-  av_.Clear();
-  Visit(expr);
-  av_.Union(saved_av);
-}
-
-void AssignedVariablesAnalyzer::VisitBlock(Block* stmt) {
-  VisitStatements(stmt->statements());
-}
-
-
-void AssignedVariablesAnalyzer::VisitExpressionStatement(
-    ExpressionStatement* stmt) {
-  ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitEmptyStatement(EmptyStatement* stmt) {
-  // Do nothing.
-}
-
-
-void AssignedVariablesAnalyzer::VisitIfStatement(IfStatement* stmt) {
-  ProcessExpression(stmt->condition());
-  Visit(stmt->then_statement());
-  Visit(stmt->else_statement());
-}
-
-
-void AssignedVariablesAnalyzer::VisitContinueStatement(
-    ContinueStatement* stmt) {
-  // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitBreakStatement(BreakStatement* stmt) {
-  // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitReturnStatement(ReturnStatement* stmt) {
-  ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWithEnterStatement(
-    WithEnterStatement* stmt) {
-  ProcessExpression(stmt->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWithExitStatement(
-    WithExitStatement* stmt) {
-  // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitSwitchStatement(SwitchStatement* stmt) {
-  BitVector result(av_);
-  av_.Clear();
-  Visit(stmt->tag());
-  result.Union(av_);
-  for (int i = 0; i < stmt->cases()->length(); i++) {
-    CaseClause* clause = stmt->cases()->at(i);
-    if (!clause->is_default()) {
-      av_.Clear();
-      Visit(clause->label());
-      result.Union(av_);
-    }
-    VisitStatements(clause->statements());
-  }
-  av_.Union(result);
-}
-
-
-void AssignedVariablesAnalyzer::VisitDoWhileStatement(DoWhileStatement* stmt) {
-  ProcessExpression(stmt->cond());
-  Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitWhileStatement(WhileStatement* stmt) {
-  ProcessExpression(stmt->cond());
-  Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitForStatement(ForStatement* stmt) {
-  if (stmt->init() != NULL) Visit(stmt->init());
-  if (stmt->cond() != NULL) ProcessExpression(stmt->cond());
-  if (stmt->next() != NULL) Visit(stmt->next());
-
-  // Process loop body. After visiting the loop body av_ contains
-  // the assigned variables of the loop body.
-  BitVector saved_av(av_);
-  av_.Clear();
-  Visit(stmt->body());
-
-  Variable* var = FindSmiLoopVariable(stmt);
-  if (var != NULL && !av_.Contains(BitIndex(var))) {
-    stmt->set_loop_variable(var);
-  }
-  av_.Union(saved_av);
-}
-
-
-void AssignedVariablesAnalyzer::VisitForInStatement(ForInStatement* stmt) {
-  ProcessExpression(stmt->each());
-  ProcessExpression(stmt->enumerable());
-  Visit(stmt->body());
-}
-
-
-void AssignedVariablesAnalyzer::VisitTryCatchStatement(
-    TryCatchStatement* stmt) {
-  Visit(stmt->try_block());
-  Visit(stmt->catch_block());
-}
-
-
-void AssignedVariablesAnalyzer::VisitTryFinallyStatement(
-    TryFinallyStatement* stmt) {
-  Visit(stmt->try_block());
-  Visit(stmt->finally_block());
-}
-
-
-void AssignedVariablesAnalyzer::VisitDebuggerStatement(
-    DebuggerStatement* stmt) {
-  // Nothing to do.
-}
-
-
-void AssignedVariablesAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitConditional(Conditional* expr) {
-  ASSERT(av_.IsEmpty());
-
-  Visit(expr->condition());
-
-  BitVector result(av_);
-  av_.Clear();
-  Visit(expr->then_expression());
-  result.Union(av_);
-
-  av_.Clear();
-  Visit(expr->else_expression());
-  av_.Union(result);
-}
-
-
-void AssignedVariablesAnalyzer::VisitVariableProxy(VariableProxy* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitLiteral(Literal* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitRegExpLiteral(RegExpLiteral* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitObjectLiteral(ObjectLiteral* expr) {
-  ASSERT(av_.IsEmpty());
-  BitVector result(av_.length());
-  for (int i = 0; i < expr->properties()->length(); i++) {
-    Visit(expr->properties()->at(i)->value());
-    result.Union(av_);
-    av_.Clear();
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitArrayLiteral(ArrayLiteral* expr) {
-  ASSERT(av_.IsEmpty());
-  BitVector result(av_.length());
-  for (int i = 0; i < expr->values()->length(); i++) {
-    Visit(expr->values()->at(i));
-    result.Union(av_);
-    av_.Clear();
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCatchExtensionObject(
-    CatchExtensionObject* expr) {
-  ASSERT(av_.IsEmpty());
-  Visit(expr->key());
-  ProcessExpression(expr->value());
-}
-
-
-void AssignedVariablesAnalyzer::VisitAssignment(Assignment* expr) {
-  ASSERT(av_.IsEmpty());
-
-  // There are three kinds of assignments: variable assignments, property
-  // assignments, and reference errors (invalid left-hand sides).
-  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
-  Property* prop = expr->target()->AsProperty();
-  ASSERT(var == NULL || prop == NULL);
-
-  if (var != NULL) {
-    MarkIfTrivial(expr->value());
-    Visit(expr->value());
-    if (expr->is_compound()) {
-      // Left-hand side occurs also as an rvalue.
-      MarkIfTrivial(expr->target());
-      ProcessExpression(expr->target());
-    }
-    RecordAssignedVar(var);
-
-  } else if (prop != NULL) {
-    MarkIfTrivial(expr->value());
-    Visit(expr->value());
-    if (!prop->key()->IsPropertyName()) {
-      MarkIfTrivial(prop->key());
-      ProcessExpression(prop->key());
-    }
-    MarkIfTrivial(prop->obj());
-    ProcessExpression(prop->obj());
-
-  } else {
-    Visit(expr->target());
-  }
-}
-
-
-void AssignedVariablesAnalyzer::VisitThrow(Throw* expr) {
-  ASSERT(av_.IsEmpty());
-  Visit(expr->exception());
-}
-
-
-void AssignedVariablesAnalyzer::VisitProperty(Property* expr) {
-  ASSERT(av_.IsEmpty());
-  if (!expr->key()->IsPropertyName()) {
-    MarkIfTrivial(expr->key());
-    Visit(expr->key());
-  }
-  MarkIfTrivial(expr->obj());
-  ProcessExpression(expr->obj());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCall(Call* expr) {
-  ASSERT(av_.IsEmpty());
-  Visit(expr->expression());
-  BitVector result(av_);
-  for (int i = 0; i < expr->arguments()->length(); i++) {
-    av_.Clear();
-    Visit(expr->arguments()->at(i));
-    result.Union(av_);
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCallNew(CallNew* expr) {
-  ASSERT(av_.IsEmpty());
-  Visit(expr->expression());
-  BitVector result(av_);
-  for (int i = 0; i < expr->arguments()->length(); i++) {
-    av_.Clear();
-    Visit(expr->arguments()->at(i));
-    result.Union(av_);
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitCallRuntime(CallRuntime* expr) {
-  ASSERT(av_.IsEmpty());
-  BitVector result(av_);
-  for (int i = 0; i < expr->arguments()->length(); i++) {
-    av_.Clear();
-    Visit(expr->arguments()->at(i));
-    result.Union(av_);
-  }
-  av_ = result;
-}
-
-
-void AssignedVariablesAnalyzer::VisitUnaryOperation(UnaryOperation* expr) {
-  ASSERT(av_.IsEmpty());
-  MarkIfTrivial(expr->expression());
-  Visit(expr->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitIncrementOperation(
-    IncrementOperation* expr) {
-  UNREACHABLE();
-}
-
-
-void AssignedVariablesAnalyzer::VisitCountOperation(CountOperation* expr) {
-  ASSERT(av_.IsEmpty());
-  if (expr->is_prefix()) MarkIfTrivial(expr->expression());
-  Visit(expr->expression());
-
-  Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
-  if (var != NULL) RecordAssignedVar(var);
-}
-
-
-void AssignedVariablesAnalyzer::VisitBinaryOperation(BinaryOperation* expr) {
-  ASSERT(av_.IsEmpty());
-  MarkIfTrivial(expr->right());
-  Visit(expr->right());
-  MarkIfTrivial(expr->left());
-  ProcessExpression(expr->left());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCompareOperation(CompareOperation* expr) {
-  ASSERT(av_.IsEmpty());
-  MarkIfTrivial(expr->right());
-  Visit(expr->right());
-  MarkIfTrivial(expr->left());
-  ProcessExpression(expr->left());
-}
-
-
-void AssignedVariablesAnalyzer::VisitCompareToNull(CompareToNull* expr) {
-  ASSERT(av_.IsEmpty());
-  MarkIfTrivial(expr->expression());
-  Visit(expr->expression());
-}
-
-
-void AssignedVariablesAnalyzer::VisitThisFunction(ThisFunction* expr) {
-  // Nothing to do.
-  ASSERT(av_.IsEmpty());
-}
-
-
-void AssignedVariablesAnalyzer::VisitDeclaration(Declaration* decl) {
-  UNREACHABLE();
-}
-
-
 } }  // namespace v8::internal
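
Editor's note: for reference, the removed FindSmiLoopVariable only accepted counting loops whose bound cannot push the counter out of smi range. A standalone re-statement of its direction and overflow guards; the names and the 31-bit smi bounds below are assumptions matching 32-bit V8 targets.

    #include <cstdint>

    enum class CountOp { kInc, kDec };

    // 31-bit smi range, as used on 32-bit V8 targets.
    constexpr int32_t kSmiMaxValue = (1 << 30) - 1;
    constexpr int32_t kSmiMinValue = -(1 << 30);

    bool SmiLoopBoundsAreSafe(int32_t init_value, int32_t term_value,
                              CountOp op) {
      // Zero- and one-trip loops were rejected outright.
      if (init_value == term_value) return false;
      // The update must move the counter toward the bound.
      if (init_value < term_value && op != CountOp::kInc) return false;
      if (init_value > term_value && op != CountOp::kDec) return false;
      // A bound sitting on a smi extreme could overflow on the final update.
      if (op == CountOp::kInc && term_value == kSmiMaxValue) return false;
      if (op == CountOp::kDec && term_value == kSmiMinValue) return false;
      return true;
    }
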
diff --git a/src/data-flow.h b/src/data-flow.h
index 573d7d8..76cff88 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -335,44 +335,6 @@
   List<T*> queue_;
 };
 
-
-// Computes the set of assigned variables and annotates variables proxies
-// that are trivial sub-expressions and for-loops where the loop variable
-// is guaranteed to be a smi.
-class AssignedVariablesAnalyzer : public AstVisitor {
- public:
-  static bool Analyze(CompilationInfo* info);
-
- private:
-  AssignedVariablesAnalyzer(CompilationInfo* info, int bits);
-  bool Analyze();
-
-  Variable* FindSmiLoopVariable(ForStatement* stmt);
-
-  int BitIndex(Variable* var);
-
-  void RecordAssignedVar(Variable* var);
-
-  void MarkIfTrivial(Expression* expr);
-
-  // Visits an expression, saving the accumulator before, clearing
-  // it before visiting, and restoring it after visiting.
-  void ProcessExpression(Expression* expr);
-
-  // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
-  AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
-  CompilationInfo* info_;
-
-  // Accumulator for assigned variables set.
-  BitVector av_;
-
-  DISALLOW_COPY_AND_ASSIGN(AssignedVariablesAnalyzer);
-};
-
-
 } }  // namespace v8::internal
 
 
diff --git a/src/dateparser-inl.h b/src/dateparser-inl.h
index ac28c62..7f8fac8 100644
--- a/src/dateparser-inl.h
+++ b/src/dateparser-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,9 +34,11 @@
 namespace internal {
 
 template <typename Char>
-bool DateParser::Parse(Vector<Char> str, FixedArray* out) {
+bool DateParser::Parse(Vector<Char> str,
+                       FixedArray* out,
+                       UnicodeCache* unicode_cache) {
   ASSERT(out->length() >= OUTPUT_SIZE);
-  InputReader<Char> in(str);
+  InputReader<Char> in(unicode_cache, str);
   TimeZoneComposer tz;
   TimeComposer time;
   DayComposer day;
diff --git a/src/dateparser.h b/src/dateparser.h
index 51109ee..9d29715 100644
--- a/src/dateparser.h
+++ b/src/dateparser.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,7 +49,7 @@
   // [7]: UTC offset in seconds, or null value if no timezone specified
   // If parsing fails, return false (content of output array is not defined).
   template <typename Char>
-  static bool Parse(Vector<Char> str, FixedArray* output);
+  static bool Parse(Vector<Char> str, FixedArray* output, UnicodeCache* cache);
 
   enum {
     YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MILLISECOND, UTC_OFFSET, OUTPUT_SIZE
@@ -67,11 +67,11 @@
   template <typename Char>
   class InputReader BASE_EMBEDDED {
    public:
-    explicit InputReader(Vector<Char> s)
+    InputReader(UnicodeCache* unicode_cache, Vector<Char> s)
         : index_(0),
           buffer_(s),
           has_read_number_(false),
-          scanner_constants_(Isolate::Current()->scanner_constants()) {
+          unicode_cache_(unicode_cache) {
       Next();
     }
 
@@ -122,7 +122,7 @@
     }
 
     bool SkipWhiteSpace() {
-      if (scanner_constants_->IsWhiteSpace(ch_)) {
+      if (unicode_cache_->IsWhiteSpace(ch_)) {
         Next();
         return true;
       }
@@ -158,7 +158,7 @@
     Vector<Char> buffer_;
     bool has_read_number_;
     uint32_t ch_;
-    ScannerConstants* scanner_constants_;
+    UnicodeCache* unicode_cache_;
   };
 
   enum KeywordType { INVALID, MONTH_NAME, TIME_ZONE_NAME, AM_PM };
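
Editor's note: the dateparser change is a dependency-injection refactor; InputReader now receives its character-classification helper as a constructor argument instead of fetching ScannerConstants from Isolate::Current(). A simplified sketch of the pattern, with stand-in types:

    #include <cstdint>

    // Stand-in for UnicodeCache; only whitespace classification is shown.
    struct CacheLike {
      bool IsWhiteSpace(uint32_t ch) const { return ch == ' ' || ch == '\t'; }
    };

    class ReaderLike {
     public:
      // The helper arrives via the constructor; no global lookups.
      ReaderLike(const CacheLike* cache, const char* s)
          : cache_(cache), s_(s) {}

      // Mirrors InputReader::SkipWhiteSpace(): consume one space, if any.
      bool SkipWhiteSpace() {
        if (*s_ && cache_->IsWhiteSpace(static_cast<uint32_t>(*s_))) {
          ++s_;
          return true;
        }
        return false;
      }

     private:
      const CacheLike* cache_;
      const char* s_;
    };
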
diff --git a/src/debug.cc b/src/debug.cc
index bc532ef..3691333 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -167,7 +167,6 @@
       Address target = original_rinfo()->target_address();
       Code* code = Code::GetCodeFromTargetAddress(target);
       if ((code->is_inline_cache_stub() &&
-           !code->is_binary_op_stub() &&
            !code->is_type_recording_binary_op_stub() &&
            !code->is_compare_ic_stub()) ||
           RelocInfo::IsConstructCall(rmode())) {
@@ -478,21 +477,6 @@
     // calling convention used by the call site.
     Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
     rinfo()->set_target_address(dbgbrk_code->entry());
-
-    // For stubs that refer back to an inlined version clear the cached map for
-    // the inlined case to always go through the IC. As long as the break point
-    // is set the patching performed by the runtime system will take place in
-    // the code copy and will therefore have no effect on the running code
-    // keeping it from using the inlined code.
-    if (code->is_keyed_load_stub()) {
-      KeyedLoadIC::ClearInlinedVersion(pc());
-    } else if (code->is_keyed_store_stub()) {
-      KeyedStoreIC::ClearInlinedVersion(pc());
-    } else if (code->is_load_stub()) {
-      LoadIC::ClearInlinedVersion(pc());
-    } else if (code->is_store_stub()) {
-      StoreIC::ClearInlinedVersion(pc());
-    }
   }
 }
 
@@ -500,20 +484,6 @@
 void BreakLocationIterator::ClearDebugBreakAtIC() {
   // Patch the code to the original invoke.
   rinfo()->set_target_address(original_rinfo()->target_address());
-
-  RelocInfo::Mode mode = rmode();
-  if (RelocInfo::IsCodeTarget(mode)) {
-    AssertNoAllocation nogc;
-    Address target = original_rinfo()->target_address();
-    Code* code = Code::GetCodeFromTargetAddress(target);
-
-    // Restore the inlined version of keyed stores to get back to the
-    // fast case.  We need to patch back the keyed store because no
-    // patching happens when running normally.  For keyed loads, the
-    // map check will get patched back when running normally after ICs
-    // have been cleared at GC.
-    if (code->is_keyed_store_stub()) KeyedStoreIC::RestoreInlinedVersion(pc());
-  }
 }
 
 
@@ -810,7 +780,7 @@
     Handle<Object> message = MessageHandler::MakeMessageObject(
         "error_loading_debugger", NULL, Vector<Handle<Object> >::empty(),
         Handle<String>(), Handle<JSArray>());
-    MessageHandler::ReportMessage(NULL, message);
+    MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
     return false;
   }
 
@@ -844,6 +814,7 @@
   HandleScope scope(isolate_);
   Handle<Context> context =
       isolate_->bootstrapper()->CreateEnvironment(
+          isolate_,
           Handle<Object>::null(),
           v8::Handle<ObjectTemplate>(),
           NULL);
@@ -917,24 +888,20 @@
 }
 
 
-// This remains a static method so that generated code can call it.
-Object* Debug::Break(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
-
-  Debug* debug = isolate->debug();
-  Heap* heap = isolate->heap();
-  HandleScope scope(isolate);
+Object* Debug::Break(Arguments args) {
+  Heap* heap = isolate_->heap();
+  HandleScope scope(isolate_);
   ASSERT(args.length() == 0);
 
-  debug->thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
+  thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
 
   // Get the top-most JavaScript frame.
-  JavaScriptFrameIterator it;
+  JavaScriptFrameIterator it(isolate_);
   JavaScriptFrame* frame = it.frame();
 
   // Just continue if breaks are disabled or debugger cannot be loaded.
-  if (debug->disable_break() || !debug->Load()) {
-    debug->SetAfterBreakTarget(frame);
+  if (disable_break() || !Load()) {
+    SetAfterBreakTarget(frame);
     return heap->undefined_value();
   }
 
@@ -945,7 +912,7 @@
   }
 
   // Postpone interrupt during breakpoint processing.
-  PostponeInterruptsScope postpone(isolate);
+  PostponeInterruptsScope postpone(isolate_);
 
   // Get the debug info (create it if it does not exist).
   Handle<SharedFunctionInfo> shared =
@@ -958,10 +925,10 @@
   break_location_iterator.FindBreakLocationFromAddress(frame->pc());
 
   // Check whether step next reached a new statement.
-  if (!debug->StepNextContinue(&break_location_iterator, frame)) {
+  if (!StepNextContinue(&break_location_iterator, frame)) {
     // Decrease steps left if performing multiple steps.
-    if (debug->thread_local_.step_count_ > 0) {
-      debug->thread_local_.step_count_--;
+    if (thread_local_.step_count_ > 0) {
+      thread_local_.step_count_--;
     }
   }
 
@@ -971,56 +938,55 @@
   if (break_location_iterator.HasBreakPoint()) {
     Handle<Object> break_point_objects =
         Handle<Object>(break_location_iterator.BreakPointObjects());
-    break_points_hit = debug->CheckBreakPoints(break_point_objects);
+    break_points_hit = CheckBreakPoints(break_point_objects);
   }
 
   // If step out is active, skip everything until the frame where we need
   // to step out to is reached, unless a real breakpoint is hit.
-  if (debug->StepOutActive() && frame->fp() != debug->step_out_fp() &&
+  if (StepOutActive() && frame->fp() != step_out_fp() &&
       break_points_hit->IsUndefined() ) {
       // Step count should always be 0 for StepOut.
-      ASSERT(debug->thread_local_.step_count_ == 0);
+      ASSERT(thread_local_.step_count_ == 0);
   } else if (!break_points_hit->IsUndefined() ||
-             (debug->thread_local_.last_step_action_ != StepNone &&
-              debug->thread_local_.step_count_ == 0)) {
+             (thread_local_.last_step_action_ != StepNone &&
+              thread_local_.step_count_ == 0)) {
     // Notify debugger if a real break point is triggered or if performing
     // single stepping with no more steps to perform. Otherwise do another step.
 
     // Clear all current stepping setup.
-    debug->ClearStepping();
+    ClearStepping();
 
     // Notify the debug event listeners.
-    isolate->debugger()->OnDebugBreak(break_points_hit, false);
-  } else if (debug->thread_local_.last_step_action_ != StepNone) {
+    isolate_->debugger()->OnDebugBreak(break_points_hit, false);
+  } else if (thread_local_.last_step_action_ != StepNone) {
     // Hold on to last step action as it is cleared by the call to
     // ClearStepping.
-    StepAction step_action = debug->thread_local_.last_step_action_;
-    int step_count = debug->thread_local_.step_count_;
+    StepAction step_action = thread_local_.last_step_action_;
+    int step_count = thread_local_.step_count_;
 
     // Clear all current stepping setup.
-    debug->ClearStepping();
+    ClearStepping();
 
     // Set up for the remaining steps.
-    debug->PrepareStep(step_action, step_count);
+    PrepareStep(step_action, step_count);
   }
 
-  if (debug->thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
-    debug->SetAfterBreakTarget(frame);
-  } else if (debug->thread_local_.frame_drop_mode_ ==
+  if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
+    SetAfterBreakTarget(frame);
+  } else if (thread_local_.frame_drop_mode_ ==
       FRAME_DROPPED_IN_IC_CALL) {
     // We must have been calling IC stub. Do not go there anymore.
-    Code* plain_return =
-        Isolate::Current()->builtins()->builtin(
-            Builtins::kPlainReturn_LiveEdit);
-    debug->thread_local_.after_break_target_ = plain_return->entry();
-  } else if (debug->thread_local_.frame_drop_mode_ ==
+    Code* plain_return = isolate_->builtins()->builtin(
+        Builtins::kPlainReturn_LiveEdit);
+    thread_local_.after_break_target_ = plain_return->entry();
+  } else if (thread_local_.frame_drop_mode_ ==
       FRAME_DROPPED_IN_DEBUG_SLOT_CALL) {
     // Debug break slot stub does not return normally; instead it manually
     // cleans the stack and jumps. We should patch the jump address.
-    Code* plain_return = Isolate::Current()->builtins()->builtin(
+    Code* plain_return = isolate_->builtins()->builtin(
         Builtins::kFrameDropper_LiveEdit);
-    debug->thread_local_.after_break_target_ = plain_return->entry();
-  } else if (debug->thread_local_.frame_drop_mode_ ==
+    thread_local_.after_break_target_ = plain_return->entry();
+  } else if (thread_local_.frame_drop_mode_ ==
       FRAME_DROPPED_IN_DIRECT_CALL) {
     // Nothing to do, after_break_target is not used here.
   } else {
@@ -1031,6 +997,11 @@
 }
 
 
+RUNTIME_FUNCTION(Object*, Debug_Break) {
+  return isolate->debug()->Break(args);
+}
+
+
 // Check the break point objects for whether one or more are actually
 // triggered. This function returns a JSArray with the break point objects
 // which is triggered.
@@ -1224,7 +1195,7 @@
     // If there is no JavaScript stack don't do anything.
     return;
   }
-  for (JavaScriptFrameIterator it(id); !it.done(); it.Advance()) {
+  for (JavaScriptFrameIterator it(isolate_, id); !it.done(); it.Advance()) {
     JavaScriptFrame* frame = it.frame();
     if (frame->HasHandler()) {
       Handle<SharedFunctionInfo> shared =
@@ -1280,7 +1251,7 @@
     // If there is no JavaScript stack don't do anything.
     return;
   }
-  JavaScriptFrameIterator frames_it(id);
+  JavaScriptFrameIterator frames_it(isolate_, id);
   JavaScriptFrame* frame = frames_it.frame();
 
   // First of all ensure there is one-shot break points in the top handler
@@ -1777,7 +1748,7 @@
   Handle<Code> original_code(debug_info->original_code());
 #ifdef DEBUG
   // Get the code which is actually executing.
-  Handle<Code> frame_code(frame->LookupCode(isolate_));
+  Handle<Code> frame_code(frame->LookupCode());
   ASSERT(frame_code.is_identical_to(code));
 #endif
 
@@ -1859,7 +1830,7 @@
   Handle<Code> code(debug_info->code());
 #ifdef DEBUG
   // Get the code which is actually executing.
-  Handle<Code> frame_code(frame->LookupCode(Isolate::Current()));
+  Handle<Code> frame_code(frame->LookupCode());
   ASSERT(frame_code.is_identical_to(code));
 #endif
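
Editor's note: Debug::Break is now an ordinary instance method, and generated code keeps a fixed-signature entry point via the RUNTIME_FUNCTION(Object*, Debug_Break) trampoline that forwards to the per-isolate Debug object. A minimal sketch of that trampoline pattern, with stand-in types (the real macro-expanded signature is an assumption here):

    struct Args {};  // stand-in for v8::internal::Arguments

    class DebugLike {
     public:
      // Instance method: the formerly static state lives on the object now.
      int Break(Args /*args*/) { return 0; }
    };

    struct IsolateLike {
      DebugLike debug_;
      DebugLike* debug() { return &debug_; }
    };

    // Fixed-signature free function, analogous to what the RUNTIME_FUNCTION
    // macro declares; generated code calls this and it simply forwards.
    int Debug_Break(Args args, IsolateLike* isolate) {
      return isolate->debug()->Break(args);
    }
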
 
diff --git a/src/debug.h b/src/debug.h
index d512595..9366fc3 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -228,7 +228,7 @@
   void PreemptionWhileInDebugger();
   void Iterate(ObjectVisitor* v);
 
-  static Object* Break(RUNTIME_CALLING_CONVENTION);
+  Object* Break(Arguments args);
   void SetBreakPoint(Handle<SharedFunctionInfo> shared,
                      Handle<Object> break_point_object,
                      int* source_position);
@@ -548,6 +548,9 @@
 };
 
 
+DECLARE_RUNTIME_FUNCTION(Object*, Debug_Break);
+
+
 // Message delivered to the message handler callback. This is either a debugger
 // event or the response to a command.
 class MessageImpl: public v8::Debug::Message {
@@ -860,6 +863,7 @@
   EnterDebugger()
       : isolate_(Isolate::Current()),
         prev_(isolate_->debug()->debugger_entry()),
+        it_(isolate_),
         has_js_frames_(!it_.done()),
         save_(isolate_) {
     Debug* debug = isolate_->debug();
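
Editor's note: the added it_(isolate_) initializer in EnterDebugger works only because C++ initializes members in declaration order: it_ is declared before has_js_frames_, whose initializer reads it_.done(). A standalone sketch of that ordering constraint; all types are stand-ins.

    struct FrameIt {
      explicit FrameIt(int /*isolate*/) {}
      bool done() const { return false; }
    };

    class EnterLike {
     public:
      EnterLike()
          : isolate_(0),
            it_(isolate_),                  // runs first: declared first
            has_js_frames_(!it_.done()) {}  // safe: it_ already constructed
     private:
      int isolate_;
      FrameIt it_;
      bool has_js_frames_;
    };
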
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 4372af0..2fc0e47 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -199,8 +199,7 @@
 }
 
 
-void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer,
-                                      Isolate* isolate) {
+void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
   deoptimizer->DoComputeOutputFrames();
 }
 
@@ -219,8 +218,7 @@
       fp_to_sp_delta_(fp_to_sp_delta),
       output_count_(0),
       output_(NULL),
-      integer32_values_(NULL),
-      double_values_(NULL) {
+      deferred_heap_numbers_(0) {
   if (FLAG_trace_deopt && type != OSR) {
     PrintF("**** DEOPT: ");
     function->PrintName();
@@ -259,8 +257,6 @@
 
 Deoptimizer::~Deoptimizer() {
   ASSERT(input_ == NULL && output_ == NULL);
-  delete[] integer32_values_;
-  delete[] double_values_;
 }
 
 
@@ -391,13 +387,8 @@
   int count = iterator.Next();
   ASSERT(output_ == NULL);
   output_ = new FrameDescription*[count];
-  // Per-frame lists of untagged and unboxed int32 and double values.
-  integer32_values_ = new List<ValueDescriptionInteger32>[count];
-  double_values_ = new List<ValueDescriptionDouble>[count];
   for (int i = 0; i < count; ++i) {
     output_[i] = NULL;
-    integer32_values_[i].Initialize(0);
-    double_values_[i].Initialize(0);
   }
   output_count_ = count;
 
@@ -425,37 +416,19 @@
 }
 
 
-void Deoptimizer::InsertHeapNumberValues(int index, JavaScriptFrame* frame) {
-  // We need to adjust the stack index by one for the top-most frame.
-  int extra_slot_count = (index == output_count() - 1) ? 1 : 0;
-  List<ValueDescriptionInteger32>* ints = &integer32_values_[index];
-  for (int i = 0; i < ints->length(); i++) {
-    ValueDescriptionInteger32 value = ints->at(i);
-    double val = static_cast<double>(value.int32_value());
-    InsertHeapNumberValue(frame, value.stack_index(), val, extra_slot_count);
+void Deoptimizer::MaterializeHeapNumbers() {
+  for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
+    HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
+    Handle<Object> num = isolate_->factory()->NewNumber(d.value());
+    if (FLAG_trace_deopt) {
+      PrintF("Materializing a new heap number %p [%e] in slot %p\n",
+             reinterpret_cast<void*>(*num),
+             d.value(),
+             d.slot_address());
+    }
+
+    Memory::Object_at(d.slot_address()) = *num;
   }
-
-  // Iterate over double values and convert them to a heap number.
-  List<ValueDescriptionDouble>* doubles = &double_values_[index];
-  for (int i = 0; i < doubles->length(); ++i) {
-    ValueDescriptionDouble value = doubles->at(i);
-    InsertHeapNumberValue(frame, value.stack_index(), value.double_value(),
-                          extra_slot_count);
-  }
-}
-
-
-void Deoptimizer::InsertHeapNumberValue(JavaScriptFrame* frame,
-                                        int stack_index,
-                                        double val,
-                                        int extra_slot_count) {
-  // Add one to the TOS index to take the 'state' pushed before jumping
-  // to the stub that calls Runtime::NotifyDeoptimized into account.
-  int tos_index = stack_index + extra_slot_count;
-  int index = (frame->ComputeExpressionsCount() - 1) - tos_index;
-  if (FLAG_trace_deopt) PrintF("Allocating a new heap number: %e\n", val);
-  Handle<Object> num = isolate_->factory()->NewNumber(val);
-  frame->SetExpression(index, *num);
 }
 
 
@@ -501,7 +474,6 @@
       int input_reg = iterator->Next();
       intptr_t value = input_->GetRegister(input_reg);
       bool is_smi = Smi::IsValid(value);
-      unsigned output_index = output_offset / kPointerSize;
       if (FLAG_trace_deopt) {
         PrintF(
             "    0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
@@ -518,9 +490,8 @@
       } else {
         // We save the untagged value on the side and store a GC-safe
         // temporary placeholder in the frame.
-        AddInteger32Value(frame_index,
-                          output_index,
-                          static_cast<int32_t>(value));
+        AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
+                       static_cast<double>(static_cast<int32_t>(value)));
         output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
       }
       return;
@@ -529,7 +500,6 @@
     case Translation::DOUBLE_REGISTER: {
       int input_reg = iterator->Next();
       double value = input_->GetDoubleRegister(input_reg);
-      unsigned output_index = output_offset / kPointerSize;
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
                output_[frame_index]->GetTop() + output_offset,
@@ -539,7 +509,7 @@
       }
       // We save the untagged value on the side and store a GC-safe
       // temporary placeholder in the frame.
-      AddDoubleValue(frame_index, output_index, value);
+      AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
       output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
       return;
     }
@@ -567,7 +537,6 @@
           input_->GetOffsetFromSlotIndex(this, input_slot_index);
       intptr_t value = input_->GetFrameSlot(input_offset);
       bool is_smi = Smi::IsValid(value);
-      unsigned output_index = output_offset / kPointerSize;
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": ",
                output_[frame_index]->GetTop() + output_offset);
@@ -584,9 +553,8 @@
       } else {
         // We save the untagged value on the side and store a GC-safe
         // temporary placeholder in the frame.
-        AddInteger32Value(frame_index,
-                          output_index,
-                          static_cast<int32_t>(value));
+        AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
+                       static_cast<double>(static_cast<int32_t>(value)));
         output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
       }
       return;
@@ -597,7 +565,6 @@
       unsigned input_offset =
           input_->GetOffsetFromSlotIndex(this, input_slot_index);
       double value = input_->GetDoubleFrameSlot(input_offset);
-      unsigned output_index = output_offset / kPointerSize;
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
                output_[frame_index]->GetTop() + output_offset,
@@ -607,7 +574,7 @@
       }
       // We save the untagged value on the side and store a GC-safe
       // temporary placeholder in the frame.
-      AddDoubleValue(frame_index, output_index, value);
+      AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
       output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
       return;
     }
@@ -911,19 +878,11 @@
 }
 
 
-void Deoptimizer::AddInteger32Value(int frame_index,
-                                    int slot_index,
-                                    int32_t value) {
-  ValueDescriptionInteger32 value_desc(slot_index, value);
-  integer32_values_[frame_index].Add(value_desc);
-}
-
-
-void Deoptimizer::AddDoubleValue(int frame_index,
-                                 int slot_index,
+void Deoptimizer::AddDoubleValue(intptr_t slot_address,
                                  double value) {
-  ValueDescriptionDouble value_desc(slot_index, value);
-  double_values_[frame_index].Add(value_desc);
+  HeapNumberMaterializationDescriptor value_desc(
+      reinterpret_cast<Address>(slot_address), value);
+  deferred_heap_numbers_.Add(value_desc);
 }
 
 
@@ -934,7 +893,7 @@
   // isn't meant to be serialized at all.
   ASSERT(!Serializer::enabled());
 
-  MacroAssembler masm(NULL, 16 * KB);
+  MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
   masm.set_emit_debug_code(false);
   GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
   CodeDesc desc;
@@ -1195,4 +1154,103 @@
 }
 
 
+// We can't intermix stack decoding and allocations because
+// deoptimization infrastructure is not GC safe.
+// Thus we build a temporary structure in malloced space.
+SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
+                                            DeoptimizationInputData* data,
+                                            JavaScriptFrame* frame) {
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+
+  switch (opcode) {
+    case Translation::BEGIN:
+    case Translation::FRAME:
+      // Peeled off before getting here.
+      break;
+
+    case Translation::ARGUMENTS_OBJECT:
+      // This can be only emitted for local slots not for argument slots.
+      break;
+
+    case Translation::REGISTER:
+    case Translation::INT32_REGISTER:
+    case Translation::DOUBLE_REGISTER:
+    case Translation::DUPLICATE:
+      // We are at a safepoint which corresponds to a call.  All registers
+      // are saved by the caller, so there are no live registers at this
+      // point.  Thus these translation commands should not be used.
+      break;
+
+    case Translation::STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::TAGGED);
+    }
+
+    case Translation::INT32_STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::INT32);
+    }
+
+    case Translation::DOUBLE_STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::DOUBLE);
+    }
+
+    case Translation::LITERAL: {
+      int literal_index = iterator->Next();
+      return SlotRef(data->LiteralArray()->get(literal_index));
+    }
+  }
+
+  UNREACHABLE();
+  return SlotRef();
+}
+
+
+void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
+                                             int inlined_frame_index,
+                                             Vector<SlotRef>* args_slots) {
+  AssertNoAllocation no_gc;
+  int deopt_index = AstNode::kNoNumber;
+  DeoptimizationInputData* data =
+      static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
+  TranslationIterator it(data->TranslationByteArray(),
+                         data->TranslationIndex(deopt_index)->value());
+  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+  ASSERT(opcode == Translation::BEGIN);
+  int frame_count = it.Next();
+  USE(frame_count);
+  ASSERT(frame_count > inlined_frame_index);
+  int frames_to_skip = inlined_frame_index;
+  while (true) {
+    opcode = static_cast<Translation::Opcode>(it.Next());
+    // Skip over operands to advance to the next opcode.
+    it.Skip(Translation::NumberOfOperandsFor(opcode));
+    if (opcode == Translation::FRAME) {
+      if (frames_to_skip == 0) {
+        // We reached the frame corresponding to the inlined function
+        // in question.  Process the translation commands for the
+        // arguments.
+        //
+        // Skip the translation command for the receiver.
+        it.Skip(Translation::NumberOfOperandsFor(
+            static_cast<Translation::Opcode>(it.Next())));
+        // Compute slots for arguments.
+        for (int i = 0; i < args_slots->length(); ++i) {
+          (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
+        }
+        return;
+      }
+      frames_to_skip--;
+    }
+  }
+
+  UNREACHABLE();
+}
+
+
 } }  // namespace v8::internal
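
Editor's note: the deoptimizer rewrite above replaces the per-frame int32/double lists with a single two-phase scheme: while frames are rebuilt (no GC allocation allowed), each untagged value is recorded against its absolute slot address alongside a GC-safe placeholder, and MaterializeHeapNumbers() later allocates the boxed numbers and patches the recorded slots. A simplified sketch of the scheme, using stand-in types and plain doubles in place of heap numbers:

    #include <vector>

    struct Descriptor {
      double* slot_address;  // stand-in for Address + Memory::Object_at
      double value;
    };

    class DeoptSketch {
     public:
      // Phase 1 (allocation-free), mirrors AddDoubleValue(): just record.
      void AddDoubleValue(double* slot, double value) {
        deferred_.push_back({slot, value});
      }
      // Phase 2, mirrors MaterializeHeapNumbers(): box and patch the slots.
      void Materialize() {
        for (const Descriptor& d : deferred_) *d.slot_address = d.value;
        deferred_.clear();
      }
     private:
      std::vector<Descriptor> deferred_;
    };
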
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index a53de3d..cb82f44 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -42,38 +42,17 @@
 class DeoptimizingCodeListNode;
 
 
-class ValueDescription BASE_EMBEDDED {
+class HeapNumberMaterializationDescriptor BASE_EMBEDDED {
  public:
-  explicit ValueDescription(int index) : stack_index_(index) { }
-  int stack_index() const { return stack_index_; }
+  HeapNumberMaterializationDescriptor(Address slot_address, double val)
+      : slot_address_(slot_address), val_(val) { }
+
+  Address slot_address() const { return slot_address_; }
+  double value() const { return val_; }
 
  private:
-  // Offset relative to the top of the stack.
-  int stack_index_;
-};
-
-
-class ValueDescriptionInteger32: public ValueDescription {
- public:
-  ValueDescriptionInteger32(int index, int32_t value)
-      : ValueDescription(index), int32_value_(value) { }
-  int32_t int32_value() const { return int32_value_; }
-
- private:
-  // Raw value.
-  int32_t int32_value_;
-};
-
-
-class ValueDescriptionDouble: public ValueDescription {
- public:
-  ValueDescriptionDouble(int index, double value)
-      : ValueDescription(index), double_value_(value) { }
-  double double_value() const { return double_value_; }
-
- private:
-  // Raw value.
-  double double_value_;
+  Address slot_address_;
+  double val_;
 };
 
 
@@ -190,9 +169,9 @@
 
   ~Deoptimizer();
 
-  void InsertHeapNumberValues(int index, JavaScriptFrame* frame);
+  void MaterializeHeapNumbers();
 
-  static void ComputeOutputFrames(Deoptimizer* deoptimizer, Isolate* isolate);
+  static void ComputeOutputFrames(Deoptimizer* deoptimizer);
 
   static Address GetDeoptimizationEntry(int id, BailoutType type);
   static int GetDeoptimizationId(Address addr, BailoutType type);
@@ -277,13 +256,7 @@
 
   Object* ComputeLiteral(int index) const;
 
-  void InsertHeapNumberValue(JavaScriptFrame* frame,
-                             int stack_index,
-                             double val,
-                             int extra_slot_count);
-
-  void AddInteger32Value(int frame_index, int slot_index, int32_t value);
-  void AddDoubleValue(int frame_index, int slot_index, double value);
+  void AddDoubleValue(intptr_t slot_address, double value);
 
   static LargeObjectChunk* CreateCode(BailoutType type);
   static void GenerateDeoptimizationEntries(
@@ -310,8 +283,7 @@
   // Array of output frame descriptions.
   FrameDescription** output_;
 
-  List<ValueDescriptionInteger32>* integer32_values_;
-  List<ValueDescriptionDouble>* double_values_;
+  List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
 
   static int table_entry_size_;
 
@@ -552,6 +524,78 @@
 };
 
 
+class SlotRef BASE_EMBEDDED {
+ public:
+  enum SlotRepresentation {
+    UNKNOWN,
+    TAGGED,
+    INT32,
+    DOUBLE,
+    LITERAL
+  };
+
+  SlotRef()
+      : addr_(NULL), representation_(UNKNOWN) { }
+
+  SlotRef(Address addr, SlotRepresentation representation)
+      : addr_(addr), representation_(representation) { }
+
+  explicit SlotRef(Object* literal)
+      : literal_(literal), representation_(LITERAL) { }
+
+  Handle<Object> GetValue() {
+    switch (representation_) {
+      case TAGGED:
+        return Handle<Object>(Memory::Object_at(addr_));
+
+      case INT32: {
+        int value = Memory::int32_at(addr_);
+        if (Smi::IsValid(value)) {
+          return Handle<Object>(Smi::FromInt(value));
+        } else {
+          return Isolate::Current()->factory()->NewNumberFromInt(value);
+        }
+      }
+
+      case DOUBLE: {
+        double value = Memory::double_at(addr_);
+        return Isolate::Current()->factory()->NewNumber(value);
+      }
+
+      case LITERAL:
+        return literal_;
+
+      default:
+        UNREACHABLE();
+        return Handle<Object>::null();
+    }
+  }
+
+  static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
+                                             int inlined_frame_index,
+                                             Vector<SlotRef>* args_slots);
+
+ private:
+  Address addr_;
+  Handle<Object> literal_;
+  SlotRepresentation representation_;
+
+  static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
+    if (slot_index >= 0) {
+      const int offset = JavaScriptFrameConstants::kLocal0Offset;
+      return frame->fp() + offset - (slot_index * kPointerSize);
+    } else {
+      const int offset = JavaScriptFrameConstants::kLastParameterOffset;
+      return frame->fp() + offset - ((slot_index + 1) * kPointerSize);
+    }
+  }
+
+  static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
+                                            DeoptimizationInputData* data,
+                                            JavaScriptFrame* frame);
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_DEOPTIMIZER_H_
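
Editor's note: SlotRef::SlotAddress above encodes an easy-to-miss convention: non-negative slot indices address locals below the frame pointer, negative indices address incoming parameters above it. A standalone restatement with assumed frame offsets (the real values come from JavaScriptFrameConstants):

    #include <cstdint>

    constexpr std::intptr_t kPointerSize = sizeof(void*);
    // Assumed offsets, for illustration only.
    constexpr std::intptr_t kLocal0Offset = -2 * kPointerSize;
    constexpr std::intptr_t kLastParameterOffset = 2 * kPointerSize;

    std::intptr_t SlotAddress(std::intptr_t fp, int slot_index) {
      if (slot_index >= 0) {
        // Locals grow downwards from the frame pointer.
        return fp + kLocal0Offset - slot_index * kPointerSize;
      }
      // Parameters sit above fp; index -1 is the last parameter.
      return fp + kLastParameterOffset - (slot_index + 1) * kPointerSize;
    }
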
diff --git a/src/disassembler.cc b/src/disassembler.cc
index d142ef6..65e1668 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -28,7 +28,7 @@
 #include "v8.h"
 
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "disasm.h"
@@ -282,7 +282,8 @@
         } else {
           out.AddFormatted(" %s", Code::Kind2String(kind));
         }
-      } else if (rmode == RelocInfo::RUNTIME_ENTRY) {
+      } else if (rmode == RelocInfo::RUNTIME_ENTRY &&
+                 Isolate::Current()->deoptimizer_data() != NULL) {
         // A runtime entry relocinfo might be a deoptimization bailout.
         Address addr = relocinfo.target_address();
         int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
diff --git a/src/execution.cc b/src/execution.cc
index 98c8b68..eb26438 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,7 @@
 
 #include "api.h"
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "runtime-profiler.h"
 #include "simulator.h"
@@ -199,6 +199,8 @@
 
 Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
   ASSERT(!object->IsJSFunction());
+  Isolate* isolate = Isolate::Current();
+  Factory* factory = isolate->factory();
 
   // If you return a function from here, it will be called when an
   // attempt is made to call the given object as a function.
@@ -206,7 +208,7 @@
   // Regular expressions can be called as functions in both Firefox
   // and Safari so we allow it too.
   if (object->IsJSRegExp()) {
-    Handle<String> exec = FACTORY->exec_symbol();
+    Handle<String> exec = factory->exec_symbol();
     // TODO(lrn): Bug 617.  We should use the default function here, not the
     // one on the RegExp object.
     Object* exec_function;
@@ -214,7 +216,7 @@
       // This can lose an exception, but the alternative is to put a failure
       // object in a handle, which is not GC safe.
       if (!maybe_exec_function->ToObject(&exec_function)) {
-        return FACTORY->undefined_value();
+        return factory->undefined_value();
       }
     }
     return Handle<Object>(exec_function);
@@ -225,15 +227,16 @@
   if (object->IsHeapObject() &&
       HeapObject::cast(*object)->map()->has_instance_call_handler()) {
     return Handle<JSFunction>(
-        Isolate::Current()->global_context()->call_as_function_delegate());
+        isolate->global_context()->call_as_function_delegate());
   }
 
-  return FACTORY->undefined_value();
+  return factory->undefined_value();
 }
 
 
 Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
   ASSERT(!object->IsJSFunction());
+  Isolate* isolate = Isolate::Current();
 
   // If you return a function from here, it will be called when an
   // attempt is made to call the given object as a constructor.
@@ -243,10 +246,10 @@
   if (object->IsHeapObject() &&
       HeapObject::cast(*object)->map()->has_instance_call_handler()) {
     return Handle<JSFunction>(
-        Isolate::Current()->global_context()->call_as_constructor_delegate());
+        isolate->global_context()->call_as_constructor_delegate());
   }
 
-  return FACTORY->undefined_value();
+  return isolate->factory()->undefined_value();
 }
 
 
@@ -467,10 +470,11 @@
 
 #define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception)            \
   do {                                                                         \
+    Isolate* isolate = Isolate::Current();                                     \
     Object** args[argc] = argv;                                                \
     ASSERT(has_pending_exception != NULL);                                     \
-    return Call(Isolate::Current()->name##_fun(),                              \
-                Isolate::Current()->js_builtins_object(), argc, args,          \
+    return Call(isolate->name##_fun(),                                         \
+                isolate->js_builtins_object(), argc, args,                     \
                 has_pending_exception);                                        \
   } while (false)
 
@@ -549,20 +553,23 @@
 
 
 Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
+  Isolate* isolate = string->GetIsolate();
+  Factory* factory = isolate->factory();
+
   int int_index = static_cast<int>(index);
   if (int_index < 0 || int_index >= string->length()) {
-    return FACTORY->undefined_value();
+    return factory->undefined_value();
   }
 
   Handle<Object> char_at =
-      GetProperty(Isolate::Current()->js_builtins_object(),
-                  FACTORY->char_at_symbol());
+      GetProperty(isolate->js_builtins_object(),
+                  factory->char_at_symbol());
   if (!char_at->IsJSFunction()) {
-    return FACTORY->undefined_value();
+    return factory->undefined_value();
   }
 
   bool caught_exception;
-  Handle<Object> index_object = FACTORY->NewNumberFromInt(int_index);
+  Handle<Object> index_object = factory->NewNumberFromInt(int_index);
   Object** index_arg[] = { index_object.location() };
   Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
                                   string,
@@ -570,7 +577,7 @@
                                   index_arg,
                                   &caught_exception);
   if (caught_exception) {
-    return FACTORY->undefined_value();
+    return factory->undefined_value();
   }
   return result;
 }
@@ -578,17 +585,18 @@
 
 Handle<JSFunction> Execution::InstantiateFunction(
     Handle<FunctionTemplateInfo> data, bool* exc) {
+  Isolate* isolate = data->GetIsolate();
   // Fast case: see if the function has already been instantiated
   int serial_number = Smi::cast(data->serial_number())->value();
   Object* elm =
-      Isolate::Current()->global_context()->function_cache()->
+      isolate->global_context()->function_cache()->
           GetElementNoExceptionThrown(serial_number);
   if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
   // The function has not yet been instantiated in this context; do it.
   Object** args[1] = { Handle<Object>::cast(data).location() };
   Handle<Object> result =
-      Call(Isolate::Current()->instantiate_fun(),
-           Isolate::Current()->js_builtins_object(), 1, args, exc);
+      Call(isolate->instantiate_fun(),
+           isolate->js_builtins_object(), 1, args, exc);
   if (*exc) return Handle<JSFunction>::null();
   return Handle<JSFunction>::cast(result);
 }
@@ -596,12 +604,13 @@
 
 Handle<JSObject> Execution::InstantiateObject(Handle<ObjectTemplateInfo> data,
                                               bool* exc) {
+  Isolate* isolate = data->GetIsolate();
   if (data->property_list()->IsUndefined() &&
       !data->constructor()->IsUndefined()) {
     // Initialization to make gcc happy.
     Object* result = NULL;
     {
-      HandleScope scope;
+      HandleScope scope(isolate);
       Handle<FunctionTemplateInfo> cons_template =
           Handle<FunctionTemplateInfo>(
               FunctionTemplateInfo::cast(data->constructor()));
@@ -616,8 +625,8 @@
   } else {
     Object** args[1] = { Handle<Object>::cast(data).location() };
     Handle<Object> result =
-        Call(Isolate::Current()->instantiate_fun(),
-             Isolate::Current()->js_builtins_object(), 1, args, exc);
+        Call(isolate->instantiate_fun(),
+             isolate->js_builtins_object(), 1, args, exc);
     if (*exc) return Handle<JSObject>::null();
     return Handle<JSObject>::cast(result);
   }
@@ -627,9 +636,10 @@
 void Execution::ConfigureInstance(Handle<Object> instance,
                                   Handle<Object> instance_template,
                                   bool* exc) {
+  Isolate* isolate = Isolate::Current();
   Object** args[2] = { instance.location(), instance_template.location() };
-  Execution::Call(Isolate::Current()->configure_instance_fun(),
-                  Isolate::Current()->js_builtins_object(), 2, args, exc);
+  Execution::Call(isolate->configure_instance_fun(),
+                  isolate->js_builtins_object(), 2, args, exc);
 }
 
 
@@ -637,6 +647,7 @@
                                             Handle<JSFunction> fun,
                                             Handle<Object> pos,
                                             Handle<Object> is_global) {
+  Isolate* isolate = fun->GetIsolate();
   const int argc = 4;
   Object** args[argc] = { recv.location(),
                           Handle<Object>::cast(fun).location(),
@@ -644,10 +655,13 @@
                           is_global.location() };
   bool caught_exception = false;
   Handle<Object> result =
-      TryCall(Isolate::Current()->get_stack_trace_line_fun(),
-              Isolate::Current()->js_builtins_object(), argc, args,
+      TryCall(isolate->get_stack_trace_line_fun(),
+              isolate->js_builtins_object(), argc, args,
               &caught_exception);
-  if (caught_exception || !result->IsString()) return FACTORY->empty_symbol();
+  if (caught_exception || !result->IsString()) {
+      return isolate->factory()->empty_symbol();
+  }
+
   return Handle<String>::cast(result);
 }
 
@@ -697,7 +711,7 @@
   }
 
   {
-    JavaScriptFrameIterator it;
+    JavaScriptFrameIterator it(isolate);
     ASSERT(!it.done());
     Object* fun = it.frame()->function();
     if (fun && fun->IsJSFunction()) {
@@ -728,10 +742,11 @@
 }
 
 void Execution::ProcessDebugMesssages(bool debug_command_only) {
+  Isolate* isolate = Isolate::Current();
   // Clear the debug command request flag.
-  Isolate::Current()->stack_guard()->Continue(DEBUGCOMMAND);
+  isolate->stack_guard()->Continue(DEBUGCOMMAND);
 
-  HandleScope scope;
+  HandleScope scope(isolate);
   // Enter the debugger. Just continue if we fail to enter the debugger.
   EnterDebugger debugger;
   if (debugger.FailedToEnter()) {
@@ -740,8 +755,8 @@
 
   // Notify the debug event listeners. Indicate auto continue if the break was
   // a debug command break.
-  Isolate::Current()->debugger()->OnDebugBreak(FACTORY->undefined_value(),
-                                               debug_command_only);
+  isolate->debugger()->OnDebugBreak(isolate->factory()->undefined_value(),
+                                    debug_command_only);
 }
 
 
diff --git a/src/extensions/experimental/break-iterator.cc b/src/extensions/experimental/break-iterator.cc
index 6f574d4..e8baea7 100644
--- a/src/extensions/experimental/break-iterator.cc
+++ b/src/extensions/experimental/break-iterator.cc
@@ -46,16 +46,16 @@
   return NULL;
 }
 
-UnicodeString* BreakIterator::ResetAdoptedText(
+icu::UnicodeString* BreakIterator::ResetAdoptedText(
     v8::Handle<v8::Object> obj, v8::Handle<v8::Value> value) {
   // Get the previous value from the internal field.
-  UnicodeString* text = static_cast<UnicodeString*>(
+  icu::UnicodeString* text = static_cast<icu::UnicodeString*>(
       obj->GetPointerFromInternalField(1));
   delete text;
 
   // Assign new value to the internal pointer.
   v8::String::Value text_value(value);
-  text = new UnicodeString(
+  text = new icu::UnicodeString(
       reinterpret_cast<const UChar*>(*text_value), text_value.length());
   obj->SetPointerInInternalField(1, text);
 
@@ -74,7 +74,7 @@
   // pointing to a break iterator.
   delete UnpackBreakIterator(persistent_object);
 
-  delete static_cast<UnicodeString*>(
+  delete static_cast<icu::UnicodeString*>(
       persistent_object->GetPointerFromInternalField(1));
 
   // Then dispose of the persistent handle to JS object.
@@ -144,8 +144,9 @@
   }
 
   // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
-  int32_t status =
-      static_cast<RuleBasedBreakIterator*>(break_iterator)->getRuleStatus();
+  icu::RuleBasedBreakIterator* rule_based_iterator =
+      static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
+  int32_t status = rule_based_iterator->getRuleStatus();
   // Keep return values in sync with JavaScript BreakType enum.
   if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
     return v8::Int32::New(UBRK_WORD_NONE);
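
Editor's note: the break-iterator changes are namespace hygiene: ICU types are now written with an explicit icu:: qualifier instead of depending on ICU's U_NAMESPACE_USE directive being in effect. A small sketch, assuming the ICU headers are available:

    #include <unicode/unistr.h>

    // Fully qualified: works whether or not "using namespace icu" is active.
    icu::UnicodeString MakeText(const UChar* chars, int32_t length) {
      return icu::UnicodeString(chars, length);
    }
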
diff --git a/src/extensions/experimental/break-iterator.h b/src/extensions/experimental/break-iterator.h
index 473bc89..fac1ed8 100644
--- a/src/extensions/experimental/break-iterator.h
+++ b/src/extensions/experimental/break-iterator.h
@@ -51,8 +51,8 @@
 
   // Deletes the old value and sets the adopted text in
   // corresponding JavaScript object.
-  static UnicodeString* ResetAdoptedText(v8::Handle<v8::Object> obj,
-                                         v8::Handle<v8::Value> text_value);
+  static icu::UnicodeString* ResetAdoptedText(v8::Handle<v8::Object> obj,
+                                              v8::Handle<v8::Value> text_value);
 
   // Release memory we allocated for the BreakIterator once the JS object that
   // holds the pointer gets garbage collected.
diff --git a/src/extensions/experimental/collator.cc b/src/extensions/experimental/collator.cc
new file mode 100644
index 0000000..7d1a21d
--- /dev/null
+++ b/src/extensions/experimental/collator.cc
@@ -0,0 +1,218 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "collator.h"
+
+#include "unicode/coll.h"
+#include "unicode/locid.h"
+#include "unicode/ucol.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Persistent<v8::FunctionTemplate> Collator::collator_template_;
+
+icu::Collator* Collator::UnpackCollator(v8::Handle<v8::Object> obj) {
+  if (collator_template_->HasInstance(obj)) {
+    return static_cast<icu::Collator*>(obj->GetPointerFromInternalField(0));
+  }
+
+  return NULL;
+}
+
+void Collator::DeleteCollator(v8::Persistent<v8::Value> object, void* param) {
+  v8::Persistent<v8::Object> persistent_object =
+      v8::Persistent<v8::Object>::Cast(object);
+
+  // First delete the hidden C++ object.
+  // Unpacking should never return NULL here. That would only happen if
+  // this method is used as the weak callback for persistent handles not
+  // pointing to a collator.
+  delete UnpackCollator(persistent_object);
+
+  // Then dispose of the persistent handle to JS object.
+  persistent_object.Dispose();
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+  // Returns undefined, and schedules an exception to be thrown.
+  return v8::ThrowException(v8::Exception::Error(
+      v8::String::New("Collator method called on an object "
+                      "that is not a Collator.")));
+}
+
+// Extracts the boolean option named |option| and stores it in |result|.
+// Returns true if the option is specified and boolean; otherwise false.
+static bool ExtractBooleanOption(const v8::Local<v8::Object>& options,
+                                 const char* option,
+                                 bool* result) {
+  v8::HandleScope handle_scope;
+  v8::TryCatch try_catch;
+  v8::Handle<v8::Value> value = options->Get(v8::String::New(option));
+  if (try_catch.HasCaught()) {
+    return false;
+  }
+  // No need to check if |value| is empty because it's taken care of
+  // by TryCatch above.
+  if (!value->IsUndefined() && !value->IsNull()) {
+    if (value->IsBoolean()) {
+      *result = value->BooleanValue();
+      return true;
+    }
+  }
+  return false;
+}
+
+// When there's an ICU error, throw a JavaScript error with |message|.
+static v8::Handle<v8::Value> ThrowExceptionForICUError(const char* message) {
+  return v8::ThrowException(v8::Exception::Error(v8::String::New(message)));
+}
+
+v8::Handle<v8::Value> Collator::CollatorCompare(const v8::Arguments& args) {
+  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Two string arguments are required.")));
+  }
+
+  icu::Collator* collator = UnpackCollator(args.Holder());
+  if (!collator) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  v8::String::Value string_value1(args[0]);
+  v8::String::Value string_value2(args[1]);
+  const UChar* string1 = reinterpret_cast<const UChar*>(*string_value1);
+  const UChar* string2 = reinterpret_cast<const UChar*>(*string_value2);
+  UErrorCode status = U_ZERO_ERROR;
+  UCollationResult result = collator->compare(
+      string1, string_value1.length(), string2, string_value2.length(), status);
+
+  if (U_FAILURE(status)) {
+    return ThrowExceptionForICUError(
+        "Unexpected failure in Collator.compare.");
+  }
+
+  return v8::Int32::New(result);
+}
+
+v8::Handle<v8::Value> Collator::JSCollator(const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsObject()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Locale and collation options are required.")));
+  }
+
+  v8::String::AsciiValue locale(args[0]);
+  icu::Locale icu_locale(*locale);
+
+  icu::Collator* collator = NULL;
+  UErrorCode status = U_ZERO_ERROR;
+  collator = icu::Collator::createInstance(icu_locale, status);
+
+  if (U_FAILURE(status)) {
+    delete collator;
+    return ThrowExceptionForICUError("Failed to create collator.");
+  }
+
+  v8::Local<v8::Object> options(args[1]->ToObject());
+
+  // Below, we only change collation options that the caller explicitly
+  // specified in JavaScript; otherwise we leave them alone, because we
+  // don't want to override the locale-dependent defaults.
+  // The three options below very likely share the same default across
+  // locales, but I haven't checked them all. Options we may add in the
+  // future certainly have locale-dependent defaults (e.g. caseFirst is
+  // upperFirst for Danish while it is off for most other locales).
+
+  bool ignore_case, ignore_accents, numeric;
+
+  if (ExtractBooleanOption(options, "ignoreCase", &ignore_case)) {
+    collator->setAttribute(UCOL_CASE_LEVEL, ignore_case ? UCOL_OFF : UCOL_ON,
+                           status);
+    if (U_FAILURE(status)) {
+      delete collator;
+      return ThrowExceptionForICUError("Failed to set ignoreCase.");
+    }
+  }
+
+  // Accents are taken into account with strength secondary or higher.
+  if (ExtractBooleanOption(options, "ignoreAccents", &ignore_accents)) {
+    if (!ignore_accents) {
+      collator->setStrength(icu::Collator::SECONDARY);
+    } else {
+      collator->setStrength(icu::Collator::PRIMARY);
+    }
+  }
+
+  if (ExtractBooleanOption(options, "numeric", &numeric)) {
+    collator->setAttribute(UCOL_NUMERIC_COLLATION,
+                           numeric ? UCOL_ON : UCOL_OFF, status);
+    if (U_FAILURE(status)) {
+      delete collator;
+      return ThrowExceptionForICUError("Failed to set numeric sort option.");
+    }
+  }
+
+  if (collator_template_.IsEmpty()) {
+    v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+    raw_template->SetClassName(v8::String::New("v8Locale.Collator"));
+
+    // Define internal field count on instance template.
+    v8::Local<v8::ObjectTemplate> object_template =
+        raw_template->InstanceTemplate();
+
+    // Set aside an internal field for the ICU collator.
+    object_template->SetInternalFieldCount(1);
+
+    // Define all of the prototype methods on prototype template.
+    v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+    proto->Set(v8::String::New("compare"),
+               v8::FunctionTemplate::New(CollatorCompare));
+
+    collator_template_ =
+        v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+  }
+
+  // Create an empty object wrapper.
+  v8::Local<v8::Object> local_object =
+      collator_template_->GetFunction()->NewInstance();
+  v8::Persistent<v8::Object> wrapper =
+      v8::Persistent<v8::Object>::New(local_object);
+
+  // Set collator as internal field of the resulting JS object.
+  wrapper->SetPointerInInternalField(0, collator);
+
+  // Make the object handle weak so we can delete the collator once GC kicks in.
+  wrapper.MakeWeak(NULL, DeleteCollator);
+
+  return wrapper;
+}
+
+} }  // namespace v8::internal
+
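For orientation, a minimal sketch of how the option handling above behaves from script, assuming the experimental i18n extension is registered and using the v8Locale.Collator wrapper that this patch adds in i18n.js. Only genuine booleans pass ExtractBooleanOption; any other value leaves the locale-dependent ICU default untouched:

  // Hypothetical usage; 'en' is an arbitrary locale choice.
  var caseLevelSet = new v8Locale.Collator('en', {ignoreCase: true});
  var allDefaults = new v8Locale.Collator('en', {ignoreCase: 'yes'});  // non-boolean: ignored
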
diff --git a/src/extensions/experimental/collator.h b/src/extensions/experimental/collator.h
new file mode 100644
index 0000000..10d6ffb
--- /dev/null
+++ b/src/extensions/experimental/collator.h
@@ -0,0 +1,69 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
+
+#include <v8.h>
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class Collator;
+class UnicodeString;
+}
+
+namespace v8 {
+namespace internal {
+
+class Collator {
+ public:
+  static v8::Handle<v8::Value> JSCollator(const v8::Arguments& args);
+
+  // Helper methods for various bindings.
+
+  // Unpacks collator object from corresponding JavaScript object.
+  static icu::Collator* UnpackCollator(v8::Handle<v8::Object> obj);
+
+  // Release memory we allocated for the Collator once the JS object that
+  // holds the pointer gets garbage collected.
+  static void DeleteCollator(v8::Persistent<v8::Value> object, void* param);
+
+  // Compares two strings and returns -1, 0 or 1 depending on
+  // whether string1 is smaller than, equal to, or larger than string2.
+  static v8::Handle<v8::Value> CollatorCompare(const v8::Arguments& args);
+
+ private:
+  Collator() {}
+
+  static v8::Persistent<v8::FunctionTemplate> collator_template_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
+
diff --git a/src/extensions/experimental/experimental.gyp b/src/extensions/experimental/experimental.gyp
index 761f4c7..d1194ce 100644
--- a/src/extensions/experimental/experimental.gyp
+++ b/src/extensions/experimental/experimental.gyp
@@ -39,8 +39,13 @@
       'sources': [
         'break-iterator.cc',
         'break-iterator.h',
+        'collator.cc',
+        'collator.h',
         'i18n-extension.cc',
         'i18n-extension.h',
+        'i18n-locale.cc',
+        'i18n-locale.h',
+        '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
       ],
       'include_dirs': [
         '<(icu_src_dir)/public/common',
@@ -48,8 +53,38 @@
       ],
       'dependencies': [
         '<(icu_src_dir)/icu.gyp:*',
+        'js2c_i18n#host',
         '../../../tools/gyp/v8.gyp:v8',
       ],
     },
+    {
+      'target_name': 'js2c_i18n',
+      'type': 'none',
+      'toolsets': ['host'],
+      'variables': {
+        'library_files': [
+          'i18n.js'
+        ],
+      },
+      'actions': [
+        {
+          'action_name': 'js2c_i18n',
+          'inputs': [
+            '../../../tools/js2c.py',
+            '<@(library_files)',
+          ],
+          'outputs': [
+            '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
+          ],
+          'action': [
+            'python',
+            '../../../tools/js2c.py',
+            '<@(_outputs)',
+            'I18N',
+            '<@(library_files)'
+          ],
+        },
+      ],
+    },
   ],  # targets
 }
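The js2c_i18n target generates i18n-js.cc on the host by running tools/js2c.py over i18n.js, then feeds the generated file back into the extension's sources. Expanded, the action is equivalent to the following invocation (a sketch; gyp substitutes the real intermediate directory):

  python ../../../tools/js2c.py <SHARED_INTERMEDIATE_DIR>/i18n-js.cc I18N i18n.js
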
diff --git a/src/extensions/experimental/i18n-extension.cc b/src/extensions/experimental/i18n-extension.cc
index e65fdcc..56bea23 100644
--- a/src/extensions/experimental/i18n-extension.cc
+++ b/src/extensions/experimental/i18n-extension.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,249 +27,57 @@
 
 #include "i18n-extension.h"
 
-#include <algorithm>
-#include <string>
-
 #include "break-iterator.h"
-#include "unicode/locid.h"
-#include "unicode/uloc.h"
+#include "collator.h"
+#include "i18n-locale.h"
+#include "natives.h"
 
 namespace v8 {
 namespace internal {
 
 I18NExtension* I18NExtension::extension_ = NULL;
 
-// TODO(cira): maybe move JS code to a .js file and generata cc files from it?
-// TODO(cira): Remove v8 prefix from v8Locale once we have stable API.
-const char* const I18NExtension::kSource =
-  "v8Locale = function(optLocale) {"
-  "  native function NativeJSLocale();"
-  "  var properties = NativeJSLocale(optLocale);"
-  "  this.locale = properties.locale;"
-  "  this.language = properties.language;"
-  "  this.script = properties.script;"
-  "  this.region = properties.region;"
-  "};"
-  "v8Locale.availableLocales = function() {"
-  "  native function NativeJSAvailableLocales();"
-  "  return NativeJSAvailableLocales();"
-  "};"
-  "v8Locale.prototype.maximizedLocale = function() {"
-  "  native function NativeJSMaximizedLocale();"
-  "  return new v8Locale(NativeJSMaximizedLocale(this.locale));"
-  "};"
-  "v8Locale.prototype.minimizedLocale = function() {"
-  "  native function NativeJSMinimizedLocale();"
-  "  return new v8Locale(NativeJSMinimizedLocale(this.locale));"
-  "};"
-  "v8Locale.prototype.displayLocale_ = function(displayLocale) {"
-  "  var result = this.locale;"
-  "  if (displayLocale !== undefined) {"
-  "    result = displayLocale.locale;"
-  "  }"
-  "  return result;"
-  "};"
-  "v8Locale.prototype.displayLanguage = function(optDisplayLocale) {"
-  "  var displayLocale = this.displayLocale_(optDisplayLocale);"
-  "  native function NativeJSDisplayLanguage();"
-  "  return NativeJSDisplayLanguage(this.locale, displayLocale);"
-  "};"
-  "v8Locale.prototype.displayScript = function(optDisplayLocale) {"
-  "  var displayLocale = this.displayLocale_(optDisplayLocale);"
-  "  native function NativeJSDisplayScript();"
-  "  return NativeJSDisplayScript(this.locale, displayLocale);"
-  "};"
-  "v8Locale.prototype.displayRegion = function(optDisplayLocale) {"
-  "  var displayLocale = this.displayLocale_(optDisplayLocale);"
-  "  native function NativeJSDisplayRegion();"
-  "  return NativeJSDisplayRegion(this.locale, displayLocale);"
-  "};"
-  "v8Locale.prototype.displayName = function(optDisplayLocale) {"
-  "  var displayLocale = this.displayLocale_(optDisplayLocale);"
-  "  native function NativeJSDisplayName();"
-  "  return NativeJSDisplayName(this.locale, displayLocale);"
-  "};"
-  "v8Locale.v8BreakIterator = function(locale, type) {"
-  "  native function NativeJSBreakIterator();"
-  "  var iterator = NativeJSBreakIterator(locale, type);"
-  "  iterator.type = type;"
-  "  return iterator;"
-  "};"
-  "v8Locale.v8BreakIterator.BreakType = {"
-  "  'unknown': -1,"
-  "  'none': 0,"
-  "  'number': 100,"
-  "  'word': 200,"
-  "  'kana': 300,"
-  "  'ideo': 400"
-  "};"
-  "v8Locale.prototype.v8CreateBreakIterator = function(type) {"
-  "  return new v8Locale.v8BreakIterator(this.locale, type);"
-  "};";
+// Returns a pointer to the static string containing the actual
+// JavaScript code generated from the i18n.js file.
+static const char* GetScriptSource() {
+  int index = NativesCollection<I18N>::GetIndex("i18n");
+  Vector<const char> script_data =
+      NativesCollection<I18N>::GetScriptSource(index);
+
+  return script_data.start();
+}
+
+I18NExtension::I18NExtension()
+    : v8::Extension("v8/i18n", GetScriptSource()) {
+}
 
 v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction(
     v8::Handle<v8::String> name) {
   if (name->Equals(v8::String::New("NativeJSLocale"))) {
-    return v8::FunctionTemplate::New(JSLocale);
+    return v8::FunctionTemplate::New(I18NLocale::JSLocale);
   } else if (name->Equals(v8::String::New("NativeJSAvailableLocales"))) {
-    return v8::FunctionTemplate::New(JSAvailableLocales);
+    return v8::FunctionTemplate::New(I18NLocale::JSAvailableLocales);
   } else if (name->Equals(v8::String::New("NativeJSMaximizedLocale"))) {
-    return v8::FunctionTemplate::New(JSMaximizedLocale);
+    return v8::FunctionTemplate::New(I18NLocale::JSMaximizedLocale);
   } else if (name->Equals(v8::String::New("NativeJSMinimizedLocale"))) {
-    return v8::FunctionTemplate::New(JSMinimizedLocale);
+    return v8::FunctionTemplate::New(I18NLocale::JSMinimizedLocale);
   } else if (name->Equals(v8::String::New("NativeJSDisplayLanguage"))) {
-    return v8::FunctionTemplate::New(JSDisplayLanguage);
+    return v8::FunctionTemplate::New(I18NLocale::JSDisplayLanguage);
   } else if (name->Equals(v8::String::New("NativeJSDisplayScript"))) {
-    return v8::FunctionTemplate::New(JSDisplayScript);
+    return v8::FunctionTemplate::New(I18NLocale::JSDisplayScript);
   } else if (name->Equals(v8::String::New("NativeJSDisplayRegion"))) {
-    return v8::FunctionTemplate::New(JSDisplayRegion);
+    return v8::FunctionTemplate::New(I18NLocale::JSDisplayRegion);
   } else if (name->Equals(v8::String::New("NativeJSDisplayName"))) {
-    return v8::FunctionTemplate::New(JSDisplayName);
+    return v8::FunctionTemplate::New(I18NLocale::JSDisplayName);
   } else if (name->Equals(v8::String::New("NativeJSBreakIterator"))) {
     return v8::FunctionTemplate::New(BreakIterator::JSBreakIterator);
+  } else if (name->Equals(v8::String::New("NativeJSCollator"))) {
+    return v8::FunctionTemplate::New(Collator::JSCollator);
   }
 
   return v8::Handle<v8::FunctionTemplate>();
 }
 
-v8::Handle<v8::Value> I18NExtension::JSLocale(const v8::Arguments& args) {
-  // TODO(cira): Fetch browser locale. Accept en-US as good default for now.
-  // We could possibly pass browser locale as a parameter in the constructor.
-  std::string locale_name("en-US");
-  if (args.Length() == 1 && args[0]->IsString()) {
-    locale_name = *v8::String::Utf8Value(args[0]->ToString());
-  }
-
-  v8::Local<v8::Object> locale = v8::Object::New();
-  locale->Set(v8::String::New("locale"), v8::String::New(locale_name.c_str()));
-
-  icu::Locale icu_locale(locale_name.c_str());
-
-  const char* language = icu_locale.getLanguage();
-  locale->Set(v8::String::New("language"), v8::String::New(language));
-
-  const char* script = icu_locale.getScript();
-  if (strlen(script)) {
-    locale->Set(v8::String::New("script"), v8::String::New(script));
-  }
-
-  const char* region = icu_locale.getCountry();
-  if (strlen(region)) {
-    locale->Set(v8::String::New("region"), v8::String::New(region));
-  }
-
-  return locale;
-}
-
-// TODO(cira): Filter out locales that Chrome doesn't support.
-v8::Handle<v8::Value> I18NExtension::JSAvailableLocales(
-    const v8::Arguments& args) {
-  v8::Local<v8::Array> all_locales = v8::Array::New();
-
-  int count = 0;
-  const Locale* icu_locales = icu::Locale::getAvailableLocales(count);
-  for (int i = 0; i < count; ++i) {
-    all_locales->Set(i, v8::String::New(icu_locales[i].getName()));
-  }
-
-  return all_locales;
-}
-
-// Use - as tag separator, not _ that ICU uses.
-static std::string NormalizeLocale(const std::string& locale) {
-  std::string result(locale);
-  // TODO(cira): remove STL dependency.
-  std::replace(result.begin(), result.end(), '_', '-');
-  return result;
-}
-
-v8::Handle<v8::Value> I18NExtension::JSMaximizedLocale(
-    const v8::Arguments& args) {
-  if (!args.Length() || !args[0]->IsString()) {
-    return v8::Undefined();
-  }
-
-  UErrorCode status = U_ZERO_ERROR;
-  std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
-  char max_locale[ULOC_FULLNAME_CAPACITY];
-  uloc_addLikelySubtags(locale_name.c_str(), max_locale,
-                        sizeof(max_locale), &status);
-  if (U_FAILURE(status)) {
-    return v8::Undefined();
-  }
-
-  return v8::String::New(NormalizeLocale(max_locale).c_str());
-}
-
-v8::Handle<v8::Value> I18NExtension::JSMinimizedLocale(
-    const v8::Arguments& args) {
-  if (!args.Length() || !args[0]->IsString()) {
-    return v8::Undefined();
-  }
-
-  UErrorCode status = U_ZERO_ERROR;
-  std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
-  char min_locale[ULOC_FULLNAME_CAPACITY];
-  uloc_minimizeSubtags(locale_name.c_str(), min_locale,
-                       sizeof(min_locale), &status);
-  if (U_FAILURE(status)) {
-    return v8::Undefined();
-  }
-
-  return v8::String::New(NormalizeLocale(min_locale).c_str());
-}
-
-// Common code for JSDisplayXXX methods.
-static v8::Handle<v8::Value> GetDisplayItem(const v8::Arguments& args,
-                                            const std::string& item) {
-  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
-    return v8::Undefined();
-  }
-
-  std::string base_locale = *v8::String::Utf8Value(args[0]->ToString());
-  icu::Locale icu_locale(base_locale.c_str());
-  icu::Locale display_locale =
-      icu::Locale(*v8::String::Utf8Value(args[1]->ToString()));
-  UnicodeString result;
-  if (item == "language") {
-    icu_locale.getDisplayLanguage(display_locale, result);
-  } else if (item == "script") {
-    icu_locale.getDisplayScript(display_locale, result);
-  } else if (item == "region") {
-    icu_locale.getDisplayCountry(display_locale, result);
-  } else if (item == "name") {
-    icu_locale.getDisplayName(display_locale, result);
-  } else {
-    return v8::Undefined();
-  }
-
-  if (result.length()) {
-    return v8::String::New(
-        reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
-  }
-
-  return v8::Undefined();
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayLanguage(
-    const v8::Arguments& args) {
-  return GetDisplayItem(args, "language");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayScript(
-    const v8::Arguments& args) {
-  return GetDisplayItem(args, "script");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayRegion(
-    const v8::Arguments& args) {
-  return GetDisplayItem(args, "region");
-}
-
-v8::Handle<v8::Value> I18NExtension::JSDisplayName(const v8::Arguments& args) {
-  return GetDisplayItem(args, "name");
-}
-
 I18NExtension* I18NExtension::get() {
   if (!extension_) {
     extension_ = new I18NExtension();
diff --git a/src/extensions/experimental/i18n-extension.h b/src/extensions/experimental/i18n-extension.h
index 629332b..b4dc7c3 100644
--- a/src/extensions/experimental/i18n-extension.h
+++ b/src/extensions/experimental/i18n-extension.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,26 +36,16 @@
 
 class I18NExtension : public v8::Extension {
  public:
-  I18NExtension() : v8::Extension("v8/i18n", kSource) {}
+  I18NExtension();
+
   virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
       v8::Handle<v8::String> name);
 
-  // Implementations of window.Locale methods.
-  static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSAvailableLocales(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSMaximizedLocale(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSMinimizedLocale(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSDisplayLanguage(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSDisplayScript(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSDisplayRegion(const v8::Arguments& args);
-  static v8::Handle<v8::Value> JSDisplayName(const v8::Arguments& args);
-
   // V8 code prefers Register, while Chrome and WebKit use get kind of methods.
   static void Register();
   static I18NExtension* get();
 
  private:
-  static const char* const kSource;
   static I18NExtension* extension_;
 };
 
diff --git a/src/extensions/experimental/i18n-locale.cc b/src/extensions/experimental/i18n-locale.cc
new file mode 100644
index 0000000..e5e1cf8
--- /dev/null
+++ b/src/extensions/experimental/i18n-locale.cc
@@ -0,0 +1,172 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "i18n-locale.h"
+
+#include <algorithm>
+#include <string>
+
+#include "unicode/locid.h"
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Handle<v8::Value> I18NLocale::JSLocale(const v8::Arguments& args) {
+  // TODO(cira): Fetch the browser locale. Accept en-US as a good default for now.
+  // We could possibly pass browser locale as a parameter in the constructor.
+  std::string locale_name("en-US");
+  if (args.Length() == 1 && args[0]->IsString()) {
+    locale_name = *v8::String::Utf8Value(args[0]->ToString());
+  }
+
+  v8::Local<v8::Object> locale = v8::Object::New();
+  locale->Set(v8::String::New("locale"), v8::String::New(locale_name.c_str()));
+
+  icu::Locale icu_locale(locale_name.c_str());
+
+  const char* language = icu_locale.getLanguage();
+  locale->Set(v8::String::New("language"), v8::String::New(language));
+
+  const char* script = icu_locale.getScript();
+  if (strlen(script)) {
+    locale->Set(v8::String::New("script"), v8::String::New(script));
+  }
+
+  const char* region = icu_locale.getCountry();
+  if (strlen(region)) {
+    locale->Set(v8::String::New("region"), v8::String::New(region));
+  }
+
+  return locale;
+}
+
+// TODO(cira): Filter out locales that Chrome doesn't support.
+v8::Handle<v8::Value> I18NLocale::JSAvailableLocales(
+    const v8::Arguments& args) {
+  v8::Local<v8::Array> all_locales = v8::Array::New();
+
+  int count = 0;
+  const icu::Locale* icu_locales = icu::Locale::getAvailableLocales(count);
+  for (int i = 0; i < count; ++i) {
+    all_locales->Set(i, v8::String::New(icu_locales[i].getName()));
+  }
+
+  return all_locales;
+}
+
+// Use '-' as the tag separator, not the '_' that ICU uses.
+static std::string NormalizeLocale(const std::string& locale) {
+  std::string result(locale);
+  // TODO(cira): remove STL dependency.
+  std::replace(result.begin(), result.end(), '_', '-');
+  return result;
+}
+
+v8::Handle<v8::Value> I18NLocale::JSMaximizedLocale(const v8::Arguments& args) {
+  if (!args.Length() || !args[0]->IsString()) {
+    return v8::Undefined();
+  }
+
+  UErrorCode status = U_ZERO_ERROR;
+  std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
+  char max_locale[ULOC_FULLNAME_CAPACITY];
+  uloc_addLikelySubtags(locale_name.c_str(), max_locale,
+                        sizeof(max_locale), &status);
+  if (U_FAILURE(status)) {
+    return v8::Undefined();
+  }
+
+  return v8::String::New(NormalizeLocale(max_locale).c_str());
+}
+
+v8::Handle<v8::Value> I18NLocale::JSMinimizedLocale(const v8::Arguments& args) {
+  if (!args.Length() || !args[0]->IsString()) {
+    return v8::Undefined();
+  }
+
+  UErrorCode status = U_ZERO_ERROR;
+  std::string locale_name = *v8::String::Utf8Value(args[0]->ToString());
+  char min_locale[ULOC_FULLNAME_CAPACITY];
+  uloc_minimizeSubtags(locale_name.c_str(), min_locale,
+                       sizeof(min_locale), &status);
+  if (U_FAILURE(status)) {
+    return v8::Undefined();
+  }
+
+  return v8::String::New(NormalizeLocale(min_locale).c_str());
+}
+
+// Common code for JSDisplayXXX methods.
+static v8::Handle<v8::Value> GetDisplayItem(const v8::Arguments& args,
+                                            const std::string& item) {
+  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
+    return v8::Undefined();
+  }
+
+  std::string base_locale = *v8::String::Utf8Value(args[0]->ToString());
+  icu::Locale icu_locale(base_locale.c_str());
+  icu::Locale display_locale =
+      icu::Locale(*v8::String::Utf8Value(args[1]->ToString()));
+  icu::UnicodeString result;
+  if (item == "language") {
+    icu_locale.getDisplayLanguage(display_locale, result);
+  } else if (item == "script") {
+    icu_locale.getDisplayScript(display_locale, result);
+  } else if (item == "region") {
+    icu_locale.getDisplayCountry(display_locale, result);
+  } else if (item == "name") {
+    icu_locale.getDisplayName(display_locale, result);
+  } else {
+    return v8::Undefined();
+  }
+
+  if (result.length()) {
+    return v8::String::New(
+        reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
+  }
+
+  return v8::Undefined();
+}
+
+v8::Handle<v8::Value> I18NLocale::JSDisplayLanguage(const v8::Arguments& args) {
+  return GetDisplayItem(args, "language");
+}
+
+v8::Handle<v8::Value> I18NLocale::JSDisplayScript(const v8::Arguments& args) {
+  return GetDisplayItem(args, "script");
+}
+
+v8::Handle<v8::Value> I18NLocale::JSDisplayRegion(const v8::Arguments& args) {
+  return GetDisplayItem(args, "region");
+}
+
+v8::Handle<v8::Value> I18NLocale::JSDisplayName(const v8::Arguments& args) {
+  return GetDisplayItem(args, "name");
+}
+
+} }  // namespace v8::internal
diff --git a/src/arm/register-allocator-arm.h b/src/extensions/experimental/i18n-locale.h
similarity index 63%
rename from src/arm/register-allocator-arm.h
rename to src/extensions/experimental/i18n-locale.h
index fdbc88f..aa9adbe 100644
--- a/src/arm/register-allocator-arm.h
+++ b/src/extensions/experimental/i18n-locale.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,20 +25,29 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_H_
-#define V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
+
+#include <v8.h>
 
 namespace v8 {
 namespace internal {
 
-class RegisterAllocatorConstants : public AllStatic {
+class I18NLocale {
  public:
-  // No registers are currently managed by the register allocator on ARM.
-  static const int kNumRegisters = 0;
-  static const int kInvalidRegister = -1;
-};
+  I18NLocale() {}
 
+  // Implementations of window.Locale methods.
+  static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSAvailableLocales(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSMaximizedLocale(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSMinimizedLocale(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSDisplayLanguage(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSDisplayScript(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSDisplayRegion(const v8::Arguments& args);
+  static v8::Handle<v8::Value> JSDisplayName(const v8::Arguments& args);
+};
 
 } }  // namespace v8::internal
 
-#endif  // V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
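A minimal sketch of the relocated locale bindings as seen from script, assuming the extension is loaded and a d8-style print() is available; the maximized tag comes from ICU's uloc_addLikelySubtags, so the concrete values are illustrative:

  var loc = new v8Locale('sr');
  var max = loc.maximizedLocale();    // e.g. max.locale == 'sr-Cyrl-RS'
  var min = max.minimizedLocale();    // back to 'sr'
  print(loc.displayLanguage(new v8Locale('en')));  // e.g. 'Serbian'
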
diff --git a/src/extensions/experimental/i18n.js b/src/extensions/experimental/i18n.js
new file mode 100644
index 0000000..5a74905
--- /dev/null
+++ b/src/extensions/experimental/i18n.js
@@ -0,0 +1,116 @@
+// Copyright 2006-2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// TODO(cira): Remove the v8 prefix from v8Locale once we have a stable API.
+v8Locale = function(optLocale) {
+  native function NativeJSLocale();
+  var properties = NativeJSLocale(optLocale);
+  this.locale = properties.locale;
+  this.language = properties.language;
+  this.script = properties.script;
+  this.region = properties.region;
+};
+
+v8Locale.availableLocales = function() {
+  native function NativeJSAvailableLocales();
+  return NativeJSAvailableLocales();
+};
+
+v8Locale.prototype.maximizedLocale = function() {
+  native function NativeJSMaximizedLocale();
+  return new v8Locale(NativeJSMaximizedLocale(this.locale));
+};
+
+v8Locale.prototype.minimizedLocale = function() {
+  native function NativeJSMinimizedLocale();
+  return new v8Locale(NativeJSMinimizedLocale(this.locale));
+};
+
+v8Locale.prototype.displayLocale_ = function(displayLocale) {
+  var result = this.locale;
+  if (displayLocale !== undefined) {
+    result = displayLocale.locale;
+  }
+  return result;
+};
+
+v8Locale.prototype.displayLanguage = function(optDisplayLocale) {
+  var displayLocale = this.displayLocale_(optDisplayLocale);
+  native function NativeJSDisplayLanguage();
+  return NativeJSDisplayLanguage(this.locale, displayLocale);
+};
+
+v8Locale.prototype.displayScript = function(optDisplayLocale) {
+  var displayLocale = this.displayLocale_(optDisplayLocale);
+  native function NativeJSDisplayScript();
+  return NativeJSDisplayScript(this.locale, displayLocale);
+};
+
+v8Locale.prototype.displayRegion = function(optDisplayLocale) {
+  var displayLocale = this.displayLocale_(optDisplayLocale);
+  native function NativeJSDisplayRegion();
+  return NativeJSDisplayRegion(this.locale, displayLocale);
+};
+
+v8Locale.prototype.displayName = function(optDisplayLocale) {
+  var displayLocale = this.displayLocale_(optDisplayLocale);
+  native function NativeJSDisplayName();
+  return NativeJSDisplayName(this.locale, displayLocale);
+};
+
+v8Locale.v8BreakIterator = function(locale, type) {
+  native function NativeJSBreakIterator();
+  var iterator = NativeJSBreakIterator(locale, type);
+  iterator.type = type;
+  return iterator;
+};
+
+v8Locale.v8BreakIterator.BreakType = {
+  'unknown': -1,
+  'none': 0,
+  'number': 100,
+  'word': 200,
+  'kana': 300,
+  'ideo': 400
+};
+
+v8Locale.prototype.v8CreateBreakIterator = function(type) {
+  return new v8Locale.v8BreakIterator(this.locale, type);
+};
+
+// TODO(jungshik): Set |collator.options| to actually recognized / resolved
+// values.
+v8Locale.Collator = function(locale, options) {
+  native function NativeJSCollator();
+  var collator = NativeJSCollator(locale,
+      options === undefined ? {} : options);
+  return collator;
+};
+
+v8Locale.prototype.createCollator = function(options) {
+  return new v8Locale.Collator(this.locale, options);
+};
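A short usage sketch for the new wrapper, assuming the extension is registered. compare() forwards to icu::Collator::compare and mirrors UCollationResult, so it yields -1, 0 or 1:

  var collator = (new v8Locale('en')).createCollator({numeric: true});
  collator.compare('a', 'b');    // -1: 'a' sorts before 'b'
  collator.compare('2', '10');   // -1 under numeric: true, since 2 < 10
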
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 0bc6409..69139bb 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -96,6 +96,9 @@
 //
 #define FLAG FLAG_FULL
 
+// Flags for experimental language features.
+DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
+
 // Flags for Crankshaft.
 #ifdef V8_TARGET_ARCH_MIPS
   DEFINE_bool(crankshaft, false, "use crankshaft")
@@ -162,7 +165,8 @@
 DEFINE_bool(enable_sahf, true,
             "enable use of SAHF instruction if available (X64 only)")
 DEFINE_bool(enable_vfp3, true,
-            "enable use of VFP3 instructions if available (ARM only)")
+            "enable use of VFP3 instructions if available - this implies "
+            "enabling ARMv7 instructions (ARM only)")
 DEFINE_bool(enable_armv7, true,
             "enable use of ARMv7 instructions if available (ARM only)")
 DEFINE_bool(enable_fpu, true,
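Flags declared here surface directly as command-line switches, so the experimental feature can be toggled per run; a sketch, assuming the d8 shell and a hypothetical script.js:

  d8 --harmony_proxies script.js
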
diff --git a/src/frames-inl.h b/src/frames-inl.h
index e6eaec0..5951806 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -88,6 +88,11 @@
 }
 
 
+inline StackFrame::StackFrame(StackFrameIterator* iterator)
+    : iterator_(iterator), isolate_(iterator_->isolate()) {
+}
+
+
 inline StackHandler* StackFrame::top_handler() const {
   return iterator_->handler();
 }
@@ -143,15 +148,26 @@
 }
 
 
+Address JavaScriptFrame::GetParameterSlot(int index) const {
+  int param_count = ComputeParametersCount();
+  ASSERT(-1 <= index && index < param_count);
+  int parameter_offset = (param_count - index - 1) * kPointerSize;
+  return caller_sp() + parameter_offset;
+}
+
+
+Object* JavaScriptFrame::GetParameter(int index) const {
+  return Memory::Object_at(GetParameterSlot(index));
+}
+
+
 inline Object* JavaScriptFrame::receiver() const {
-  const int offset = JavaScriptFrameConstants::kReceiverOffset;
-  return Memory::Object_at(caller_sp() + offset);
+  return GetParameter(-1);
 }
 
 
 inline void JavaScriptFrame::set_receiver(Object* value) {
-  const int offset = JavaScriptFrameConstants::kReceiverOffset;
-  Memory::Object_at(caller_sp() + offset) = value;
+  Memory::Object_at(GetParameterSlot(-1)) = value;
 }
 
 
@@ -168,6 +184,13 @@
 
 
 template<typename Iterator>
+inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
+    Isolate* isolate)
+    : iterator_(isolate) {
+  if (!done()) Advance();
+}
+
+template<typename Iterator>
 inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
   // TODO(1233797): The frame hierarchy needs to change. It's
   // problematic that we can't use the safe-cast operator to cast to
@@ -181,11 +204,9 @@
 
 template<typename Iterator>
 JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
-    StackFrame::Id id) {
-  while (!done()) {
-    Advance();
-    if (frame()->id() == id) return;
-  }
+    Isolate* isolate, StackFrame::Id id)
+    : iterator_(isolate) {
+  AdvanceToId(id);
 }
 
 
@@ -206,6 +227,15 @@
 
 
 template<typename Iterator>
+void JavaScriptFrameIteratorTemp<Iterator>::AdvanceToId(StackFrame::Id id) {
+  while (!done()) {
+    Advance();
+    if (frame()->id() == id) return;
+  }
+}
+
+
+template<typename Iterator>
 void JavaScriptFrameIteratorTemp<Iterator>::Reset() {
   iterator_.Reset();
   if (!done()) Advance();
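The new accessors make the argument layout explicit: parameters sit above the caller's stack pointer with the rightmost parameter closest to it, and the receiver lives one slot beyond the leftmost parameter, which is why index -1 is accepted. A worked instance of the formula, assuming param_count == 2:

  slot(index) = caller_sp() + (param_count - index - 1) * kPointerSize
  slot(1)  == caller_sp()                      // last parameter
  slot(0)  == caller_sp() + kPointerSize       // first parameter
  slot(-1) == caller_sp() + 2 * kPointerSize   // receiver
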
diff --git a/src/frames.cc b/src/frames.cc
index 79aa250..e0517c8 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -39,9 +39,6 @@
 namespace v8 {
 namespace internal {
 
-
-int SafeStackFrameIterator::active_count_ = 0;
-
 // Iterator that supports traversing the stack handlers of a
 // particular frame. Needs to know the top of the handler chain.
 class StackHandlerIterator BASE_EMBEDDED {
@@ -73,23 +70,34 @@
 
 #define INITIALIZE_SINGLETON(type, field) field##_(this),
 StackFrameIterator::StackFrameIterator()
-    : STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+    : isolate_(Isolate::Current()),
+      STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
       frame_(NULL), handler_(NULL),
-      thread_(Isolate::Current()->thread_local_top()),
+      thread_(isolate_->thread_local_top()),
       fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
   Reset();
 }
-StackFrameIterator::StackFrameIterator(ThreadLocalTop* t)
-    : STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+StackFrameIterator::StackFrameIterator(Isolate* isolate)
+    : isolate_(isolate),
+      STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+      frame_(NULL), handler_(NULL),
+      thread_(isolate_->thread_local_top()),
+      fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
+  Reset();
+}
+StackFrameIterator::StackFrameIterator(Isolate* isolate, ThreadLocalTop* t)
+    : isolate_(isolate),
+      STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
       frame_(NULL), handler_(NULL), thread_(t),
       fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
   Reset();
 }
 StackFrameIterator::StackFrameIterator(Isolate* isolate,
                                        bool use_top, Address fp, Address sp)
-    : STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
+    : isolate_(isolate),
+      STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
       frame_(NULL), handler_(NULL),
-      thread_(use_top ? isolate->thread_local_top() : NULL),
+      thread_(use_top ? isolate_->thread_local_top() : NULL),
       fp_(use_top ? NULL : fp), sp_(sp),
       advance_(use_top ? &StackFrameIterator::AdvanceWithHandler :
                &StackFrameIterator::AdvanceWithoutHandler) {
@@ -147,7 +155,7 @@
     state.sp = sp_;
     state.pc_address =
         reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_));
-    type = StackFrame::ComputeType(&state);
+    type = StackFrame::ComputeType(isolate(), &state);
   }
   if (SingletonFor(type) == NULL) return;
   frame_ = SingletonFor(type, &state);
@@ -188,6 +196,12 @@
 }
 
 
+StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
+    : JavaScriptFrameIterator(isolate) {
+  if (!done() && !IsValidFrame()) Advance();
+}
+
+
 void StackTraceFrameIterator::Advance() {
   while (true) {
     JavaScriptFrameIterator::Advance();
@@ -221,10 +235,24 @@
 }
 
 
+SafeStackFrameIterator::ActiveCountMaintainer::ActiveCountMaintainer(
+    Isolate* isolate)
+    : isolate_(isolate) {
+  isolate_->set_safe_stack_iterator_counter(
+      isolate_->safe_stack_iterator_counter() + 1);
+}
+
+
+SafeStackFrameIterator::ActiveCountMaintainer::~ActiveCountMaintainer() {
+  isolate_->set_safe_stack_iterator_counter(
+      isolate_->safe_stack_iterator_counter() - 1);
+}
+
+
 SafeStackFrameIterator::SafeStackFrameIterator(
     Isolate* isolate,
     Address fp, Address sp, Address low_bound, Address high_bound) :
-    maintainer_(),
+    maintainer_(isolate),
     stack_validator_(low_bound, high_bound),
     is_valid_top_(IsValidTop(isolate, low_bound, high_bound)),
     is_valid_fp_(IsWithinBounds(low_bound, high_bound, fp)),
@@ -233,6 +261,10 @@
     iterator_(isolate, is_valid_top_, is_valid_fp_ ? fp : NULL, sp) {
 }
 
+bool SafeStackFrameIterator::is_active(Isolate* isolate) {
+  return isolate->safe_stack_iterator_counter() > 0;
+}
+
 
 bool SafeStackFrameIterator::IsValidTop(Isolate* isolate,
                                         Address low_bound, Address high_bound) {
@@ -333,10 +365,10 @@
 #endif
 
 
-Code* StackFrame::GetSafepointData(Address pc,
+Code* StackFrame::GetSafepointData(Isolate* isolate,
+                                   Address pc,
                                    SafepointEntry* safepoint_entry,
                                    unsigned* stack_slots) {
-  Isolate* isolate = Isolate::Current();
   PcToCodeCache::PcToCodeCacheEntry* entry =
       isolate->pc_to_code_cache()->GetCacheEntry(pc);
   SafepointEntry cached_safepoint_entry = entry->safepoint_entry;
@@ -377,7 +409,7 @@
 }
 
 
-StackFrame::Type StackFrame::ComputeType(State* state) {
+StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
   ASSERT(state->fp != NULL);
   if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
     return ARGUMENTS_ADAPTOR;
@@ -392,9 +424,8 @@
     // frames as normal JavaScript frames to avoid having to look
     // into the heap to determine the state. This is safe as long
     // as nobody tries to GC...
-    if (SafeStackFrameIterator::is_active()) return JAVA_SCRIPT;
-    Code::Kind kind = GetContainingCode(Isolate::Current(),
-                                        *(state->pc_address))->kind();
+    if (SafeStackFrameIterator::is_active(isolate)) return JAVA_SCRIPT;
+    Code::Kind kind = GetContainingCode(isolate, *(state->pc_address))->kind();
     ASSERT(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
     return (kind == Code::OPTIMIZED_FUNCTION) ? OPTIMIZED : JAVA_SCRIPT;
   }
@@ -405,7 +436,7 @@
 
 StackFrame::Type StackFrame::GetCallerState(State* state) const {
   ComputeCallerState(state);
-  return ComputeType(state);
+  return ComputeType(isolate(), state);
 }
 
 
@@ -465,7 +496,7 @@
 void ExitFrame::Iterate(ObjectVisitor* v) const {
   // The arguments are traversed as part of the expression stack of
   // the calling frame.
-  IteratePc(v, pc_address(), LookupCode(Isolate::Current()));
+  IteratePc(v, pc_address(), LookupCode());
   v->VisitPointer(&code_slot());
 }
 
@@ -539,18 +570,16 @@
 
   // Make sure that we're not doing "safe" stack frame iteration. We cannot
   // possibly find pointers in optimized frames in that state.
-  ASSERT(!SafeStackFrameIterator::is_active());
+  ASSERT(!SafeStackFrameIterator::is_active(isolate()));
 
   // Compute the safepoint information.
   unsigned stack_slots = 0;
   SafepointEntry safepoint_entry;
   Code* code = StackFrame::GetSafepointData(
-      pc(), &safepoint_entry, &stack_slots);
+      isolate(), pc(), &safepoint_entry, &stack_slots);
   unsigned slot_space = stack_slots * kPointerSize;
 
-  // Visit the outgoing parameters. This is usually dealt with by the
-  // callee, but while GC'ing we artificially lower the number of
-  // arguments to zero and let the caller deal with it.
+  // Visit the outgoing parameters.
   Object** parameters_base = &Memory::Object_at(sp());
   Object** parameters_limit = &Memory::Object_at(
       fp() + JavaScriptFrameConstants::kFunctionOffset - slot_space);
@@ -604,21 +633,6 @@
 
   // Visit the return address in the callee and incoming arguments.
   IteratePc(v, pc_address(), code);
-  IterateArguments(v);
-}
-
-
-Object* JavaScriptFrame::GetParameter(int index) const {
-  ASSERT(index >= 0 && index < ComputeParametersCount());
-  const int offset = JavaScriptFrameConstants::kParam0Offset;
-  return Memory::Object_at(caller_sp() + offset - (index * kPointerSize));
-}
-
-
-int JavaScriptFrame::ComputeParametersCount() const {
-  Address base  = caller_sp() + JavaScriptFrameConstants::kReceiverOffset;
-  Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset;
-  return static_cast<int>((base - limit) / kPointerSize);
 }
 
 
@@ -638,27 +652,17 @@
 }
 
 
+int JavaScriptFrame::GetNumberOfIncomingArguments() const {
+  ASSERT(!SafeStackFrameIterator::is_active(isolate()) &&
+         isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
+
+  JSFunction* function = JSFunction::cast(this->function());
+  return function->shared()->formal_parameter_count();
+}
+
+
 Address JavaScriptFrame::GetCallerStackPointer() const {
-  int arguments;
-  if (SafeStackFrameIterator::is_active() ||
-      HEAP->gc_state() != Heap::NOT_IN_GC) {
-    // If the we are currently iterating the safe stack the
-    // arguments for frames are traversed as if they were
-    // expression stack elements of the calling frame. The reason for
-    // this rather strange decision is that we cannot access the
-    // function during mark-compact GCs when objects may have been marked.
-    // In fact accessing heap objects (like function->shared() below)
-    // at all during GC is problematic.
-    arguments = 0;
-  } else {
-    // Compute the number of arguments by getting the number of formal
-    // parameters of the function. We must remember to take the
-    // receiver into account (+1).
-    JSFunction* function = JSFunction::cast(this->function());
-    arguments = function->shared()->formal_parameter_count() + 1;
-  }
-  const int offset = StandardFrameConstants::kCallerSPOffset;
-  return fp() + offset + (arguments * kPointerSize);
+  return fp() + StandardFrameConstants::kCallerSPOffset;
 }
 
 
@@ -670,7 +674,7 @@
 
 void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
   ASSERT(functions->length() == 0);
-  Code* code_pointer = LookupCode(Isolate::Current());
+  Code* code_pointer = LookupCode();
   int offset = static_cast<int>(pc() - code_pointer->address());
   FrameSummary summary(receiver(),
                        JSFunction::cast(function()),
@@ -789,7 +793,7 @@
   // back to a slow search in this case to find the original optimized
   // code object.
   if (!code->contains(pc())) {
-    code = Isolate::Current()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
+    code = isolate()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
   }
   ASSERT(code != NULL);
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
@@ -836,9 +840,7 @@
 
 
 Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
-  const int arguments = Smi::cast(GetExpression(0))->value();
-  const int offset = StandardFrameConstants::kCallerSPOffset;
-  return fp() + offset + (arguments + 1) * kPointerSize;
+  return fp() + StandardFrameConstants::kCallerSPOffset;
 }
 
 
@@ -850,7 +852,7 @@
 
 
 Code* ArgumentsAdaptorFrame::unchecked_code() const {
-  return Isolate::Current()->builtins()->builtin(
+  return isolate()->builtins()->builtin(
       Builtins::kArgumentsAdaptorTrampoline);
 }
 
@@ -1045,14 +1047,14 @@
   ASSERT(!it.done());
   StackHandler* handler = it.handler();
   ASSERT(handler->is_entry());
-  handler->Iterate(v, LookupCode(Isolate::Current()));
+  handler->Iterate(v, LookupCode());
 #ifdef DEBUG
   // Make sure that the entry frame does not contain more than one
   // stack handler.
   it.Advance();
   ASSERT(it.done());
 #endif
-  IteratePc(v, pc_address(), LookupCode(Isolate::Current()));
+  IteratePc(v, pc_address(), LookupCode());
 }
 
 
@@ -1069,7 +1071,7 @@
     v->VisitPointers(base, reinterpret_cast<Object**>(address));
     base = reinterpret_cast<Object**>(address + StackHandlerConstants::kSize);
     // Traverse the pointers in the handler itself.
-    handler->Iterate(v, LookupCode(Isolate::Current()));
+    handler->Iterate(v, LookupCode());
   }
   v->VisitPointers(base, limit);
 }
@@ -1077,18 +1079,7 @@
 
 void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
   IterateExpressions(v);
-  IteratePc(v, pc_address(), LookupCode(Isolate::Current()));
-  IterateArguments(v);
-}
-
-
-void JavaScriptFrame::IterateArguments(ObjectVisitor* v) const {
-  // Traverse callee-saved registers, receiver, and parameters.
-  const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
-  const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset;
-  Object** base = &Memory::Object_at(fp() + kBaseOffset);
-  Object** limit = &Memory::Object_at(caller_sp() + kLimitOffset) + 1;
-  v->VisitPointers(base, limit);
+  IteratePc(v, pc_address(), LookupCode());
 }
 
 
@@ -1096,7 +1087,7 @@
   // Internal frames only have object pointers on the expression stack
   // as they never have any arguments.
   IterateExpressions(v);
-  IteratePc(v, pc_address(), LookupCode(Isolate::Current()));
+  IteratePc(v, pc_address(), LookupCode());
 }
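A note on the simplified GetCallerStackPointer above, reconstructing the arithmetic from the removed code (a before/after sketch, not authoritative):

  before: caller_sp = fp() + kCallerSPOffset + (formal_parameter_count + 1) * kPointerSize
  after:  caller_sp = fp() + kCallerSPOffset

Parameters are now reached above caller_sp through GetParameterSlot instead of hanging below it, and ArgumentsAdaptorFrame recovers its argument count from expression slot 0 via GetNumberOfIncomingArguments.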
 
 
diff --git a/src/frames.h b/src/frames.h
index bee95cc..da9009b 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -158,10 +158,12 @@
     Address* pc_address;
   };
 
-  // Copy constructor; it breaks the connection to host iterator.
+  // Copy constructor; it breaks the connection to the host iterator
+  // (as an iterator usually lives on the stack).
   StackFrame(const StackFrame& original) {
     this->state_ = original.state_;
     this->iterator_ = NULL;
+    this->isolate_ = original.isolate_;
   }
 
   // Type testers.
@@ -205,8 +207,8 @@
   virtual Code* unchecked_code() const = 0;
 
   // Get the code associated with this frame.
-  Code* LookupCode(Isolate* isolate) const {
-    return GetContainingCode(isolate, pc());
+  Code* LookupCode() const {
+    return GetContainingCode(isolate(), pc());
   }
 
   // Get the code object that contains the given pc.
@@ -215,7 +217,8 @@
   // Get the code object containing the given pc and fill in the
   // safepoint entry and the number of stack slots. The pc must be at
   // a safepoint.
-  static Code* GetSafepointData(Address pc,
+  static Code* GetSafepointData(Isolate* isolate,
+                                Address pc,
                                 SafepointEntry* safepoint_entry,
                                 unsigned* stack_slots);
 
@@ -230,9 +233,11 @@
                      int index) const { }
 
  protected:
-  explicit StackFrame(StackFrameIterator* iterator) : iterator_(iterator) { }
+  inline explicit StackFrame(StackFrameIterator* iterator);
   virtual ~StackFrame() { }
 
+  Isolate* isolate() const { return isolate_; }
+
   // Compute the stack pointer for the calling frame.
   virtual Address GetCallerStackPointer() const = 0;
 
@@ -245,10 +250,11 @@
   inline StackHandler* top_handler() const;
 
   // Compute the stack frame type for the given state.
-  static Type ComputeType(State* state);
+  static Type ComputeType(Isolate* isolate, State* state);
 
  private:
   const StackFrameIterator* iterator_;
+  Isolate* isolate_;
   State state_;
 
   // Fill in the state of the calling frame.
@@ -257,6 +263,8 @@
   // Get the type and the state of the calling frame.
   virtual Type GetCallerState(State* state) const;
 
+  static const intptr_t kIsolateTag = 1;
+
   friend class StackFrameIterator;
   friend class StackHandlerIterator;
   friend class SafeStackFrameIterator;
@@ -430,7 +438,7 @@
   Handle<Object> receiver() { return receiver_; }
   Handle<JSFunction> function() { return function_; }
   Handle<Code> code() { return code_; }
-  Address pc() { return reinterpret_cast<Address>(*code_) + offset_; }
+  Address pc() { return code_->address() + offset_; }
   int offset() { return offset_; }
   bool is_constructor() { return is_constructor_; }
 
@@ -455,8 +463,11 @@
   inline void set_receiver(Object* value);
 
   // Access the parameters.
-  Object* GetParameter(int index) const;
-  int ComputeParametersCount() const;
+  inline Address GetParameterSlot(int index) const;
+  inline Object* GetParameter(int index) const;
+  inline int ComputeParametersCount() const {
+    return GetNumberOfIncomingArguments();
+  }
 
   // Check if this frame is a constructor frame invoked through 'new'.
   bool IsConstructor() const;
@@ -494,6 +505,8 @@
 
   virtual Address GetCallerStackPointer() const;
 
+  virtual int GetNumberOfIncomingArguments() const;
+
   // Garbage collection support. Iterates over incoming arguments,
   // receiver, and any callee-saved registers.
   void IterateArguments(ObjectVisitor* v) const;
@@ -554,6 +567,10 @@
   explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
       : JavaScriptFrame(iterator) { }
 
+  virtual int GetNumberOfIncomingArguments() const {
+    return Smi::cast(GetExpression(0))->value();
+  }
+
   virtual Address GetCallerStackPointer() const;
 
  private:
@@ -609,11 +626,15 @@
 
 class StackFrameIterator BASE_EMBEDDED {
  public:
-  // An iterator that iterates over the current thread's stack.
+  // An iterator that iterates over the current thread's stack
+  // and uses the current isolate.
   StackFrameIterator();
 
+  // An iterator that iterates over the isolate's current thread's stack.
+  explicit StackFrameIterator(Isolate* isolate);
+
   // An iterator that iterates over a given thread's stack.
-  explicit StackFrameIterator(ThreadLocalTop* thread);
+  StackFrameIterator(Isolate* isolate, ThreadLocalTop* t);
 
   // An iterator that can start from a given FP address.
   // If use_top, then work as usual, if fp isn't NULL, use it,
@@ -625,6 +646,8 @@
     return frame_;
   }
 
+  Isolate* isolate() const { return isolate_; }
+
   bool done() const { return frame_ == NULL; }
   void Advance() { (this->*advance_)(); }
 
@@ -632,6 +655,7 @@
   void Reset();
 
  private:
+  Isolate* isolate_;
 #define DECLARE_SINGLETON(ignore, type) type type##_;
   STACK_FRAME_TYPE_LIST(DECLARE_SINGLETON)
 #undef DECLARE_SINGLETON
@@ -667,13 +691,12 @@
  public:
   JavaScriptFrameIteratorTemp() { if (!done()) Advance(); }
 
-  explicit JavaScriptFrameIteratorTemp(ThreadLocalTop* thread) :
-      iterator_(thread) {
-    if (!done()) Advance();
-  }
+  inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
 
   // Skip frames until the frame with the given id is reached.
-  explicit JavaScriptFrameIteratorTemp(StackFrame::Id id);
+  explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); }
+
+  inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
 
   JavaScriptFrameIteratorTemp(Address fp, Address sp,
                               Address low_bound, Address high_bound) :
@@ -702,6 +725,8 @@
   void Reset();
 
  private:
+  inline void AdvanceToId(StackFrame::Id id);
+
   Iterator iterator_;
 };
 
@@ -716,6 +741,7 @@
 class StackTraceFrameIterator: public JavaScriptFrameIterator {
  public:
   StackTraceFrameIterator();
+  explicit StackTraceFrameIterator(Isolate* isolate);
   void Advance();
 
  private:
@@ -739,7 +765,7 @@
   void Advance();
   void Reset();
 
-  static bool is_active() { return active_count_ > 0; }
+  static bool is_active(Isolate* isolate);
 
   static bool IsWithinBounds(
       Address low_bound, Address high_bound, Address addr) {
@@ -786,13 +812,13 @@
   // heap objects.
   class ActiveCountMaintainer BASE_EMBEDDED {
    public:
-    ActiveCountMaintainer() { active_count_++; }
-    ~ActiveCountMaintainer() { active_count_--; }
+    explicit ActiveCountMaintainer(Isolate* isolate);
+    ~ActiveCountMaintainer();
+   private:
+    Isolate* isolate_;
   };
 
   ActiveCountMaintainer maintainer_;
-  // TODO(isolates): this is dangerous.
-  static int active_count_;
   StackAddressValidator stack_validator_;
   const bool is_valid_top_;
   const bool is_valid_fp_;
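
The frames.h changes above thread an explicit Isolate through stack-frame
iteration instead of reaching for a global. A minimal sketch of how the new
isolate-taking constructor would be used (the counting body is illustrative,
not part of this patch):

// Walk the JavaScript frames of a given isolate.
int CountJavaScriptFrames(Isolate* isolate) {
  int count = 0;
  for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
    count++;
  }
  return count;
}
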
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index d509cd5..d6ba56e 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,7 +27,7 @@
 
 #include "v8.h"
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
@@ -213,12 +213,6 @@
 }
 
 
-void BreakableStatementChecker::VisitIncrementOperation(
-    IncrementOperation* expr) {
-  UNREACHABLE();
-}
-
-
 void BreakableStatementChecker::VisitProperty(Property* expr) {
   // Property load is breakable.
   is_breakable_ = true;
@@ -286,7 +280,7 @@
   }
   CodeGenerator::MakeCodePrologue(info);
   const int kInitialBufferSize = 4 * KB;
-  MacroAssembler masm(NULL, kInitialBufferSize);
+  MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
 #ifdef ENABLE_GDB_JIT_INTERFACE
   masm.positions_recorder()->StartGDBJITLineInfoRecording();
 #endif
@@ -1357,11 +1351,6 @@
 }
 
 
-void FullCodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
-  UNREACHABLE();
-}
-
-
 int FullCodeGenerator::TryFinally::Exit(int stack_depth) {
   // The macros used here must preserve the result register.
   __ Drop(stack_depth);
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index c8dbf5d..bf8ac19 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -1445,11 +1445,16 @@
 }
 
 
+Mutex* GDBJITInterface::mutex_ = OS::CreateMutex();
+
+
 void GDBJITInterface::AddCode(const char* name,
                               Code* code,
                               GDBJITInterface::CodeTag tag,
                               Script* script) {
   if (!FLAG_gdbjit) return;
+
+  ScopedLock lock(mutex_);
   AssertNoAllocation no_gc;
 
   HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
@@ -1518,6 +1523,7 @@
 void GDBJITInterface::RemoveCode(Code* code) {
   if (!FLAG_gdbjit) return;
 
+  ScopedLock lock(mutex_);
   HashMap::Entry* e = GetEntries()->Lookup(code,
                                            HashForCodeObject(code),
                                            false);
@@ -1537,6 +1543,7 @@
 
 void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
                                                GDBJITLineInfo* line_info) {
+  ScopedLock lock(mutex_);
   ASSERT(!IsLineInfoTagged(line_info));
   HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
   ASSERT(e->value == NULL);
diff --git a/src/gdb-jit.h b/src/gdb-jit.h
index d46fec6..de6928f 100644
--- a/src/gdb-jit.h
+++ b/src/gdb-jit.h
@@ -126,6 +126,9 @@
   static void RemoveCode(Code* code);
 
   static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
+
+ private:
+  static Mutex* mutex_;
 };
 
 #define GDBJIT(action) GDBJITInterface::action
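
The GDBJIT locking added above leans on ScopedLock, V8's RAII mutex guard.
For reference, a sketch of that idiom (mirroring src/platform.h in spirit,
reproduced here only to make the ScopedLock lines self-explanatory):

class ScopedLock {
 public:
  explicit ScopedLock(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
  ~ScopedLock() { mutex_->Unlock(); }  // Released on every exit path.
 private:
  Mutex* mutex_;
  DISALLOW_COPY_AND_ASSIGN(ScopedLock);
};
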
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 4d13859..c4e8f13 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -558,28 +558,25 @@
 void GlobalHandles::AddObjectGroup(Object*** handles,
                                    size_t length,
                                    v8::RetainedObjectInfo* info) {
-  ObjectGroup* new_entry = new ObjectGroup(length, info);
-  for (size_t i = 0; i < length; ++i) {
-    new_entry->objects_.Add(handles[i]);
+  if (length == 0) {
+    if (info != NULL) info->Dispose();
+    return;
   }
-  object_groups_.Add(new_entry);
+  object_groups_.Add(ObjectGroup::New(handles, length, info));
 }
 
 
-void GlobalHandles::AddImplicitReferences(HeapObject* parent,
+void GlobalHandles::AddImplicitReferences(HeapObject** parent,
                                           Object*** children,
                                           size_t length) {
-  ImplicitRefGroup* new_entry = new ImplicitRefGroup(parent, length);
-  for (size_t i = 0; i < length; ++i) {
-    new_entry->children_.Add(children[i]);
-  }
-  implicit_ref_groups_.Add(new_entry);
+  if (length == 0) return;
+  implicit_ref_groups_.Add(ImplicitRefGroup::New(parent, children, length));
 }
 
 
 void GlobalHandles::RemoveObjectGroups() {
   for (int i = 0; i < object_groups_.length(); i++) {
-    delete object_groups_.at(i);
+    object_groups_.at(i)->Dispose();
   }
   object_groups_.Clear();
 }
@@ -587,7 +584,7 @@
 
 void GlobalHandles::RemoveImplicitRefGroups() {
   for (int i = 0; i < implicit_ref_groups_.length(); i++) {
-    delete implicit_ref_groups_.at(i);
+    implicit_ref_groups_.at(i)->Dispose();
   }
   implicit_ref_groups_.Clear();
 }
diff --git a/src/global-handles.h b/src/global-handles.h
index b77fcb7..a1a269f 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -28,6 +28,8 @@
 #ifndef V8_GLOBAL_HANDLES_H_
 #define V8_GLOBAL_HANDLES_H_
 
+#include "../include/v8-profiler.h"
+
 #include "list-inl.h"
 
 #include "../include/v8-profiler.h"
@@ -44,37 +46,67 @@
 // An object group is treated like a single JS object: if one object in
 // the group is alive, all objects in the same group are considered alive.
 // An object group is used to simulate object relationships in a DOM tree.
-class ObjectGroup : public Malloced {
+class ObjectGroup {
  public:
-  ObjectGroup() : objects_(4) {}
-  ObjectGroup(size_t capacity, v8::RetainedObjectInfo* info)
-      : objects_(static_cast<int>(capacity)),
-        info_(info) { }
-  ~ObjectGroup();
+  static ObjectGroup* New(Object*** handles,
+                          size_t length,
+                          v8::RetainedObjectInfo* info) {
+    ASSERT(length > 0);
+    ObjectGroup* group = reinterpret_cast<ObjectGroup*>(
+        malloc(OFFSET_OF(ObjectGroup, objects_[length])));
+    group->length_ = length;
+    group->info_ = info;
+    CopyWords(group->objects_, handles, static_cast<int>(length));
+    return group;
+  }
 
-  List<Object**> objects_;
+  void Dispose() {
+    if (info_ != NULL) info_->Dispose();
+    free(this);
+  }
+
+  size_t length_;
   v8::RetainedObjectInfo* info_;
+  Object** objects_[1];  // Variable sized array.
 
  private:
-  DISALLOW_COPY_AND_ASSIGN(ObjectGroup);
+  void* operator new(size_t size);
+  void operator delete(void* p);
+  ~ObjectGroup();
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectGroup);
 };
 
 
 // An implicit references group consists of two parts: a parent object and
 // a list of child objects.  If the parent is alive, all the children
 // are alive too.
-class ImplicitRefGroup : public Malloced {
+class ImplicitRefGroup {
  public:
-  ImplicitRefGroup() : children_(4) {}
-  ImplicitRefGroup(HeapObject* parent, size_t capacity)
-      : parent_(parent),
-        children_(static_cast<int>(capacity)) { }
+  static ImplicitRefGroup* New(HeapObject** parent,
+                               Object*** children,
+                               size_t length) {
+    ASSERT(length > 0);
+    ImplicitRefGroup* group = reinterpret_cast<ImplicitRefGroup*>(
+        malloc(OFFSET_OF(ImplicitRefGroup, children_[length])));
+    group->parent_ = parent;
+    group->length_ = length;
+    CopyWords(group->children_, children, static_cast<int>(length));
+    return group;
+  }
 
-  HeapObject* parent_;
-  List<Object**> children_;
+  void Dispose() {
+    free(this);
+  }
+
+  HeapObject** parent_;
+  size_t length_;
+  Object** children_[1];  // Variable sized array.
 
  private:
-  DISALLOW_COPY_AND_ASSIGN(ImplicitRefGroup);
+  void* operator new(size_t size);
+  void operator delete(void* p);
+  ~ImplicitRefGroup();
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ImplicitRefGroup);
 };
 
 
@@ -156,7 +188,7 @@
   // Add an implicit references' group.
   // Should be only used in GC callback function before a collection.
   // All groups are destroyed after a mark-compact collection.
-  void AddImplicitReferences(HeapObject* parent,
+  void AddImplicitReferences(HeapObject** parent,
                              Object*** children,
                              size_t length);
 
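ObjectGroup::New and ImplicitRefGroup::New above replace a List<Object**>
with a single malloc'ed block whose trailing array holds the handles. A
standalone sketch of that variable-length-struct idiom (plain offsetof and
memcpy stand in for V8's OFFSET_OF and CopyWords; all names here are
illustrative):

#include <cstddef>
#include <cstdlib>
#include <cstring>

struct Group {
  size_t length;
  void* objects[1];  // Trailing array; extra slots are allocated below.
};

Group* NewGroup(void** handles, size_t length) {
  // One allocation holds the header plus `length` trailing pointers.
  Group* group = static_cast<Group*>(
      malloc(offsetof(Group, objects) + length * sizeof(void*)));
  group->length = length;
  memcpy(group->objects, handles, length * sizeof(void*));
  return group;
}

void DisposeGroup(Group* group) { free(group); }  // Mirrors Dispose().
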
diff --git a/src/handles.cc b/src/handles.cc
index 97a06d9..326de86 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -369,6 +369,17 @@
 }
 
 
+Handle<Object> GetProperty(Handle<JSObject> obj,
+                           Handle<String> name,
+                           LookupResult* result) {
+  PropertyAttributes attributes;
+  Isolate* isolate = Isolate::Current();
+  CALL_HEAP_FUNCTION(isolate,
+                     obj->GetProperty(*obj, result, *name, &attributes),
+                     Object);
+}
+
+
 Handle<Object> GetElement(Handle<Object> obj,
                           uint32_t index) {
   Isolate* isolate = Isolate::Current();
diff --git a/src/handles.h b/src/handles.h
index a357a00..3839f37 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -244,6 +244,11 @@
 Handle<Object> GetProperty(Handle<Object> obj,
                            Handle<Object> key);
 
+Handle<Object> GetProperty(Handle<JSObject> obj,
+                           Handle<String> name,
+                           LookupResult* result);
+
+
 Handle<Object> GetElement(Handle<Object> obj,
                           uint32_t index);
 
diff --git a/src/heap.cc b/src/heap.cc
index 5d1a66e..9a3cfe4 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,7 @@
 #include "accessors.h"
 #include "api.h"
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compilation-cache.h"
 #include "debug.h"
 #include "heap-profiler.h"
@@ -941,6 +941,8 @@
 
   gc_state_ = SCAVENGE;
 
+  SwitchScavengingVisitorsTableIfProfilingWasEnabled();
+
   Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
 #ifdef DEBUG
   VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
@@ -1232,6 +1234,32 @@
 }
 
 
+enum LoggingAndProfiling {
+  LOGGING_AND_PROFILING_ENABLED,
+  LOGGING_AND_PROFILING_DISABLED
+};
+
+
+typedef void (*ScavengingCallback)(Map* map,
+                                   HeapObject** slot,
+                                   HeapObject* object);
+
+
+static Atomic32 scavenging_visitors_table_mode_;
+static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+
+
+INLINE(static void DoScavengeObject(Map* map,
+                                    HeapObject** slot,
+                                    HeapObject* obj));
+
+
+void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+  scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
+}
+
+
+template<LoggingAndProfiling logging_and_profiling_mode>
 class ScavengingVisitor : public StaticVisitorBase {
  public:
   static void Initialize() {
@@ -1240,23 +1268,22 @@
     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
     table_.Register(kVisitByteArray, &EvacuateByteArray);
     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
+
     table_.Register(kVisitGlobalContext,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        VisitSpecialized<Context::kSize>);
-
-    typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;
+                        template VisitSpecialized<Context::kSize>);
 
     table_.Register(kVisitConsString,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        VisitSpecialized<ConsString::kSize>);
+                        template VisitSpecialized<ConsString::kSize>);
 
     table_.Register(kVisitSharedFunctionInfo,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        VisitSpecialized<SharedFunctionInfo::kSize>);
+                        template VisitSpecialized<SharedFunctionInfo::kSize>);
 
     table_.Register(kVisitJSFunction,
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                    VisitSpecialized<JSFunction::kSize>);
+                        template VisitSpecialized<JSFunction::kSize>);
 
     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                    kVisitDataObject,
@@ -1271,12 +1298,10 @@
                                    kVisitStructGeneric>();
   }
 
-
-  static inline void Scavenge(Map* map, HeapObject** slot, HeapObject* obj) {
-    table_.GetVisitor(map)(map, slot, obj);
+  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
+    return &table_;
   }
 
-
  private:
   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
   enum SizeRestriction { SMALL, UNKNOWN_SIZE };
@@ -1313,21 +1338,24 @@
     // Set the forwarding address.
     source->set_map_word(MapWord::FromForwardingAddress(target));
 
+    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
-    // Update NewSpace stats if necessary.
-    RecordCopiedObject(heap, target);
+      // Update NewSpace stats if necessary.
+      RecordCopiedObject(heap, target);
 #endif
-    HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
+      HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
 #if defined(ENABLE_LOGGING_AND_PROFILING)
-    Isolate* isolate = heap->isolate();
-    if (isolate->logger()->is_logging() ||
-        isolate->cpu_profiler()->is_profiling()) {
-      if (target->IsSharedFunctionInfo()) {
-        PROFILE(isolate, SharedFunctionInfoMoveEvent(
-            source->address(), target->address()));
+      Isolate* isolate = heap->isolate();
+      if (isolate->logger()->is_logging() ||
+          isolate->cpu_profiler()->is_profiling()) {
+        if (target->IsSharedFunctionInfo()) {
+          PROFILE(isolate, SharedFunctionInfoMoveEvent(
+              source->address(), target->address()));
+        }
       }
-    }
 #endif
+    }
+
     return target;
   }
 
@@ -1443,7 +1471,7 @@
         return;
       }
 
-      Scavenge(first->map(), slot, first);
+      DoScavengeObject(first->map(), slot, first);
       object->set_map_word(MapWord::FromForwardingAddress(*slot));
       return;
     }
@@ -1470,13 +1498,51 @@
     }
   };
 
-  typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object);
-
-  static VisitorDispatchTable<Callback> table_;
+  static VisitorDispatchTable<ScavengingCallback> table_;
 };
 
 
-VisitorDispatchTable<ScavengingVisitor::Callback> ScavengingVisitor::table_;
+template<LoggingAndProfiling logging_and_profiling_mode>
+VisitorDispatchTable<ScavengingCallback>
+    ScavengingVisitor<logging_and_profiling_mode>::table_;
+
+
+static void InitializeScavengingVisitorsTables() {
+  ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  scavenging_visitors_table_.CopyFrom(
+      ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
+  scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
+}
+
+
+void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
+  if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
+    // Table was already updated by some isolate.
+    return;
+  }
+
+  if (isolate()->logger()->is_logging() ||
+      isolate()->cpu_profiler()->is_profiling() ||
+      (isolate()->heap_profiler() != NULL &&
+       isolate()->heap_profiler()->is_profiling())) {
+    // If one of the isolates is doing a scavenge at this moment, it
+    // might see this table in an inconsistent state when
+    // some of the callbacks point to
+    // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
+    // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
+    // However, this does not lead to any bugs as such an isolate does not
+    // have profiling enabled, and any isolate with profiling enabled is
+    // guaranteed to see the table in a consistent state.
+    scavenging_visitors_table_.CopyFrom(
+        ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
+
+    // We use Release_Store so that this write cannot be reordered ahead of
+    // the preceding writes to the table.
+    Release_Store(&scavenging_visitors_table_mode_,
+                  LOGGING_AND_PROFILING_ENABLED);
+  }
+}
 
 
 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
@@ -1484,7 +1550,7 @@
   MapWord first_word = object->map_word();
   ASSERT(!first_word.IsForwardingAddress());
   Map* map = first_word.ToMap();
-  ScavengingVisitor::Scavenge(map, p, object);
+  DoScavengeObject(map, p, object);
 }
 
 
@@ -3165,7 +3231,7 @@
   // Fill these accessors into the dictionary.
   DescriptorArray* descs = map->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    PropertyDetails details = descs->GetDetails(i);
+    PropertyDetails details(descs->GetDetails(i));
     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
     PropertyDetails d =
         PropertyDetails(details.attributes(), CALLBACKS, details.index());
@@ -3320,8 +3386,8 @@
   const uc32 kMaxSupportedChar = 0xFFFF;
   // Count the number of characters in the UTF-8 string and check if
   // it is an ASCII string.
-  Access<ScannerConstants::Utf8Decoder>
-      decoder(isolate_->scanner_constants()->utf8_decoder());
+  Access<UnicodeCache::Utf8Decoder>
+      decoder(isolate_->unicode_cache()->utf8_decoder());
   decoder->Reset(string.start(), string.length());
   int chars = 0;
   while (decoder->has_more()) {
@@ -4757,10 +4823,10 @@
   gc_initializer_mutex->Lock();
   static bool initialized_gc = false;
   if (!initialized_gc) {
-      initialized_gc = true;
-      ScavengingVisitor::Initialize();
-      NewSpaceScavenger::Initialize();
-      MarkCompactCollector::Initialize();
+    initialized_gc = true;
+    InitializeScavengingVisitorsTables();
+    NewSpaceScavenger::Initialize();
+    MarkCompactCollector::Initialize();
   }
   gc_initializer_mutex->Unlock();
 
diff --git a/src/heap.h b/src/heap.h
index 88074d7..7a1bed3 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -155,6 +155,7 @@
   V(name_symbol, "name")                                                 \
   V(number_symbol, "number")                                             \
   V(Number_symbol, "Number")                                             \
+  V(nan_symbol, "NaN")                                                   \
   V(RegExp_symbol, "RegExp")                                             \
   V(source_symbol, "source")                                             \
   V(global_symbol, "global")                                             \
@@ -1451,6 +1452,8 @@
   // Allocate empty fixed array.
   MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
 
+  void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
+
   // Performs a minor collection in new generation.
   void Scavenge();
 
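SwitchScavengingVisitorsTableIfProfilingWasEnabled above publishes a fully
written dispatch table with a single release store. A sketch of that
publish-once pattern (std::atomic is used here for clarity; V8 itself uses
Atomic32 and Release_Store, and the table contents are illustrative):

#include <atomic>

static int table[256];                  // Stand-in for the dispatch table.
static std::atomic<int> table_mode(0);  // 0 = plain, 1 = profiling.

void SwitchToProfilingTable() {
  if (table_mode.load(std::memory_order_relaxed) == 1) return;  // Done.
  for (int i = 0; i < 256; ++i) table[i] = i;  // Fill the table first...
  // ...then publish. The release store keeps the table writes from being
  // reordered past the flag, so a thread that sees mode == 1 via an
  // acquire load also sees a completely written table.
  table_mode.store(1, std::memory_order_release);
}
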
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 9bbe164..3b01f57 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -726,7 +726,7 @@
 
 void HChange::PrintDataTo(StringStream* stream) {
   HUnaryOperation::PrintDataTo(stream);
-  stream->Add(" %s to %s", from_.Mnemonic(), to_.Mnemonic());
+  stream->Add(" %s to %s", from_.Mnemonic(), to().Mnemonic());
 
   if (CanTruncateToInt32()) stream->Add(" truncating-int32");
   if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
@@ -1017,10 +1017,9 @@
 
 HConstant::HConstant(Handle<Object> handle, Representation r)
     : handle_(handle),
-      constant_type_(HType::TypeFromValue(handle)),
       has_int32_value_(false),
-      int32_value_(0),
       has_double_value_(false),
+      int32_value_(0),
       double_value_(0)  {
   set_representation(r);
   SetFlag(kUseGVN);
@@ -1050,6 +1049,23 @@
 }
 
 
+bool HConstant::ToBoolean() const {
+  // Compute the constant's boolean value, following the ToBoolean
+  // conversion of ECMAScript section 9.2.
+  if (HasInteger32Value()) return Integer32Value() != 0;
+  if (HasDoubleValue()) {
+    double v = DoubleValue();
+    return v != 0 && !isnan(v);
+  }
+  if (handle()->IsTrue()) return true;
+  if (handle()->IsFalse()) return false;
+  if (handle()->IsUndefined()) return false;
+  if (handle()->IsNull()) return false;
+  if (handle()->IsString() &&
+      String::cast(*handle())->length() == 0) return false;
+  return true;
+}
+
 void HConstant::PrintDataTo(StringStream* stream) {
   handle()->ShortPrint(stream);
 }
@@ -1342,18 +1358,29 @@
 }
 
 
-void HLoadGlobal::PrintDataTo(StringStream* stream) {
+void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
   stream->Add("[%p]", *cell());
   if (check_hole_value()) stream->Add(" (deleteable/read-only)");
 }
 
 
-void HStoreGlobal::PrintDataTo(StringStream* stream) {
+void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
+  stream->Add("%o ", *name());
+}
+
+
+void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
   stream->Add("[%p] = ", *cell());
   value()->PrintNameTo(stream);
 }
 
 
+void HStoreGlobalGeneric::PrintDataTo(StringStream* stream) {
+  stream->Add("%o = ", *name());
+  value()->PrintNameTo(stream);
+}
+
+
 void HLoadContextSlot::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
   stream->Add("[%d]", slot_index());
@@ -1407,7 +1434,7 @@
 
 
 HType HConstant::CalculateInferredType() {
-  return constant_type_;
+  return HType::TypeFromValue(handle_);
 }
 
 
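HConstant::ToBoolean above folds the ECMAScript section 9.2 ToBoolean
conversion at compile time. As a standalone restatement of the rules it
encodes (simplified; std types stand in for V8 handles):

#include <cmath>
#include <string>

// Numbers are falsy for 0 and NaN only.
bool ToBoolean(double v) { return v != 0 && !std::isnan(v); }
// Strings are falsy only when empty.
bool ToBoolean(const std::string& s) { return !s.empty(); }
// undefined, null and false convert to false; every other value,
// including all remaining objects, converts to true.
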
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index fed4b8b..e32a09c 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -114,6 +114,7 @@
   V(HasCachedArrayIndex)                       \
   V(InstanceOf)                                \
   V(InstanceOfKnownGlobal)                     \
+  V(InvokeFunction)                            \
   V(IsNull)                                    \
   V(IsObject)                                  \
   V(IsSmi)                                     \
@@ -124,7 +125,8 @@
   V(LoadElements)                              \
   V(LoadExternalArrayPointer)                  \
   V(LoadFunctionPrototype)                     \
-  V(LoadGlobal)                                \
+  V(LoadGlobalCell)                            \
+  V(LoadGlobalGeneric)                         \
   V(LoadKeyedFastElement)                      \
   V(LoadKeyedGeneric)                          \
   V(LoadKeyedSpecializedArrayElement)          \
@@ -147,12 +149,14 @@
   V(Simulate)                                  \
   V(StackCheck)                                \
   V(StoreContextSlot)                          \
-  V(StoreGlobal)                               \
+  V(StoreGlobalCell)                           \
+  V(StoreGlobalGeneric)                        \
   V(StoreKeyedFastElement)                     \
   V(StoreKeyedSpecializedArrayElement)         \
   V(StoreKeyedGeneric)                         \
   V(StoreNamedField)                           \
   V(StoreNamedGeneric)                         \
+  V(StringAdd)                                 \
   V(StringCharCodeAt)                          \
   V(StringCharFromCode)                        \
   V(StringLength)                              \
@@ -272,7 +276,7 @@
     return kind_ == other.kind_;
   }
 
-  Kind kind() const { return kind_; }
+  Kind kind() const { return static_cast<Kind>(kind_); }
   bool IsNone() const { return kind_ == kNone; }
   bool IsTagged() const { return kind_ == kTagged; }
   bool IsInteger32() const { return kind_ == kInteger32; }
@@ -286,7 +290,10 @@
  private:
   explicit Representation(Kind k) : kind_(k) { }
 
-  Kind kind_;
+  // Make sure kind fits in int8.
+  STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
+
+  int8_t kind_;
 };
 
 
@@ -393,9 +400,12 @@
     kUninitialized = 0x1fff  // 0001 1111 1111 1111
   };
 
+  // Make sure type fits in int16.
+  STATIC_ASSERT(kUninitialized < (1 << (2 * kBitsPerByte)));
+
   explicit HType(Type t) : type_(t) { }
 
-  Type type_;
+  int16_t type_;
 };
 
 
@@ -609,8 +619,8 @@
   int id_;
 
   Representation representation_;
-  SmallPointerList<HValue> uses_;
   HType type_;
+  SmallPointerList<HValue> uses_;
   Range* range_;
   int flags_;
 
@@ -931,7 +941,7 @@
           Representation from,
           Representation to,
           bool is_truncating)
-      : HUnaryOperation(value), from_(from), to_(to) {
+      : HUnaryOperation(value), from_(from) {
     ASSERT(!from.IsNone() && !to.IsNone());
     ASSERT(!from.Equals(to));
     set_representation(to);
@@ -946,7 +956,7 @@
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
   Representation from() const { return from_; }
-  Representation to() const { return to_; }
+  Representation to() const { return representation(); }
   virtual Representation RequiredInputRepresentation(int index) const {
     return from_;
   }
@@ -968,16 +978,14 @@
 
  private:
   Representation from_;
-  Representation to_;
 };
 
 
 class HSimulate: public HInstruction {
  public:
-  HSimulate(int ast_id, int pop_count, int environment_length)
+  HSimulate(int ast_id, int pop_count)
       : ast_id_(ast_id),
         pop_count_(pop_count),
-        environment_length_(environment_length),
         values_(2),
         assigned_indexes_(2) {}
   virtual ~HSimulate() {}
@@ -991,7 +999,6 @@
     ast_id_ = id;
   }
 
-  int environment_length() const { return environment_length_; }
   int pop_count() const { return pop_count_; }
   const ZoneList<HValue*>* values() const { return &values_; }
   int GetAssignedIndexAt(int index) const {
@@ -1037,7 +1044,6 @@
   }
   int ast_id_;
   int pop_count_;
-  int environment_length_;
   ZoneList<HValue*> values_;
   ZoneList<int> assigned_indexes_;
 };
@@ -1239,6 +1245,23 @@
 };
 
 
+class HInvokeFunction: public HBinaryCall {
+ public:
+  HInvokeFunction(HValue* context, HValue* function, int argument_count)
+      : HBinaryCall(context, function, argument_count) {
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  HValue* context() { return first(); }
+  HValue* function() { return second(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke_function")
+};
+
+
 class HCallConstantFunction: public HCall<0> {
  public:
   HCallConstantFunction(Handle<JSFunction> function, int argument_count)
@@ -1703,6 +1726,16 @@
   virtual void Verify();
 #endif
 
+  virtual HValue* Canonicalize() {
+    if (!value()->type().IsUninitialized() &&
+        value()->type().IsString() &&
+        first() == FIRST_STRING_TYPE &&
+        last() == LAST_STRING_TYPE) {
+      return NULL;
+    }
+    return this;
+  }
+
   static HCheckInstanceType* NewIsJSObjectOrJSFunction(HValue* value);
 
   InstanceType first() const { return first_; }
@@ -1744,6 +1777,18 @@
   virtual void Verify();
 #endif
 
+  virtual HValue* Canonicalize() {
+    HType value_type = value()->type();
+    if (!value_type.IsUninitialized() &&
+        (value_type.IsHeapNumber() ||
+         value_type.IsString() ||
+         value_type.IsBoolean() ||
+         value_type.IsNonPrimitive())) {
+      return NULL;
+    }
+    return this;
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check_non_smi")
 
  protected:
@@ -1963,6 +2008,8 @@
   }
   bool HasStringValue() const { return handle_->IsString(); }
 
+  bool ToBoolean() const;
+
   virtual intptr_t Hashcode() {
     ASSERT(!HEAP->allow_allocation(false));
     return reinterpret_cast<intptr_t>(*handle());
@@ -1984,14 +2031,13 @@
 
  private:
   Handle<Object> handle_;
-  HType constant_type_;
 
   // The following two values represent the int32 and the double value of the
   // given constant if there is a lossless conversion between the constant
   // and the specific representation.
-  bool has_int32_value_;
+  bool has_int32_value_ : 1;
+  bool has_double_value_ : 1;
   int32_t int32_value_;
-  bool has_double_value_;
   double double_value_;
 };
 
@@ -2809,9 +2855,9 @@
 };
 
 
-class HLoadGlobal: public HTemplateInstruction<0> {
+class HLoadGlobalCell: public HTemplateInstruction<0> {
  public:
-  HLoadGlobal(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
+  HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
       : cell_(cell), check_hole_value_(check_hole_value) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
@@ -2832,11 +2878,11 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load_global")
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load_global_cell")
 
  protected:
   virtual bool DataEquals(HValue* other) {
-    HLoadGlobal* b = HLoadGlobal::cast(other);
+    HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
     return cell_.is_identical_to(b->cell());
   }
 
@@ -2846,11 +2892,43 @@
 };
 
 
-class HStoreGlobal: public HUnaryOperation {
+class HLoadGlobalGeneric: public HBinaryOperation {
  public:
-  HStoreGlobal(HValue* value,
-               Handle<JSGlobalPropertyCell> cell,
-               bool check_hole_value)
+  HLoadGlobalGeneric(HValue* context,
+                     HValue* global_object,
+                     Handle<Object> name,
+                     bool for_typeof)
+      : HBinaryOperation(context, global_object),
+        name_(name),
+        for_typeof_(for_typeof) {
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  HValue* context() { return OperandAt(0); }
+  HValue* global_object() { return OperandAt(1); }
+  Handle<Object> name() const { return name_; }
+  bool for_typeof() const { return for_typeof_; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load_global_generic")
+
+ private:
+  Handle<Object> name_;
+  bool for_typeof_;
+};
+
+
+class HStoreGlobalCell: public HUnaryOperation {
+ public:
+  HStoreGlobalCell(HValue* value,
+                   Handle<JSGlobalPropertyCell> cell,
+                   bool check_hole_value)
       : HUnaryOperation(value),
         cell_(cell),
         check_hole_value_(check_hole_value) {
@@ -2865,7 +2943,7 @@
   }
   virtual void PrintDataTo(StringStream* stream);
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store_global")
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store_global_cell")
 
  private:
   Handle<JSGlobalPropertyCell> cell_;
@@ -2873,6 +2951,42 @@
 };
 
 
+class HStoreGlobalGeneric: public HTemplateInstruction<3> {
+ public:
+  HStoreGlobalGeneric(HValue* context,
+                      HValue* global_object,
+                      Handle<Object> name,
+                      HValue* value,
+                      bool strict_mode)
+      : name_(name),
+        strict_mode_(strict_mode) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, global_object);
+    SetOperandAt(2, value);
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  HValue* context() { return OperandAt(0); }
+  HValue* global_object() { return OperandAt(1); }
+  Handle<Object> name() const { return name_; }
+  HValue* value() { return OperandAt(2); }
+  bool strict_mode() { return strict_mode_; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store_global_generic")
+
+ private:
+  Handle<Object> name_;
+  bool strict_mode_;
+};
+
+
 class HLoadContextSlot: public HUnaryOperation {
  public:
   HLoadContextSlot(HValue* context , int slot_index)
@@ -3201,8 +3315,10 @@
   HStoreNamedGeneric(HValue* context,
                      HValue* object,
                      Handle<String> name,
-                     HValue* value)
-      : name_(name) {
+                     HValue* value,
+                     bool strict_mode)
+      : name_(name),
+        strict_mode_(strict_mode) {
     SetOperandAt(0, object);
     SetOperandAt(1, value);
     SetOperandAt(2, context);
@@ -3213,6 +3329,7 @@
   HValue* value() { return OperandAt(1); }
   HValue* context() { return OperandAt(2); }
   Handle<String> name() { return name_; }
+  bool strict_mode() { return strict_mode_; }
 
   virtual void PrintDataTo(StringStream* stream);
 
@@ -3224,6 +3341,7 @@
 
  private:
   Handle<String> name_;
+  bool strict_mode_;
 };
 
 
@@ -3301,7 +3419,9 @@
   HStoreKeyedGeneric(HValue* context,
                      HValue* object,
                      HValue* key,
-                     HValue* value) {
+                     HValue* value,
+                     bool strict_mode)
+      : strict_mode_(strict_mode) {
     SetOperandAt(0, object);
     SetOperandAt(1, key);
     SetOperandAt(2, value);
@@ -3313,6 +3433,7 @@
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
   HValue* context() { return OperandAt(3); }
+  bool strict_mode() { return strict_mode_; }
 
   virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
@@ -3321,6 +3442,32 @@
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store_keyed_generic")
+
+ private:
+  bool strict_mode_;
+};
+
+
+class HStringAdd: public HBinaryOperation {
+ public:
+  HStringAdd(HValue* left, HValue* right) : HBinaryOperation(left, right) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetFlag(kDependsOnMaps);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  virtual HType CalculateInferredType() {
+    return HType::String();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string_add")
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
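A recurring theme in the hydrogen-instructions.h changes above (the int8_t
kind_, the int16_t type_, the adjacent has_int32_value_/has_double_value_
bits, and the reordered HValue fields) is shrinking per-instruction memory.
A small illustration of why field order and bit-fields matter (sizes are
typical for 64-bit targets, not guaranteed by the standard):

#include <cstdint>

struct Loose {
  bool a;      // 1 byte + 3 bytes of padding before x.
  int32_t x;
  bool b;      // 1 byte + 7 bytes of padding before y.
  double y;
};  // Commonly sizeof(Loose) == 24.

struct Packed {
  bool a : 1;  // Both flags share a single byte.
  bool b : 1;
  int32_t x;   // Only 3 bytes of padding remain, paid once.
  double y;
};  // Commonly sizeof(Packed) == 16.
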
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 2383192..f6c47f3 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -75,7 +75,7 @@
 
 void HBasicBlock::AttachLoopInformation() {
   ASSERT(!IsLoopHeader());
-  loop_information_ = new HLoopInformation(this);
+  loop_information_ = new(zone()) HLoopInformation(this);
 }
 
 
@@ -107,7 +107,7 @@
   ASSERT(!instr->IsLinked());
   ASSERT(!IsFinished());
   if (first_ == NULL) {
-    HBlockEntry* entry = new HBlockEntry();
+    HBlockEntry* entry = new(zone()) HBlockEntry();
     entry->InitializeAsFirst(this);
     first_ = last_ = entry;
   }
@@ -120,7 +120,7 @@
   ASSERT(HasEnvironment());
   HEnvironment* environment = last_environment();
 
-  HDeoptimize* instr = new HDeoptimize(environment->length());
+  HDeoptimize* instr = new(zone()) HDeoptimize(environment->length());
 
   for (int i = 0; i < environment->length(); i++) {
     HValue* val = environment->values()->at(i);
@@ -140,8 +140,7 @@
   int push_count = environment->push_count();
   int pop_count = environment->pop_count();
 
-  int length = environment->length();
-  HSimulate* instr = new HSimulate(id, pop_count, length);
+  HSimulate* instr = new(zone()) HSimulate(id, pop_count);
   for (int i = push_count - 1; i >= 0; --i) {
     instr->AddPushedValue(environment->ExpressionStackAt(i));
   }
@@ -169,11 +168,11 @@
 
 void HBasicBlock::Goto(HBasicBlock* block, bool include_stack_check) {
   if (block->IsInlineReturnTarget()) {
-    AddInstruction(new HLeaveInlined);
+    AddInstruction(new(zone()) HLeaveInlined);
     last_environment_ = last_environment()->outer();
   }
   AddSimulate(AstNode::kNoNumber);
-  HGoto* instr = new HGoto(block);
+  HGoto* instr = new(zone()) HGoto(block);
   instr->set_include_stack_check(include_stack_check);
   Finish(instr);
 }
@@ -182,11 +181,11 @@
 void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) {
   ASSERT(target->IsInlineReturnTarget());
   ASSERT(return_value != NULL);
-  AddInstruction(new HLeaveInlined);
+  AddInstruction(new(zone()) HLeaveInlined);
   last_environment_ = last_environment()->outer();
   last_environment()->Push(return_value);
   AddSimulate(AstNode::kNoNumber);
-  HGoto* instr = new HGoto(target);
+  HGoto* instr = new(zone()) HGoto(target);
   Finish(instr);
 }
 
@@ -243,7 +242,7 @@
 
 
 void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
-  if (!predecessors_.is_empty()) {
+  if (HasPredecessor()) {
     // Only loop header blocks can have a predecessor added after
     // instructions have been added to the block (they have phis for all
     // values in the environment; these phis may be eliminated later).
@@ -494,8 +493,8 @@
 HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
                                Object* value) {
   if (!pointer->is_set()) {
-    HConstant* constant = new HConstant(Handle<Object>(value),
-                                        Representation::Tagged());
+    HConstant* constant = new(zone()) HConstant(Handle<Object>(value),
+                                                Representation::Tagged());
     constant->InsertAfter(GetConstantUndefined());
     pointer->set(constant);
   }
@@ -581,8 +580,9 @@
       blocks_(8),
       values_(16),
       phi_list_(NULL) {
-  start_environment_ = new HEnvironment(NULL, info->scope(), info->closure());
-  start_environment_->set_ast_id(info->function()->id());
+  start_environment_ =
+      new(zone()) HEnvironment(NULL, info->scope(), info->closure());
+  start_environment_->set_ast_id(AstNode::kFunctionEntryId);
   entry_block_ = CreateBasicBlock();
   entry_block_->SetInitialEnvironment(start_environment_);
 }
@@ -606,7 +606,7 @@
 
   if (!FLAG_use_lithium) return Handle<Code>::null();
 
-  MacroAssembler assembler(NULL, 0);
+  MacroAssembler assembler(info->isolate(), NULL, 0);
   LCodeGen generator(chunk, &assembler, info);
 
   if (FLAG_eliminate_empty_blocks) {
@@ -631,7 +631,7 @@
 
 
 HBasicBlock* HGraph::CreateBasicBlock() {
-  HBasicBlock* result = new HBasicBlock(this);
+  HBasicBlock* result = new(zone()) HBasicBlock(this);
   blocks_.Add(result);
   return result;
 }
@@ -1273,6 +1273,7 @@
 
   HGraph* graph() { return graph_; }
   CompilationInfo* info() { return info_; }
+  Zone* zone() { return graph_->zone(); }
 
   HGraph* graph_;
   CompilationInfo* info_;
@@ -1290,7 +1291,7 @@
   if (FLAG_loop_invariant_code_motion) {
     LoopInvariantCodeMotion();
   }
-  HValueMap* map = new HValueMap();
+  HValueMap* map = new(zone()) HValueMap();
   AnalyzeBlock(graph_->blocks()->at(0), map);
 }
 
@@ -1457,7 +1458,7 @@
   for (int i = 0; i < length; ++i) {
     HBasicBlock* dominated = block->dominated_blocks()->at(i);
     // No need to copy the map for the last child in the dominator tree.
-    HValueMap* successor_map = (i == length - 1) ? map : map->Copy();
+    HValueMap* successor_map = (i == length - 1) ? map : map->Copy(zone());
 
     // If the dominated block is not a successor to this block we have to
     // kill everything killed on any path between this block and the
@@ -1495,6 +1496,8 @@
   void AddDependantsToWorklist(HValue* current);
   void InferBasedOnUses(HValue* current);
 
+  Zone* zone() { return graph_->zone(); }
+
   HGraph* graph_;
   ZoneList<HValue*> worklist_;
   BitVector in_worklist_;
@@ -1606,7 +1609,7 @@
   ScopedVector<BitVector*> connected_phis(num_phis);
   for (int i = 0; i < num_phis; i++) {
     phi_list->at(i)->InitRealUses(i);
-    connected_phis[i] = new BitVector(num_phis);
+    connected_phis[i] = new(zone()) BitVector(num_phis);
     connected_phis[i]->Add(i);
   }
 
@@ -1771,7 +1774,8 @@
   }
 
   if (new_value == NULL) {
-    new_value = new HChange(value, value->representation(), to, is_truncating);
+    new_value =
+        new(zone()) HChange(value, value->representation(), to, is_truncating);
   }
 
   new_value->InsertBefore(next);
@@ -1804,7 +1808,7 @@
     ZoneList<Representation>* to_convert_reps) {
   Representation r = current->representation();
   if (r.IsNone()) return;
-  if (current->uses()->length() == 0) return;
+  if (current->uses()->is_empty()) return;
 
   // Collect the representation changes in a sorted list.  This allows
   // us to avoid duplicate changes without searching the list.
@@ -1980,7 +1984,10 @@
 // Implementation of utility classes to represent an expression's context in
 // the AST.
 AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
-    : owner_(owner), kind_(kind), outer_(owner->ast_context()) {
+    : owner_(owner),
+      kind_(kind),
+      outer_(owner->ast_context()),
+      for_typeof_(false) {
   owner->set_ast_context(this);  // Push.
 #ifdef DEBUG
   original_length_ = owner->environment()->length();
@@ -2059,7 +2066,7 @@
   HGraphBuilder* builder = owner();
   HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
-  HTest* test = new HTest(value, empty_true, empty_false);
+  HTest* test = new(zone()) HTest(value, empty_true, empty_false);
   builder->current_block()->Finish(test);
 
   empty_true->Goto(if_true(), false);
@@ -2069,37 +2076,17 @@
 
 
 // HGraphBuilder infrastructure for bailing out and checking bailouts.
-#define BAILOUT(reason)                         \
+#define CHECK_BAILOUT(call)                     \
   do {                                          \
-    Bailout(reason);                            \
-    return;                                     \
-  } while (false)
-
-
-#define CHECK_BAILOUT                           \
-  do {                                          \
+    call;                                       \
     if (HasStackOverflow()) return;             \
   } while (false)
 
 
-#define VISIT_FOR_EFFECT(expr)                  \
-  do {                                          \
-    VisitForEffect(expr);                       \
-    if (HasStackOverflow()) return;             \
-  } while (false)
-
-
-#define VISIT_FOR_VALUE(expr)                   \
-  do {                                          \
-    VisitForValue(expr);                        \
-    if (HasStackOverflow()) return;             \
-  } while (false)
-
-
-#define VISIT_FOR_CONTROL(expr, true_block, false_block)        \
+#define CHECK_ALIVE(call)                                       \
   do {                                                          \
-    VisitForControl(expr, true_block, false_block);             \
-    if (HasStackOverflow()) return;                             \
+    call;                                                       \
+    if (HasStackOverflow() || current_block() == NULL) return;  \
   } while (false)
 
 
@@ -2124,6 +2111,13 @@
 }
 
 
+void HGraphBuilder::VisitForTypeOf(Expression* expr) {
+  ValueContext for_value(this);
+  for_value.set_for_typeof(true);
+  Visit(expr);
+}
+
+
 void HGraphBuilder::VisitForControl(Expression* expr,
                                     HBasicBlock* true_block,
                                     HBasicBlock* false_block) {
@@ -2133,28 +2128,27 @@
 
 
 void HGraphBuilder::VisitArgument(Expression* expr) {
-  VISIT_FOR_VALUE(expr);
-  Push(AddInstruction(new HPushArgument(Pop())));
+  CHECK_ALIVE(VisitForValue(expr));
+  Push(AddInstruction(new(zone()) HPushArgument(Pop())));
 }
 
 
 void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
   for (int i = 0; i < arguments->length(); i++) {
-    VisitArgument(arguments->at(i));
-    if (HasStackOverflow() || current_block() == NULL) return;
+    CHECK_ALIVE(VisitArgument(arguments->at(i)));
   }
 }
 
 
 void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
   for (int i = 0; i < exprs->length(); ++i) {
-    VISIT_FOR_VALUE(exprs->at(i));
+    CHECK_ALIVE(VisitForValue(exprs->at(i)));
   }
 }
 
 
 HGraph* HGraphBuilder::CreateGraph() {
-  graph_ = new HGraph(info());
+  graph_ = new(zone()) HGraph(info());
   if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
 
   {
@@ -2168,7 +2162,7 @@
     }
     SetupScope(scope);
     VisitDeclarations(scope->declarations());
-    AddInstruction(new HStackCheck());
+    AddInstruction(new(zone()) HStackCheck());
 
     // Add an edge to the body entry.  This is warty: the graph's start
     // environment will be used by the Lithium translation as the initial
@@ -2188,13 +2182,13 @@
     HEnvironment* initial_env = environment()->CopyWithoutHistory();
     HBasicBlock* body_entry = CreateBasicBlock(initial_env);
     current_block()->Goto(body_entry);
-    body_entry->SetJoinId(info()->function()->id());
+    body_entry->SetJoinId(AstNode::kFunctionEntryId);
     set_current_block(body_entry);
     VisitStatements(info()->function()->body());
     if (HasStackOverflow()) return NULL;
 
     if (current_block() != NULL) {
-      HReturn* instr = new HReturn(graph()->GetConstantUndefined());
+      HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
       current_block()->FinishExit(instr);
       set_current_block(NULL);
     }
@@ -2271,7 +2265,7 @@
   }
 
   while (!arguments.is_empty()) {
-    AddInstruction(new HPushArgument(arguments.RemoveLast()));
+    AddInstruction(new(zone()) HPushArgument(arguments.RemoveLast()));
   }
   return call;
 }
@@ -2279,9 +2273,9 @@
 
 void HGraphBuilder::SetupScope(Scope* scope) {
   // We don't yet handle the function name for named function expressions.
-  if (scope->function() != NULL) BAILOUT("named function expression");
+  if (scope->function() != NULL) return Bailout("named function expression");
 
-  HConstant* undefined_constant = new HConstant(
+  HConstant* undefined_constant = new(zone()) HConstant(
       isolate()->factory()->undefined_value(), Representation::Tagged());
   AddInstruction(undefined_constant);
   graph_->set_undefined_constant(undefined_constant);
@@ -2290,7 +2284,7 @@
   // parameter index 0.
   int count = scope->num_parameters() + 1;
   for (int i = 0; i < count; ++i) {
-    HInstruction* parameter = AddInstruction(new HParameter(i));
+    HInstruction* parameter = AddInstruction(new(zone()) HParameter(i));
     environment()->Bind(i, parameter);
   }
 
@@ -2305,9 +2299,9 @@
     if (!scope->arguments()->IsStackAllocated() ||
         (scope->arguments_shadow() != NULL &&
         !scope->arguments_shadow()->IsStackAllocated())) {
-      BAILOUT("context-allocated arguments");
+      return Bailout("context-allocated arguments");
     }
-    HArgumentsObject* object = new HArgumentsObject;
+    HArgumentsObject* object = new(zone()) HArgumentsObject;
     AddInstruction(object);
     graph()->SetArgumentsObject(object);
     environment()->Bind(scope->arguments(), object);
@@ -2320,8 +2314,7 @@
 
 void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
   for (int i = 0; i < statements->length(); i++) {
-    Visit(statements->at(i));
-    if (HasStackOverflow() || current_block() == NULL) break;
+    CHECK_ALIVE(Visit(statements->at(i)));
   }
 }
 
@@ -2343,10 +2336,12 @@
 
 
 void HGraphBuilder::VisitBlock(Block* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
-    VisitStatements(stmt->statements());
-    CHECK_BAILOUT;
+    CHECK_BAILOUT(VisitStatements(stmt->statements()));
   }
   HBasicBlock* break_block = break_info.break_block();
   if (break_block != NULL) {
@@ -2358,15 +2353,24 @@
 
 
 void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   VisitForEffect(stmt->expression());
 }
 
 
 void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
 }
 
 
 void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   if (stmt->condition()->ToBooleanIsTrue()) {
     AddSimulate(stmt->ThenId());
     Visit(stmt->then_statement());
@@ -2376,20 +2380,27 @@
   } else {
     HBasicBlock* cond_true = graph()->CreateBasicBlock();
     HBasicBlock* cond_false = graph()->CreateBasicBlock();
-    VISIT_FOR_CONTROL(stmt->condition(), cond_true, cond_false);
-    cond_true->SetJoinId(stmt->ThenId());
-    cond_false->SetJoinId(stmt->ElseId());
+    CHECK_BAILOUT(VisitForControl(stmt->condition(), cond_true, cond_false));
 
-    set_current_block(cond_true);
-    Visit(stmt->then_statement());
-    CHECK_BAILOUT;
-    HBasicBlock* other = current_block();
+    if (cond_true->HasPredecessor()) {
+      cond_true->SetJoinId(stmt->ThenId());
+      set_current_block(cond_true);
+      CHECK_BAILOUT(Visit(stmt->then_statement()));
+      cond_true = current_block();
+    } else {
+      cond_true = NULL;
+    }
 
-    set_current_block(cond_false);
-    Visit(stmt->else_statement());
-    CHECK_BAILOUT;
+    if (cond_false->HasPredecessor()) {
+      cond_false->SetJoinId(stmt->ElseId());
+      set_current_block(cond_false);
+      CHECK_BAILOUT(Visit(stmt->else_statement()));
+      cond_false = current_block();
+    } else {
+      cond_false = NULL;
+    }
 
-    HBasicBlock* join = CreateJoin(other, current_block(), stmt->id());
+    HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->id());
     set_current_block(join);
   }
 }
@@ -2427,6 +2438,9 @@
 
 
 void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   HBasicBlock* continue_block = break_scope()->Get(stmt->target(), CONTINUE);
   current_block()->Goto(continue_block);
   set_current_block(NULL);
@@ -2434,6 +2448,9 @@
 
 
 void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   HBasicBlock* break_block = break_scope()->Get(stmt->target(), BREAK);
   current_block()->Goto(break_block);
   set_current_block(NULL);
@@ -2441,12 +2458,15 @@
 
 
 void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   AstContext* context = call_context();
   if (context == NULL) {
     // Not an inlined return, so an actual one.
-    VISIT_FOR_VALUE(stmt->expression());
+    CHECK_ALIVE(VisitForValue(stmt->expression()));
     HValue* result = environment()->Pop();
-    current_block()->FinishExit(new HReturn(result));
+    current_block()->FinishExit(new(zone()) HReturn(result));
     set_current_block(NULL);
   } else {
     // Return from an inlined function, visit the subexpression in the
@@ -2457,11 +2477,11 @@
                       test->if_true(),
                       test->if_false());
     } else if (context->IsEffect()) {
-      VISIT_FOR_EFFECT(stmt->expression());
+      CHECK_ALIVE(VisitForEffect(stmt->expression()));
       current_block()->Goto(function_return(), false);
     } else {
       ASSERT(context->IsValue());
-      VISIT_FOR_VALUE(stmt->expression());
+      CHECK_ALIVE(VisitForValue(stmt->expression()));
       HValue* return_value = environment()->Pop();
       current_block()->AddLeaveInlined(return_value, function_return());
     }
@@ -2471,26 +2491,35 @@
 
 
 void HGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
-  BAILOUT("WithEnterStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("WithEnterStatement");
 }
 
 
 void HGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
-  BAILOUT("WithExitStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("WithExitStatement");
 }
 
 
 void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   // We only optimize switch statements with smi-literal smi comparisons,
   // with a bounded number of clauses.
   const int kCaseClauseLimit = 128;
   ZoneList<CaseClause*>* clauses = stmt->cases();
   int clause_count = clauses->length();
   if (clause_count > kCaseClauseLimit) {
-    BAILOUT("SwitchStatement: too many clauses");
+    return Bailout("SwitchStatement: too many clauses");
   }
 
-  VISIT_FOR_VALUE(stmt->tag());
+  CHECK_ALIVE(VisitForValue(stmt->tag()));
   AddSimulate(stmt->EntryId());
   HValue* tag_value = Pop();
   HBasicBlock* first_test_block = current_block();
@@ -2501,7 +2530,7 @@
     CaseClause* clause = clauses->at(i);
     if (clause->is_default()) continue;
     if (!clause->label()->IsSmiLiteral()) {
-      BAILOUT("SwitchStatement: non-literal switch label");
+      return Bailout("SwitchStatement: non-literal switch label");
     }
 
     // Unconditionally deoptimize on the first non-smi compare.
@@ -2513,15 +2542,16 @@
     }
 
     // Otherwise generate a compare and branch.
-    VISIT_FOR_VALUE(clause->label());
+    CHECK_ALIVE(VisitForValue(clause->label()));
     HValue* label_value = Pop();
-    HCompare* compare = new HCompare(tag_value, label_value, Token::EQ_STRICT);
+    HCompare* compare =
+        new(zone()) HCompare(tag_value, label_value, Token::EQ_STRICT);
     compare->SetInputRepresentation(Representation::Integer32());
     ASSERT(!compare->HasSideEffects());
     AddInstruction(compare);
     HBasicBlock* body_block = graph()->CreateBasicBlock();
     HBasicBlock* next_test_block = graph()->CreateBasicBlock();
-    HTest* branch = new HTest(compare, body_block, next_test_block);
+    HTest* branch = new(zone()) HTest(compare, body_block, next_test_block);
     current_block()->Finish(branch);
     set_current_block(next_test_block);
   }
@@ -2574,8 +2604,7 @@
         set_current_block(join);
       }
 
-      VisitStatements(clause->statements());
-      CHECK_BAILOUT;
+      CHECK_BAILOUT(VisitStatements(clause->statements()));
       fall_through_block = current_block();
     }
   }
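
For a switch that survives the checks above, each non-default clause becomes
an integer strict-equality compare plus a two-way branch: a match enters the
clause body, a mismatch falls through to the next test block. Schematically,
with hypothetical labels rather than V8 code:

    // switch (tag) { case 1: A; case 2: B; default: D; }
    if (tag == 1) goto body_1;    // one HCompare + HTest per clause
    if (tag == 2) goto body_2;
    goto clause_d;                // the last test block reaches the default
    body_1:   A;                  // bodies still fall through, as in JS
    body_2:   B;
    clause_d: D;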
@@ -2607,7 +2636,7 @@
   HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
   HBasicBlock* osr_entry = graph()->CreateBasicBlock();
   HValue* true_value = graph()->GetConstantTrue();
-  HTest* test = new HTest(true_value, non_osr_entry, osr_entry);
+  HTest* test = new(zone()) HTest(true_value, non_osr_entry, osr_entry);
   current_block()->Finish(test);
 
   HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
@@ -2621,13 +2650,13 @@
   ASSERT(count ==
          (environment()->parameter_count() + environment()->local_count()));
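+  // Every value live at the OSR entry arrives from the unoptimized frame,
+  // so each parameter and local is rebound to an opaque HUnknownOSRValue.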
   for (int i = 0; i < count; ++i) {
-    HUnknownOSRValue* unknown = new HUnknownOSRValue;
+    HUnknownOSRValue* unknown = new(zone()) HUnknownOSRValue;
     AddInstruction(unknown);
     environment()->Bind(i, unknown);
   }
 
   AddSimulate(osr_entry_id);
-  AddInstruction(new HOsrEntry(osr_entry_id));
+  AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
   current_block()->Goto(loop_predecessor);
   loop_predecessor->SetJoinId(statement->EntryId());
   set_current_block(loop_predecessor);
@@ -2635,6 +2664,8 @@
 
 
 void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
-  ASSERT(current_block() != NULL);
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   PreProcessOsrEntry(stmt);
   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
@@ -2643,8 +2675,7 @@
 
   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
-    Visit(stmt->body());
-    CHECK_BAILOUT;
+    CHECK_BAILOUT(Visit(stmt->body()));
   }
   HBasicBlock* body_exit =
       JoinContinue(stmt, current_block(), break_info.continue_block());
@@ -2655,9 +2686,17 @@
     // back edge.
     body_exit = graph()->CreateBasicBlock();
     loop_successor = graph()->CreateBasicBlock();
-    VISIT_FOR_CONTROL(stmt->cond(), body_exit, loop_successor);
-    body_exit->SetJoinId(stmt->BackEdgeId());
-    loop_successor->SetJoinId(stmt->ExitId());
+    CHECK_BAILOUT(VisitForControl(stmt->cond(), body_exit, loop_successor));
+    if (body_exit->HasPredecessor()) {
+      body_exit->SetJoinId(stmt->BackEdgeId());
+    } else {
+      body_exit = NULL;
+    }
+    if (loop_successor->HasPredecessor()) {
+      loop_successor->SetJoinId(stmt->ExitId());
+    } else {
+      loop_successor = NULL;
+    }
   }
   HBasicBlock* loop_exit = CreateLoop(stmt,
                                       loop_entry,
@@ -2669,6 +2708,8 @@
 
 
 void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
-  ASSERT(current_block() != NULL);
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   PreProcessOsrEntry(stmt);
   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
@@ -2680,16 +2722,22 @@
   if (!stmt->cond()->ToBooleanIsTrue()) {
     HBasicBlock* body_entry = graph()->CreateBasicBlock();
     loop_successor = graph()->CreateBasicBlock();
-    VISIT_FOR_CONTROL(stmt->cond(), body_entry, loop_successor);
-    body_entry->SetJoinId(stmt->BodyId());
-    loop_successor->SetJoinId(stmt->ExitId());
-    set_current_block(body_entry);
+    CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
+    if (body_entry->HasPredecessor()) {
+      body_entry->SetJoinId(stmt->BodyId());
+      set_current_block(body_entry);
+    }
+    if (loop_successor->HasPredecessor()) {
+      loop_successor->SetJoinId(stmt->ExitId());
+    } else {
+      loop_successor = NULL;
+    }
   }
 
   BreakAndContinueInfo break_info(stmt);
-  { BreakAndContinueScope push(&break_info, this);
-    Visit(stmt->body());
-    CHECK_BAILOUT;
+  if (current_block() != NULL) {
+    BreakAndContinueScope push(&break_info, this);
+    CHECK_BAILOUT(Visit(stmt->body()));
   }
   HBasicBlock* body_exit =
       JoinContinue(stmt, current_block(), break_info.continue_block());
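
The HasPredecessor checks repeated in the do-while, while, for, and
Conditional hunks all encode one rule: after VisitForControl, a target block
that never gained a predecessor is statically unreachable (the condition
folded), and handing NULL onward lets CreateJoin and CreateLoop drop that
edge. A hypothetical helper equivalent to the inlined pattern:

    // Not part of the patch; shows the shape of the repeated guard.
    static HBasicBlock* JoinedIfReachable(HBasicBlock* block, int join_id) {
      if (!block->HasPredecessor()) return NULL;  // branch folded away
      block->SetJoinId(join_id);
      return block;
    }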
@@ -2703,9 +2751,11 @@
 
 
 void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   if (stmt->init() != NULL) {
-    Visit(stmt->init());
-    CHECK_BAILOUT;
+    CHECK_ALIVE(Visit(stmt->init()));
   }
   ASSERT(current_block() != NULL);
   PreProcessOsrEntry(stmt);
@@ -2717,24 +2767,29 @@
   if (stmt->cond() != NULL) {
     HBasicBlock* body_entry = graph()->CreateBasicBlock();
     loop_successor = graph()->CreateBasicBlock();
-    VISIT_FOR_CONTROL(stmt->cond(), body_entry, loop_successor);
-    body_entry->SetJoinId(stmt->BodyId());
-    loop_successor->SetJoinId(stmt->ExitId());
-    set_current_block(body_entry);
+    CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
+    if (body_entry->HasPredecessor()) {
+      body_entry->SetJoinId(stmt->BodyId());
+      set_current_block(body_entry);
+    }
+    if (loop_successor->HasPredecessor()) {
+      loop_successor->SetJoinId(stmt->ExitId());
+    } else {
+      loop_successor = NULL;
+    }
   }
 
   BreakAndContinueInfo break_info(stmt);
-  { BreakAndContinueScope push(&break_info, this);
-    Visit(stmt->body());
-    CHECK_BAILOUT;
+  if (current_block() != NULL) {
+    BreakAndContinueScope push(&break_info, this);
+    CHECK_BAILOUT(Visit(stmt->body()));
   }
   HBasicBlock* body_exit =
       JoinContinue(stmt, current_block(), break_info.continue_block());
 
   if (stmt->next() != NULL && body_exit != NULL) {
     set_current_block(body_exit);
-    Visit(stmt->next());
-    CHECK_BAILOUT;
+    CHECK_BAILOUT(Visit(stmt->next()));
     body_exit = current_block();
   }
 
@@ -2748,100 +2803,147 @@
 
 
 void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
-  BAILOUT("ForInStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("ForInStatement");
 }
 
 
 void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  BAILOUT("TryCatchStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("TryCatchStatement");
 }
 
 
 void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  BAILOUT("TryFinallyStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("TryFinallyStatement");
 }
 
 
 void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
-  BAILOUT("DebuggerStatement");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("DebuggerStatement");
+}
+
+
+static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
+    Code* unoptimized_code, FunctionLiteral* expr) {
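+  // Walk the unoptimized code's relocation info for an embedded
+  // SharedFunctionInfo whose source start position matches the literal's,
+  // so the function info built by the full code generator can be reused
+  // instead of recompiled.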
+  int start_position = expr->start_position();
+  RelocIterator it(unoptimized_code);
+  for (; !it.done(); it.next()) {
+    RelocInfo* rinfo = it.rinfo();
+    if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
+    Object* obj = rinfo->target_object();
+    if (obj->IsSharedFunctionInfo()) {
+      SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+      if (shared->start_position() == start_position) {
+        return Handle<SharedFunctionInfo>(shared);
+      }
+    }
+  }
+
+  return Handle<SharedFunctionInfo>();
 }
 
 
 void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   Handle<SharedFunctionInfo> shared_info =
-      Compiler::BuildFunctionInfo(expr, info()->script());
-  CHECK_BAILOUT;
+      SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
+  if (shared_info.is_null()) {
+    shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
+  }
+  // We also have a stack overflow if the recursive compilation hit one.
+  if (HasStackOverflow()) return;
   HFunctionLiteral* instr =
-      new HFunctionLiteral(shared_info, expr->pretenure());
+      new(zone()) HFunctionLiteral(shared_info, expr->pretenure());
   ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
 void HGraphBuilder::VisitSharedFunctionInfoLiteral(
     SharedFunctionInfoLiteral* expr) {
-  BAILOUT("SharedFunctionInfoLiteral");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("SharedFunctionInfoLiteral");
 }
 
 
 void HGraphBuilder::VisitConditional(Conditional* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   HBasicBlock* cond_true = graph()->CreateBasicBlock();
   HBasicBlock* cond_false = graph()->CreateBasicBlock();
-  VISIT_FOR_CONTROL(expr->condition(), cond_true, cond_false);
-  cond_true->SetJoinId(expr->ThenId());
-  cond_false->SetJoinId(expr->ElseId());
+  CHECK_BAILOUT(VisitForControl(expr->condition(), cond_true, cond_false));
 
   // Visit the true and false subexpressions in the same AST context as the
   // whole expression.
-  set_current_block(cond_true);
-  Visit(expr->then_expression());
-  CHECK_BAILOUT;
-  HBasicBlock* other = current_block();
+  if (cond_true->HasPredecessor()) {
+    cond_true->SetJoinId(expr->ThenId());
+    set_current_block(cond_true);
+    CHECK_BAILOUT(Visit(expr->then_expression()));
+    cond_true = current_block();
+  } else {
+    cond_true = NULL;
+  }
 
-  set_current_block(cond_false);
-  Visit(expr->else_expression());
-  CHECK_BAILOUT;
+  if (cond_false->HasPredecessor()) {
+    cond_false->SetJoinId(expr->ElseId());
+    set_current_block(cond_false);
+    CHECK_BAILOUT(Visit(expr->else_expression()));
+    cond_false = current_block();
+  } else {
+    cond_false = NULL;
+  }
 
   if (!ast_context()->IsTest()) {
-    HBasicBlock* join = CreateJoin(other, current_block(), expr->id());
+    HBasicBlock* join = CreateJoin(cond_true, cond_false, expr->id());
     set_current_block(join);
-    if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+    if (join != NULL && !ast_context()->IsEffect()) {
+      ast_context()->ReturnValue(Pop());
+    }
   }
 }
 
 
-void HGraphBuilder::LookupGlobalPropertyCell(Variable* var,
-                                             LookupResult* lookup,
-                                             bool is_store) {
-  if (var->is_this()) {
-    BAILOUT("global this reference");
-  }
-  if (!info()->has_global_object()) {
-    BAILOUT("no global object to optimize VariableProxy");
+HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
+    Variable* var, LookupResult* lookup, bool is_store) {
+  if (var->is_this() || !info()->has_global_object()) {
+    return kUseGeneric;
   }
   Handle<GlobalObject> global(info()->global_object());
   global->Lookup(*var->name(), lookup);
-  if (!lookup->IsProperty()) {
-    BAILOUT("global variable cell not yet introduced");
+  if (!lookup->IsProperty() ||
+      lookup->type() != NORMAL ||
+      (is_store && lookup->IsReadOnly()) ||
+      lookup->holder() != *global) {
+    return kUseGeneric;
   }
-  if (lookup->type() != NORMAL) {
-    BAILOUT("global variable has accessors");
-  }
-  if (is_store && lookup->IsReadOnly()) {
-    BAILOUT("read-only global variable");
-  }
-  if (lookup->holder() != *global) {
-    BAILOUT("global property on prototype of global object");
-  }
+
+  return kUseCell;
 }
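
Note the behavioral change here: every removed BAILOUT used to abandon
optimized compilation of the whole function, whereas the classification keeps
compiling and merely selects a slower instruction. As wired up in the load and
store hunks that follow:

    // kUseCell    -> HLoadGlobalCell / HStoreGlobalCell: direct access to
    //                the global property cell.
    // kUseGeneric -> HLoadGlobalGeneric / HStoreGlobalGeneric: an IC call
    //                through the global object, also chosen when an access
    //                check is needed.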
 
 
 HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
   ASSERT(var->IsContextSlot());
-  HInstruction* context = new HContext;
+  HInstruction* context = new(zone()) HContext;
   AddInstruction(context);
   int length = info()->scope()->ContextChainLength(var->scope());
   while (length-- > 0) {
-    context = new HOuterContext(context);
+    context = new(zone()) HOuterContext(context);
     AddInstruction(context);
   }
   return context;
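
BuildContextChainWalk resolves a variable declared N function scopes out by
emitting one HOuterContext load per scope, starting from the current HContext.
A standalone analogy in plain C++ (illustrative types, not V8's):

    struct Context { Context* outer; };

    // One pointer chase per lexical scope between use and declaration.
    Context* WalkOut(Context* current, int chain_length) {
      while (chain_length-- > 0) current = current->outer;
      return current;
    }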
@@ -2849,66 +2951,94 @@
 
 
 void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   Variable* variable = expr->AsVariable();
   if (variable == NULL) {
-    BAILOUT("reference to rewritten variable");
+    return Bailout("reference to rewritten variable");
   } else if (variable->IsStackAllocated()) {
     if (environment()->Lookup(variable)->CheckFlag(HValue::kIsArguments)) {
-      BAILOUT("unsupported context for arguments object");
+      return Bailout("unsupported context for arguments object");
     }
     ast_context()->ReturnValue(environment()->Lookup(variable));
   } else if (variable->IsContextSlot()) {
     if (variable->mode() == Variable::CONST) {
-      BAILOUT("reference to const context slot");
+      return Bailout("reference to const context slot");
     }
     HValue* context = BuildContextChainWalk(variable);
     int index = variable->AsSlot()->index();
-    HLoadContextSlot* instr = new HLoadContextSlot(context, index);
+    HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, index);
     ast_context()->ReturnInstruction(instr, expr->id());
   } else if (variable->is_global()) {
     LookupResult lookup;
-    LookupGlobalPropertyCell(variable, &lookup, false);
-    CHECK_BAILOUT;
+    GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, false);
 
-    Handle<GlobalObject> global(info()->global_object());
-    // TODO(3039103): Handle global property load through an IC call when access
-    // checks are enabled.
-    if (global->IsAccessCheckNeeded()) {
-      BAILOUT("global object requires access check");
+    if (type == kUseCell &&
+        info()->global_object()->IsAccessCheckNeeded()) {
+      type = kUseGeneric;
     }
-    Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-    bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
-    HLoadGlobal* instr = new HLoadGlobal(cell, check_hole);
-    ast_context()->ReturnInstruction(instr, expr->id());
+
+    if (type == kUseCell) {
+      Handle<GlobalObject> global(info()->global_object());
+      Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+      bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+      HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
+      ast_context()->ReturnInstruction(instr, expr->id());
+    } else {
+      HContext* context = new(zone()) HContext;
+      AddInstruction(context);
+      HGlobalObject* global_object = new(zone()) HGlobalObject(context);
+      AddInstruction(global_object);
+      HLoadGlobalGeneric* instr =
+          new(zone()) HLoadGlobalGeneric(context,
+                                         global_object,
+                                         variable->name(),
+                                         ast_context()->is_for_typeof());
+      instr->set_position(expr->position());
+      ASSERT(instr->HasSideEffects());
+      ast_context()->ReturnInstruction(instr, expr->id());
+    }
   } else {
-    BAILOUT("reference to a variable which requires dynamic lookup");
+    return Bailout("reference to a variable which requires dynamic lookup");
   }
 }
 
 
 void HGraphBuilder::VisitLiteral(Literal* expr) {
-  HConstant* instr = new HConstant(expr->handle(), Representation::Tagged());
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  HConstant* instr =
+      new(zone()) HConstant(expr->handle(), Representation::Tagged());
   ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
 void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
-  HRegExpLiteral* instr = new HRegExpLiteral(expr->pattern(),
-                                             expr->flags(),
-                                             expr->literal_index());
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  HRegExpLiteral* instr = new(zone()) HRegExpLiteral(expr->pattern(),
+                                                     expr->flags(),
+                                                     expr->literal_index());
   ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
 void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
-  HContext* context = new HContext;
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  HObjectLiteral* literal = (new HObjectLiteral(context,
-                                                expr->constant_properties(),
-                                                expr->fast_elements(),
-                                                expr->literal_index(),
-                                                expr->depth(),
-                                                expr->has_function()));
+  HObjectLiteral* literal =
+      new(zone()) HObjectLiteral(context,
+                                 expr->constant_properties(),
+                                 expr->fast_elements(),
+                                 expr->literal_index(),
+                                 expr->depth(),
+                                 expr->has_function());
   // The object is expected in the bailout environment during computation
   // of the property values and is the value of the entire expression.
   PushAndAdd(literal);
@@ -2929,15 +3059,20 @@
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
           if (property->emit_store()) {
-            VISIT_FOR_VALUE(value);
+            CHECK_ALIVE(VisitForValue(value));
             HValue* value = Pop();
             Handle<String> name = Handle<String>::cast(key->handle());
             HStoreNamedGeneric* store =
-                new HStoreNamedGeneric(context, literal, name, value);
+                new(zone()) HStoreNamedGeneric(
+                                context,
+                                literal,
+                                name,
+                                value,
+                                function_strict_mode());
             AddInstruction(store);
             AddSimulate(key->id());
           } else {
-            VISIT_FOR_EFFECT(value);
+            CHECK_ALIVE(VisitForEffect(value));
           }
           break;
         }
@@ -2945,7 +3080,7 @@
       case ObjectLiteral::Property::PROTOTYPE:
       case ObjectLiteral::Property::SETTER:
       case ObjectLiteral::Property::GETTER:
-        BAILOUT("Object literal with complex property");
+        return Bailout("Object literal with complex property");
       default: UNREACHABLE();
     }
   }
@@ -2956,7 +3091,7 @@
     // of the object. This makes sure that the original object won't
     // be used by other optimized code before it is transformed
     // (e.g. because of code motion).
-    HToFastProperties* result = new HToFastProperties(Pop());
+    HToFastProperties* result = new(zone()) HToFastProperties(Pop());
     AddInstruction(result);
     ast_context()->ReturnValue(result);
   } else {
@@ -2966,13 +3101,16 @@
 
 
 void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
 
-  HArrayLiteral* literal = new HArrayLiteral(expr->constant_elements(),
-                                             length,
-                                             expr->literal_index(),
-                                             expr->depth());
+  HArrayLiteral* literal = new(zone()) HArrayLiteral(expr->constant_elements(),
+                                                     length,
+                                                     expr->literal_index(),
+                                                     expr->depth());
   // The array is expected in the bailout environment during computation
   // of the property values and is the value of the entire expression.
   PushAndAdd(literal);
@@ -2985,19 +3123,20 @@
     // is already set in the cloned array.
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
-    VISIT_FOR_VALUE(subexpr);
+    CHECK_ALIVE(VisitForValue(subexpr));
     HValue* value = Pop();
-    if (!Smi::IsValid(i)) BAILOUT("Non-smi key in array literal");
+    if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
 
     // Load the elements array before the first store.
     if (elements == NULL)  {
-     elements = new HLoadElements(literal);
+     elements = new(zone()) HLoadElements(literal);
      AddInstruction(elements);
     }
 
-    HValue* key = AddInstruction(new HConstant(Handle<Object>(Smi::FromInt(i)),
-                                               Representation::Integer32()));
-    AddInstruction(new HStoreKeyedFastElement(elements, key, value));
+    HValue* key = AddInstruction(
+        new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
+                              Representation::Integer32()));
+    AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
     AddSimulate(expr->GetIdForElement(i));
   }
   ast_context()->ReturnValue(Pop());
@@ -3005,7 +3144,10 @@
 
 
 void HGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
-  BAILOUT("CatchExtensionObject");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("CatchExtensionObject");
 }
 
 
@@ -3041,8 +3183,8 @@
                                                   LookupResult* lookup,
                                                   bool smi_and_map_check) {
   if (smi_and_map_check) {
-    AddInstruction(new HCheckNonSmi(object));
-    AddInstruction(new HCheckMap(object, type));
+    AddInstruction(new(zone()) HCheckNonSmi(object));
+    AddInstruction(new(zone()) HCheckMap(object, type));
   }
 
   int index = ComputeStoredFieldIndex(type, name, lookup);
@@ -3056,7 +3198,7 @@
     offset += FixedArray::kHeaderSize;
   }
   HStoreNamedField* instr =
-      new HStoreNamedField(object, name, value, is_in_object, offset);
+      new(zone()) HStoreNamedField(object, name, value, is_in_object, offset);
   if (lookup->type() == MAP_TRANSITION) {
     Handle<Map> transition(lookup->GetTransitionMapFromMap(*type));
     instr->set_transition(transition);
@@ -3071,9 +3213,14 @@
 HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
                                                     Handle<String> name,
                                                     HValue* value) {
-  HContext* context = new HContext;
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  return new HStoreNamedGeneric(context, object, name, value);
+  return new(zone()) HStoreNamedGeneric(
+                         context,
+                         object,
+                         name,
+                         value,
+                         function_strict_mode());
 }
 
 
@@ -3114,13 +3261,14 @@
     LookupResult lookup;
     if (ComputeStoredField(map, name, &lookup)) {
       if (count == 0) {
-        AddInstruction(new HCheckNonSmi(object));  // Only needed once.
+        AddInstruction(new(zone()) HCheckNonSmi(object));  // Only needed once.
         join = graph()->CreateBasicBlock();
       }
       ++count;
       HBasicBlock* if_true = graph()->CreateBasicBlock();
       HBasicBlock* if_false = graph()->CreateBasicBlock();
-      HCompareMap* compare = new HCompareMap(object, map, if_true, if_false);
+      HCompareMap* compare =
+          new(zone()) HCompareMap(object, map, if_true, if_false);
       current_block()->Finish(compare);
 
       set_current_block(if_true);
@@ -3178,14 +3326,14 @@
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
   expr->RecordTypeFeedback(oracle());
-  VISIT_FOR_VALUE(prop->obj());
+  CHECK_ALIVE(VisitForValue(prop->obj()));
 
   HValue* value = NULL;
   HInstruction* instr = NULL;
 
   if (prop->key()->IsPropertyName()) {
     // Named store.
-    VISIT_FOR_VALUE(expr->value());
+    CHECK_ALIVE(VisitForValue(expr->value()));
     value = Pop();
     HValue* object = Pop();
 
@@ -3209,31 +3357,13 @@
 
   } else {
     // Keyed store.
-    VISIT_FOR_VALUE(prop->key());
-    VISIT_FOR_VALUE(expr->value());
+    CHECK_ALIVE(VisitForValue(prop->key()));
+    CHECK_ALIVE(VisitForValue(expr->value()));
     value = Pop();
     HValue* key = Pop();
     HValue* object = Pop();
-
-    if (expr->IsMonomorphic()) {
-      Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
-      // An object has either fast elements or external array elements, but
-      // never both. Pixel array maps that are assigned to pixel array elements
-      // are always created with the fast elements flag cleared.
-      if (receiver_type->has_external_array_elements()) {
-        instr = BuildStoreKeyedSpecializedArrayElement(object,
-                                                       key,
-                                                       value,
-                                                       expr);
-      } else if (receiver_type->has_fast_elements()) {
-        instr = BuildStoreKeyedFastElement(object, key, value, expr);
-      }
-    }
-    if (instr == NULL) {
-      instr = BuildStoreKeyedGeneric(object, key, value);
-    }
+    instr = BuildStoreKeyed(object, key, value, expr);
   }
-
   Push(value);
   instr->set_position(expr->position());
   AddInstruction(instr);
@@ -3250,16 +3380,31 @@
                                                    int position,
                                                    int ast_id) {
   LookupResult lookup;
-  LookupGlobalPropertyCell(var, &lookup, true);
-  CHECK_BAILOUT;
-
-  bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
-  Handle<GlobalObject> global(info()->global_object());
-  Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-  HInstruction* instr = new HStoreGlobal(value, cell, check_hole);
-  instr->set_position(position);
-  AddInstruction(instr);
-  if (instr->HasSideEffects()) AddSimulate(ast_id);
+  GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
+  if (type == kUseCell) {
+    bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+    Handle<GlobalObject> global(info()->global_object());
+    Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+    HInstruction* instr = new(zone()) HStoreGlobalCell(value, cell, check_hole);
+    instr->set_position(position);
+    AddInstruction(instr);
+    if (instr->HasSideEffects()) AddSimulate(ast_id);
+  } else {
+    HContext* context = new(zone()) HContext;
+    AddInstruction(context);
+    HGlobalObject* global_object = new(zone()) HGlobalObject(context);
+    AddInstruction(global_object);
+    HStoreGlobalGeneric* instr =
+        new(zone()) HStoreGlobalGeneric(context,
+                                        global_object,
+                                        var->name(),
+                                        value,
+                                        function_strict_mode());
+    instr->set_position(position);
+    AddInstruction(instr);
+    ASSERT(instr->HasSideEffects());
+    if (instr->HasSideEffects()) AddSimulate(ast_id);
+  }
 }
 
 
@@ -3275,7 +3420,7 @@
   BinaryOperation* operation = expr->binary_operation();
 
   if (var != NULL) {
-    VISIT_FOR_VALUE(operation);
+    CHECK_ALIVE(VisitForValue(operation));
 
     if (var->is_global()) {
       HandleGlobalVariableAssignment(var,
@@ -3287,11 +3432,12 @@
     } else if (var->IsContextSlot()) {
       HValue* context = BuildContextChainWalk(var);
       int index = var->AsSlot()->index();
-      HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top());
+      HStoreContextSlot* instr =
+          new(zone()) HStoreContextSlot(context, index, Top());
       AddInstruction(instr);
       if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
     } else {
-      BAILOUT("compound assignment to lookup slot");
+      return Bailout("compound assignment to lookup slot");
     }
     ast_context()->ReturnValue(Pop());
 
@@ -3300,7 +3446,7 @@
 
     if (prop->key()->IsPropertyName()) {
       // Named property.
-      VISIT_FOR_VALUE(prop->obj());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
       HValue* obj = Top();
 
       HInstruction* load = NULL;
@@ -3314,7 +3460,7 @@
       PushAndAdd(load);
       if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
 
-      VISIT_FOR_VALUE(expr->value());
+      CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* right = Pop();
       HValue* left = Pop();
 
@@ -3332,20 +3478,16 @@
 
     } else {
       // Keyed property.
-      VISIT_FOR_VALUE(prop->obj());
-      VISIT_FOR_VALUE(prop->key());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
+      CHECK_ALIVE(VisitForValue(prop->key()));
       HValue* obj = environment()->ExpressionStackAt(1);
       HValue* key = environment()->ExpressionStackAt(0);
 
-      bool is_fast_elements = prop->IsMonomorphic() &&
-          prop->GetMonomorphicReceiverType()->has_fast_elements();
-      HInstruction* load = is_fast_elements
-          ? BuildLoadKeyedFastElement(obj, key, prop)
-          : BuildLoadKeyedGeneric(obj, key);
+      HInstruction* load = BuildLoadKeyed(obj, key, prop);
       PushAndAdd(load);
       if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
 
-      VISIT_FOR_VALUE(expr->value());
+      CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* right = Pop();
       HValue* left = Pop();
 
@@ -3353,9 +3495,8 @@
       PushAndAdd(instr);
       if (instr->HasSideEffects()) AddSimulate(operation->id());
 
-      HInstruction* store = is_fast_elements
-          ? BuildStoreKeyedFastElement(obj, key, instr, prop)
-          : BuildStoreKeyedGeneric(obj, key, instr);
+      expr->RecordTypeFeedback(oracle());
+      HInstruction* store = BuildStoreKeyed(obj, key, instr, expr);
       AddInstruction(store);
       // Drop the simulated receiver, key, and value.  Return the value.
       Drop(3);
@@ -3365,12 +3506,15 @@
     }
 
   } else {
-    BAILOUT("invalid lhs in compound assignment");
+    return Bailout("invalid lhs in compound assignment");
   }
 }
 
 
 void HGraphBuilder::VisitAssignment(Assignment* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   VariableProxy* proxy = expr->target()->AsVariableProxy();
   Variable* var = proxy->AsVariable();
   Property* prop = expr->target()->AsProperty();
@@ -3382,7 +3526,7 @@
   }
 
   if (var != NULL) {
-    if (proxy->IsArguments()) BAILOUT("assignment to arguments");
+    if (proxy->IsArguments()) return Bailout("assignment to arguments");
 
     // Handle the assignment.
     if (var->IsStackAllocated()) {
@@ -3396,23 +3540,24 @@
       if (rhs_var != NULL && rhs_var->IsStackAllocated()) {
         value = environment()->Lookup(rhs_var);
       } else {
-        VISIT_FOR_VALUE(expr->value());
+        CHECK_ALIVE(VisitForValue(expr->value()));
         value = Pop();
       }
       Bind(var, value);
       ast_context()->ReturnValue(value);
 
     } else if (var->IsContextSlot() && var->mode() != Variable::CONST) {
-      VISIT_FOR_VALUE(expr->value());
+      CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* context = BuildContextChainWalk(var);
       int index = var->AsSlot()->index();
-      HStoreContextSlot* instr = new HStoreContextSlot(context, index, Top());
+      HStoreContextSlot* instr =
+          new(zone()) HStoreContextSlot(context, index, Top());
       AddInstruction(instr);
       if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
       ast_context()->ReturnValue(Pop());
 
     } else if (var->is_global()) {
-      VISIT_FOR_VALUE(expr->value());
+      CHECK_ALIVE(VisitForValue(expr->value()));
       HandleGlobalVariableAssignment(var,
                                      Top(),
                                      expr->position(),
@@ -3420,30 +3565,33 @@
       ast_context()->ReturnValue(Pop());
 
     } else {
-      BAILOUT("assignment to LOOKUP or const CONTEXT variable");
+      return Bailout("assignment to LOOKUP or const CONTEXT variable");
     }
 
   } else if (prop != NULL) {
     HandlePropertyAssignment(expr);
   } else {
-    BAILOUT("invalid left-hand side in assignment");
+    return Bailout("invalid left-hand side in assignment");
   }
 }
 
 
 void HGraphBuilder::VisitThrow(Throw* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   // We don't optimize functions with invalid left-hand sides in
   // assignments, count operations, or for-in.  Consequently throw can
   // currently only occur in an effect context.
   ASSERT(ast_context()->IsEffect());
-  VISIT_FOR_VALUE(expr->exception());
+  CHECK_ALIVE(VisitForValue(expr->exception()));
 
   HValue* value = environment()->Pop();
-  HThrow* instr = new HThrow(value);
+  HThrow* instr = new(zone()) HThrow(value);
   instr->set_position(expr->position());
   AddInstruction(instr);
   AddSimulate(expr->id());
-  current_block()->FinishExit(new HAbnormalExit);
+  current_block()->FinishExit(new(zone()) HAbnormalExit);
   set_current_block(NULL);
 }
 
@@ -3454,8 +3602,8 @@
                                                     LookupResult* lookup,
                                                     bool smi_and_map_check) {
   if (smi_and_map_check) {
-    AddInstruction(new HCheckNonSmi(object));
-    AddInstruction(new HCheckMap(object, type));
+    AddInstruction(new(zone()) HCheckNonSmi(object));
+    AddInstruction(new(zone()) HCheckMap(object, type));
   }
 
   int index = lookup->GetLocalFieldIndexFromMap(*type);
@@ -3463,11 +3611,11 @@
     // Negative property indices are in-object properties, indexed
     // from the end of the fixed part of the object.
     int offset = (index * kPointerSize) + type->instance_size();
-    return new HLoadNamedField(object, true, offset);
+    return new(zone()) HLoadNamedField(object, true, offset);
   } else {
     // Non-negative property indices are in the properties array.
     int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
-    return new HLoadNamedField(object, false, offset);
+    return new(zone()) HLoadNamedField(object, false, offset);
   }
 }
 
@@ -3476,9 +3624,9 @@
                                                    Property* expr) {
   ASSERT(expr->key()->IsPropertyName());
   Handle<Object> name = expr->key()->AsLiteral()->handle();
-  HContext* context = new HContext;
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  return new HLoadNamedGeneric(context, obj, name);
+  return new(zone()) HLoadNamedGeneric(context, obj, name);
 }
 
 
@@ -3495,10 +3643,10 @@
                                &lookup,
                                true);
   } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
-    AddInstruction(new HCheckNonSmi(obj));
-    AddInstruction(new HCheckMap(obj, map));
+    AddInstruction(new(zone()) HCheckNonSmi(obj));
+    AddInstruction(new(zone()) HCheckMap(obj, map));
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
-    return new HConstant(function, Representation::Tagged());
+    return new(zone()) HConstant(function, Representation::Tagged());
   } else {
     return BuildLoadNamedGeneric(obj, expr);
   }
@@ -3507,9 +3655,9 @@
 
 HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
                                                    HValue* key) {
-  HContext* context = new HContext;
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  return new HLoadKeyedGeneric(context, object, key);
+  return new(zone()) HLoadKeyedGeneric(context, object, key);
 }
 
 
@@ -3517,23 +3665,23 @@
                                                        HValue* key,
                                                        Property* expr) {
   ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
-  AddInstruction(new HCheckNonSmi(object));
+  AddInstruction(new(zone()) HCheckNonSmi(object));
   Handle<Map> map = expr->GetMonomorphicReceiverType();
   ASSERT(map->has_fast_elements());
-  AddInstruction(new HCheckMap(object, map));
+  AddInstruction(new(zone()) HCheckMap(object, map));
   bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
-  HLoadElements* elements = new HLoadElements(object);
+  HLoadElements* elements = new(zone()) HLoadElements(object);
   HInstruction* length = NULL;
   if (is_array) {
-    length = AddInstruction(new HJSArrayLength(object));
-    AddInstruction(new HBoundsCheck(key, length));
+    length = AddInstruction(new(zone()) HJSArrayLength(object));
+    AddInstruction(new(zone()) HBoundsCheck(key, length));
     AddInstruction(elements);
   } else {
     AddInstruction(elements);
-    length = AddInstruction(new HFixedArrayLength(elements));
-    AddInstruction(new HBoundsCheck(key, length));
+    length = AddInstruction(new(zone()) HFixedArrayLength(elements));
+    AddInstruction(new(zone()) HBoundsCheck(key, length));
   }
-  return new HLoadKeyedFastElement(elements, key);
+  return new(zone()) HLoadKeyedFastElement(elements, key);
 }
 
 
@@ -3542,33 +3690,55 @@
     HValue* key,
     Property* expr) {
   ASSERT(!expr->key()->IsPropertyName() && expr->IsMonomorphic());
-  AddInstruction(new HCheckNonSmi(object));
+  AddInstruction(new(zone()) HCheckNonSmi(object));
   Handle<Map> map = expr->GetMonomorphicReceiverType();
   ASSERT(!map->has_fast_elements());
   ASSERT(map->has_external_array_elements());
-  AddInstruction(new HCheckMap(object, map));
-  HLoadElements* elements = new HLoadElements(object);
+  AddInstruction(new(zone()) HCheckMap(object, map));
+  HLoadElements* elements = new(zone()) HLoadElements(object);
   AddInstruction(elements);
-  HInstruction* length = new HExternalArrayLength(elements);
+  HInstruction* length = new(zone()) HExternalArrayLength(elements);
   AddInstruction(length);
-  AddInstruction(new HBoundsCheck(key, length));
+  AddInstruction(new(zone()) HBoundsCheck(key, length));
   HLoadExternalArrayPointer* external_elements =
-      new HLoadExternalArrayPointer(elements);
+      new(zone()) HLoadExternalArrayPointer(elements);
   AddInstruction(external_elements);
   HLoadKeyedSpecializedArrayElement* pixel_array_value =
-      new HLoadKeyedSpecializedArrayElement(external_elements,
-                                            key,
-                                            expr->GetExternalArrayType());
+      new(zone()) HLoadKeyedSpecializedArrayElement(
+          external_elements, key, expr->external_array_type());
   return pixel_array_value;
 }
 
 
+HInstruction* HGraphBuilder::BuildLoadKeyed(HValue* obj,
+                                            HValue* key,
+                                            Property* prop) {
+  if (prop->IsMonomorphic()) {
+    Handle<Map> receiver_type(prop->GetMonomorphicReceiverType());
+    // An object has either fast elements or external array elements, but
+    // never both. External array maps that are assigned to external array
+    // elements are always created with the fast elements flag cleared.
+    if (receiver_type->has_external_array_elements()) {
+      return BuildLoadKeyedSpecializedArrayElement(obj, key, prop);
+    } else if (receiver_type->has_fast_elements()) {
+      return BuildLoadKeyedFastElement(obj, key, prop);
+    }
+  }
+  return BuildLoadKeyedGeneric(obj, key);
+}
+
+
 HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
                                                     HValue* key,
                                                     HValue* value) {
-  HContext* context = new HContext;
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  return new HStoreKeyedGeneric(context, object, key, value);
+  return new(zone()) HStoreKeyedGeneric(
+                         context,
+                         object,
+                         key,
+                         value,
+                         function_strict_mode());
 }
 
 
@@ -3577,22 +3747,22 @@
                                                         HValue* val,
                                                         Expression* expr) {
   ASSERT(expr->IsMonomorphic());
-  AddInstruction(new HCheckNonSmi(object));
+  AddInstruction(new(zone()) HCheckNonSmi(object));
   Handle<Map> map = expr->GetMonomorphicReceiverType();
   ASSERT(map->has_fast_elements());
-  AddInstruction(new HCheckMap(object, map));
-  HInstruction* elements = AddInstruction(new HLoadElements(object));
-  AddInstruction(new HCheckMap(elements,
-                               isolate()->factory()->fixed_array_map()));
+  AddInstruction(new(zone()) HCheckMap(object, map));
+  HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
+  AddInstruction(new(zone()) HCheckMap(
+      elements, isolate()->factory()->fixed_array_map()));
   bool is_array = (map->instance_type() == JS_ARRAY_TYPE);
   HInstruction* length = NULL;
   if (is_array) {
-    length = AddInstruction(new HJSArrayLength(object));
+    length = AddInstruction(new(zone()) HJSArrayLength(object));
   } else {
-    length = AddInstruction(new HFixedArrayLength(elements));
+    length = AddInstruction(new(zone()) HFixedArrayLength(elements));
   }
-  AddInstruction(new HBoundsCheck(key, length));
-  return new HStoreKeyedFastElement(elements, key, val);
+  AddInstruction(new(zone()) HBoundsCheck(key, length));
+  return new(zone()) HStoreKeyedFastElement(elements, key, val);
 }
 
 
@@ -3600,25 +3770,48 @@
     HValue* object,
     HValue* key,
     HValue* val,
-    Assignment* expr) {
+    Expression* expr) {
   ASSERT(expr->IsMonomorphic());
-  AddInstruction(new HCheckNonSmi(object));
+  AddInstruction(new(zone()) HCheckNonSmi(object));
   Handle<Map> map = expr->GetMonomorphicReceiverType();
   ASSERT(!map->has_fast_elements());
   ASSERT(map->has_external_array_elements());
-  AddInstruction(new HCheckMap(object, map));
-  HLoadElements* elements = new HLoadElements(object);
+  AddInstruction(new(zone()) HCheckMap(object, map));
+  HLoadElements* elements = new(zone()) HLoadElements(object);
   AddInstruction(elements);
-  HInstruction* length = AddInstruction(new HExternalArrayLength(elements));
-  AddInstruction(new HBoundsCheck(key, length));
+  HInstruction* length = AddInstruction(
+      new(zone()) HExternalArrayLength(elements));
+  AddInstruction(new(zone()) HBoundsCheck(key, length));
   HLoadExternalArrayPointer* external_elements =
-      new HLoadExternalArrayPointer(elements);
+      new(zone()) HLoadExternalArrayPointer(elements);
   AddInstruction(external_elements);
-  return new HStoreKeyedSpecializedArrayElement(
+  return new(zone()) HStoreKeyedSpecializedArrayElement(
       external_elements,
       key,
       val,
-      expr->GetExternalArrayType());
+      expr->external_array_type());
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreKeyed(HValue* object,
+                                             HValue* key,
+                                             HValue* value,
+                                             Expression* expr) {
+  if (expr->IsMonomorphic()) {
+    Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
+    // An object has either fast elements or external array elements, but
+    // never both. External array maps that are assigned to external array
+    // elements are always created with the fast elements flag cleared.
+    if (receiver_type->has_external_array_elements()) {
+      return BuildStoreKeyedSpecializedArrayElement(object,
+                                                    key,
+                                                    value,
+                                                    expr);
+    } else if (receiver_type->has_fast_elements()) {
+      return BuildStoreKeyedFastElement(object, key, value, expr);
+    }
+  }
+  return BuildStoreKeyedGeneric(object, key, value);
 }
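
BuildLoadKeyed and BuildStoreKeyed centralize a dispatch that VisitProperty,
HandlePropertyAssignment, and the compound-assignment path each used to spell
out inline. The shared shape, sketched with placeholder names for the three
outcomes:

    // Placeholder names; the real methods are the Build*Keyed* helpers above.
    if (expr->IsMonomorphic()) {
      Handle<Map> map(expr->GetMonomorphicReceiverType());
      if (map->has_external_array_elements()) return SpecializedAccess();
      if (map->has_fast_elements()) return BoundsCheckedFastAccess();
    }
    return GenericKeyedIC();  // polymorphic or unknown receiver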
 
 
@@ -3634,18 +3827,19 @@
   if (expr->key()->IsPropertyName()) {
     Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
     if (!name->IsEqualTo(CStrVector("length"))) return false;
-    HInstruction* elements = AddInstruction(new HArgumentsElements);
-    result = new HArgumentsLength(elements);
+    HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+    result = new(zone()) HArgumentsLength(elements);
   } else {
     Push(graph()->GetArgumentsObject());
     VisitForValue(expr->key());
-    if (HasStackOverflow()) return false;
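+    // Note the changed contract: true now also covers "dead block or stack
+    // overflow", telling the caller to stop instead of falling back to a
+    // generic load.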
+    if (HasStackOverflow() || current_block() == NULL) return true;
     HValue* key = Pop();
     Drop(1);  // Arguments object.
-    HInstruction* elements = AddInstruction(new HArgumentsElements);
-    HInstruction* length = AddInstruction(new HArgumentsLength(elements));
-    AddInstruction(new HBoundsCheck(key, length));
-    result = new HAccessArgumentsAt(elements, length, key);
+    HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+    HInstruction* length = AddInstruction(
+        new(zone()) HArgumentsLength(elements));
+    AddInstruction(new(zone()) HBoundsCheck(key, length));
+    result = new(zone()) HAccessArgumentsAt(elements, length, key);
   }
   ast_context()->ReturnInstruction(result, expr->id());
   return true;
@@ -3653,39 +3847,43 @@
 
 
 void HGraphBuilder::VisitProperty(Property* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   expr->RecordTypeFeedback(oracle());
 
   if (TryArgumentsAccess(expr)) return;
-  CHECK_BAILOUT;
 
-  VISIT_FOR_VALUE(expr->obj());
+  CHECK_ALIVE(VisitForValue(expr->obj()));
 
   HInstruction* instr = NULL;
   if (expr->IsArrayLength()) {
     HValue* array = Pop();
-    AddInstruction(new HCheckNonSmi(array));
-    AddInstruction(new HCheckInstanceType(array, JS_ARRAY_TYPE, JS_ARRAY_TYPE));
-    instr = new HJSArrayLength(array);
+    AddInstruction(new(zone()) HCheckNonSmi(array));
+    AddInstruction(new(zone()) HCheckInstanceType(array,
+                                                  JS_ARRAY_TYPE,
+                                                  JS_ARRAY_TYPE));
+    instr = new(zone()) HJSArrayLength(array);
 
   } else if (expr->IsStringLength()) {
     HValue* string = Pop();
-    AddInstruction(new HCheckNonSmi(string));
-    AddInstruction(new HCheckInstanceType(string,
-                                          FIRST_STRING_TYPE,
-                                          LAST_STRING_TYPE));
-    instr = new HStringLength(string);
+    AddInstruction(new(zone()) HCheckNonSmi(string));
+    AddInstruction(new(zone()) HCheckInstanceType(string,
+                                                  FIRST_STRING_TYPE,
+                                                  LAST_STRING_TYPE));
+    instr = new(zone()) HStringLength(string);
   } else if (expr->IsStringAccess()) {
-    VISIT_FOR_VALUE(expr->key());
+    CHECK_ALIVE(VisitForValue(expr->key()));
     HValue* index = Pop();
     HValue* string = Pop();
     HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
     AddInstruction(char_code);
-    instr = new HStringCharFromCode(char_code);
+    instr = new(zone()) HStringCharFromCode(char_code);
 
   } else if (expr->IsFunctionPrototype()) {
     HValue* function = Pop();
-    AddInstruction(new HCheckNonSmi(function));
-    instr = new HLoadFunctionPrototype(function);
+    AddInstruction(new(zone()) HCheckNonSmi(function));
+    instr = new(zone()) HLoadFunctionPrototype(function);
 
   } else if (expr->key()->IsPropertyName()) {
     Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
@@ -3695,32 +3893,18 @@
     if (expr->IsMonomorphic()) {
       instr = BuildLoadNamed(obj, expr, types->first(), name);
     } else if (types != NULL && types->length() > 1) {
-      AddInstruction(new HCheckNonSmi(obj));
-      instr = new HLoadNamedFieldPolymorphic(obj, types, name);
+      AddInstruction(new(zone()) HCheckNonSmi(obj));
+      instr = new(zone()) HLoadNamedFieldPolymorphic(obj, types, name);
     } else {
       instr = BuildLoadNamedGeneric(obj, expr);
     }
 
   } else {
-    VISIT_FOR_VALUE(expr->key());
+    CHECK_ALIVE(VisitForValue(expr->key()));
 
     HValue* key = Pop();
     HValue* obj = Pop();
-
-    if (expr->IsMonomorphic()) {
-      Handle<Map> receiver_type(expr->GetMonomorphicReceiverType());
-      // An object has either fast elements or pixel array elements, but never
-      // both. Pixel array maps that are assigned to pixel array elements are
-      // always created with the fast elements flag cleared.
-      if (receiver_type->has_external_array_elements()) {
-        instr = BuildLoadKeyedSpecializedArrayElement(obj, key, expr);
-      } else if (receiver_type->has_fast_elements()) {
-        instr = BuildLoadKeyedFastElement(obj, key, expr);
-      }
-    }
-    if (instr == NULL) {
-      instr = BuildLoadKeyedGeneric(obj, key);
-    }
+    instr = BuildLoadKeyed(obj, key, expr);
   }
   instr->set_position(expr->position());
   ast_context()->ReturnInstruction(instr, expr->id());
@@ -3735,11 +3919,11 @@
   // are overwritten.  Therefore it is enough to check the map of the holder and
   // its prototypes.
   if (smi_and_map_check) {
-    AddInstruction(new HCheckNonSmi(receiver));
-    AddInstruction(new HCheckMap(receiver, receiver_map));
+    AddInstruction(new(zone()) HCheckNonSmi(receiver));
+    AddInstruction(new(zone()) HCheckMap(receiver, receiver_map));
   }
   if (!expr->holder().is_null()) {
-    AddInstruction(new HCheckPrototypeMaps(
+    AddInstruction(new(zone()) HCheckPrototypeMaps(
         Handle<JSObject>(JSObject::cast(receiver_map->prototype())),
         expr->holder()));
   }
@@ -3760,13 +3944,15 @@
     Handle<Map> map = types->at(i);
     if (expr->ComputeTarget(map, name)) {
       if (count == 0) {
-        AddInstruction(new HCheckNonSmi(receiver));  // Only needed once.
+        // Only needed once.
+        AddInstruction(new(zone()) HCheckNonSmi(receiver));
         join = graph()->CreateBasicBlock();
       }
       ++count;
       HBasicBlock* if_true = graph()->CreateBasicBlock();
       HBasicBlock* if_false = graph()->CreateBasicBlock();
-      HCompareMap* compare = new HCompareMap(receiver, map, if_true, if_false);
+      HCompareMap* compare =
+          new(zone()) HCompareMap(receiver, map, if_true, if_false);
       current_block()->Finish(compare);
 
       set_current_block(if_true);
@@ -3775,12 +3961,13 @@
         PrintF("Trying to inline the polymorphic call to %s\n",
                *name->ToCString());
       }
-      if (!FLAG_polymorphic_inlining || !TryInline(expr)) {
-        // Check for bailout, as trying to inline might fail due to bailout
-        // during hydrogen processing.
-        CHECK_BAILOUT;
+      if (FLAG_polymorphic_inlining && TryInline(expr)) {
+        // Trying to inline will signal that we should bail out of the
+        // entire compilation by setting stack overflow on the visitor.
+        if (HasStackOverflow()) return;
+      } else {
         HCallConstantFunction* call =
-            new HCallConstantFunction(expr->target(), argument_count);
+            new(zone()) HCallConstantFunction(expr->target(), argument_count);
         call->set_position(expr->position());
         PreProcessCall(call);
         AddInstruction(call);
@@ -3798,9 +3985,9 @@
   if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
     current_block()->FinishExitWithDeoptimization();
   } else {
-    HContext* context = new HContext;
+    HContext* context = new(zone()) HContext;
     AddInstruction(context);
-    HCallNamed* call = new HCallNamed(context, name, argument_count);
+    HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count);
     call->set_position(expr->position());
     PreProcessCall(call);
 
@@ -3818,22 +4005,30 @@
   // even without predecessors to the join block, we set it as the exit
   // block and continue by adding instructions there.
   ASSERT(join != NULL);
-  set_current_block(join);
   if (join->HasPredecessor()) {
+    set_current_block(join);
     join->SetJoinId(expr->id());
     if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+  } else {
+    set_current_block(NULL);
   }
 }
 
 
 void HGraphBuilder::TraceInline(Handle<JSFunction> target, const char* reason) {
   if (FLAG_trace_inlining) {
-    SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
-    SmartPointer<char> caller =
-        info()->function()->debug_name()->ToCString();
     if (reason == NULL) {
+      // We are currently in the context of the inlined function, so we have
+      // to go to the outer FunctionState to find the caller.
+      SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
+      SmartPointer<char> caller =
+          function_state()->outer()->compilation_info()->function()->
+              debug_name()->ToCString();
       PrintF("Inlined %s called from %s.\n", *callee, *caller);
     } else {
+      SmartPointer<char> callee = target->shared()->DebugName()->ToCString();
+      SmartPointer<char> caller =
+          info()->function()->debug_name()->ToCString();
       PrintF("Did not inline %s called from %s (%s).\n",
              *callee, *caller, reason);
     }
@@ -3870,11 +4065,16 @@
     return false;
   }
 
-  // Don't inline deeper than two calls.
+  // Don't inline deeper than kMaxInliningLevels calls.
   HEnvironment* env = environment();
-  if (env->outer() != NULL && env->outer()->outer() != NULL) {
-    TraceInline(target, "inline depth limit reached");
-    return false;
+  int current_level = 1;
+  while (env->outer() != NULL) {
+    if (current_level == Compiler::kMaxInliningLevels) {
+      TraceInline(target, "inline depth limit reached");
+      return false;
+    }
+    current_level++;
+    env = env->outer();
   }
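
The old test env->outer()->outer() != NULL hard-coded an inlining depth of
two; the loop generalizes it by counting chained outer environments against
Compiler::kMaxInliningLevels. An equivalent helper formulation (hypothetical,
not in the patch):

    // Depth of the current compilation; the top-level function counts as 1.
    static int InliningDepth(HEnvironment* env) {
      int depth = 1;
      for (; env->outer() != NULL; env = env->outer()) ++depth;
      return depth;
    }

TryInline then refuses whenever the depth would exceed kMaxInliningLevels.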
 
   // Don't inline recursive functions.
@@ -3976,13 +4176,13 @@
 
   body_entry->SetJoinId(expr->ReturnId());
   set_current_block(body_entry);
-  AddInstruction(new HEnterInlined(target, function));
+  AddInstruction(new(zone()) HEnterInlined(target, function));
   VisitStatements(function->body());
   if (HasStackOverflow()) {
     // Bail out if the inline function did, as we cannot residualize a call
     // instead.
     TraceInline(target, "inline graph construction failed");
-    return false;
+    return true;
   }
 
   // Update inlined nodes count.
@@ -4009,7 +4209,7 @@
       // TODO(3168478): refactor to avoid this.
       HBasicBlock* empty_true = graph()->CreateBasicBlock();
       HBasicBlock* empty_false = graph()->CreateBasicBlock();
-      HTest* test = new HTest(undefined, empty_true, empty_false);
+      HTest* test = new(zone()) HTest(undefined, empty_true, empty_false);
       current_block()->Finish(test);
 
       empty_true->Goto(inlined_test_context()->if_true(), false);
@@ -4038,9 +4238,11 @@
     // flow to handle.
     set_current_block(NULL);
 
-  } else {
+  } else if (function_return()->HasPredecessor()) {
     function_return()->SetJoinId(expr->id());
     set_current_block(function_return());
+  } else {
+    set_current_block(NULL);
   }
 
   return true;
@@ -4063,7 +4265,7 @@
         HValue* index = Pop();
         HValue* string = Pop();
         ASSERT(!expr->holder().is_null());
-        AddInstruction(new HCheckPrototypeMaps(
+        AddInstruction(new(zone()) HCheckPrototypeMaps(
             oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK),
             expr->holder()));
         HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
@@ -4072,7 +4274,8 @@
           return true;
         }
         AddInstruction(char_code);
-        HStringCharFromCode* result = new HStringCharFromCode(char_code);
+        HStringCharFromCode* result =
+            new(zone()) HStringCharFromCode(char_code);
         ast_context()->ReturnInstruction(result, expr->id());
         return true;
       }
@@ -4088,7 +4291,7 @@
         AddCheckConstantFunction(expr, receiver, receiver_map, true);
         HValue* argument = Pop();
         Drop(1);  // Receiver.
-        HUnaryMathOperation* op = new HUnaryMathOperation(argument, id);
+        HUnaryMathOperation* op = new(zone()) HUnaryMathOperation(argument, id);
         op->set_position(expr->position());
         ast_context()->ReturnInstruction(op, expr->id());
         return true;
@@ -4105,30 +4308,30 @@
         if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
           double exponent = HConstant::cast(right)->DoubleValue();
           if (exponent == 0.5) {
-            result = new HUnaryMathOperation(left, kMathPowHalf);
+            result = new(zone()) HUnaryMathOperation(left, kMathPowHalf);
           } else if (exponent == -0.5) {
             HConstant* double_one =
-                new HConstant(Handle<Object>(Smi::FromInt(1)),
-                              Representation::Double());
+                new(zone()) HConstant(Handle<Object>(Smi::FromInt(1)),
+                                      Representation::Double());
             AddInstruction(double_one);
             HUnaryMathOperation* square_root =
-                new HUnaryMathOperation(left, kMathPowHalf);
+                new(zone()) HUnaryMathOperation(left, kMathPowHalf);
             AddInstruction(square_root);
             // MathPowHalf doesn't have side effects so there's no need for
             // an environment simulation here.
             ASSERT(!square_root->HasSideEffects());
-            result = new HDiv(double_one, square_root);
+            result = new(zone()) HDiv(double_one, square_root);
           } else if (exponent == 2.0) {
-            result = new HMul(left, left);
+            result = new(zone()) HMul(left, left);
           }
         } else if (right->IsConstant() &&
                    HConstant::cast(right)->HasInteger32Value() &&
                    HConstant::cast(right)->Integer32Value() == 2) {
-          result = new HMul(left, left);
+          result = new(zone()) HMul(left, left);
         }
 
         if (result == NULL) {
-          result = new HPower(left, right);
+          result = new(zone()) HPower(left, right);
         }
         ast_context()->ReturnInstruction(result, expr->id());
         return true;
@@ -4165,19 +4368,19 @@
 
   // Found pattern f.apply(receiver, arguments).
   VisitForValue(prop->obj());
-  if (HasStackOverflow()) return false;
+  if (HasStackOverflow() || current_block() == NULL) return true;
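+  // As in TryInline, returning true means the call was handled: the graph
+  // is either dead (current_block() == NULL) or construction has bailed
+  // out, so the caller must not build an ordinary call.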
   HValue* function = Pop();
   VisitForValue(args->at(0));
-  if (HasStackOverflow()) return false;
+  if (HasStackOverflow() || current_block() == NULL) return true;
   HValue* receiver = Pop();
-  HInstruction* elements = AddInstruction(new HArgumentsElements);
-  HInstruction* length = AddInstruction(new HArgumentsLength(elements));
+  HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+  HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
   AddCheckConstantFunction(expr,
                            function,
                            expr->GetReceiverTypes()->first(),
                            true);
   HInstruction* result =
-      new HApplyArguments(function, receiver, length, elements);
+      new(zone()) HApplyArguments(function, receiver, length, elements);
   result->set_position(expr->position());
   ast_context()->ReturnInstruction(result, expr->id());
   return true;
@@ -4185,6 +4388,9 @@
 
 
 void HGraphBuilder::VisitCall(Call* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
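+  // The CHECK_ALIVE checks below guarantee that expression visitors are
+  // only entered with a live, reachable current block; these asserts
+  // document that invariant on entry.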
   Expression* callee = expr->expression();
   int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
   HInstruction* call = NULL;
@@ -4193,21 +4399,21 @@
   if (prop != NULL) {
     if (!prop->key()->IsPropertyName()) {
       // Keyed function call.
-      VISIT_FOR_VALUE(prop->obj());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
 
-      VISIT_FOR_VALUE(prop->key());
+      CHECK_ALIVE(VisitForValue(prop->key()));
       // Push the receiver and key the way the non-optimized code
       // generator expects them.
       HValue* key = Pop();
       HValue* receiver = Pop();
       Push(key);
       Push(receiver);
 
-      VisitExpressions(expr->arguments());
-      CHECK_BAILOUT;
+      CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
-      HContext* context = new HContext;
+      HContext* context = new(zone()) HContext;
       AddInstruction(context);
-      call = PreProcessCall(new HCallKeyed(context, key, argument_count));
+      call = PreProcessCall(
+          new(zone()) HCallKeyed(context, key, argument_count));
       call->set_position(expr->position());
       Drop(1);  // Key.
       ast_context()->ReturnInstruction(call, expr->id());
@@ -4218,11 +4424,9 @@
     expr->RecordTypeFeedback(oracle());
 
     if (TryCallApply(expr)) return;
-    CHECK_BAILOUT;
 
-    VISIT_FOR_VALUE(prop->obj());
-    VisitExpressions(expr->arguments());
-    CHECK_BAILOUT;
+    CHECK_ALIVE(VisitForValue(prop->obj()));
+    CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
     Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
 
@@ -4246,21 +4450,17 @@
         // When the target has a custom call IC generator, use the IC,
         // because it is likely to generate better code.  Also use the IC
         // when a primitive receiver check is required.
-        HContext* context = new HContext;
+        HContext* context = new(zone()) HContext;
         AddInstruction(context);
-        call = PreProcessCall(new HCallNamed(context, name, argument_count));
+        call = PreProcessCall(
+            new(zone()) HCallNamed(context, name, argument_count));
       } else {
         AddCheckConstantFunction(expr, receiver, receiver_map, true);
 
-        if (TryInline(expr)) {
-          return;
-        } else {
-          // Check for bailout, as the TryInline call in the if condition above
-          // might return false due to bailout during hydrogen processing.
-          CHECK_BAILOUT;
-          call = PreProcessCall(new HCallConstantFunction(expr->target(),
-                                                          argument_count));
-        }
+        if (TryInline(expr)) return;
+        call = PreProcessCall(
+            new(zone()) HCallConstantFunction(expr->target(),
+                                              argument_count));
       }
     } else if (types != NULL && types->length() > 1) {
       ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
@@ -4268,9 +4468,10 @@
       return;
 
     } else {
-      HContext* context = new HContext;
+      HContext* context = new(zone()) HContext;
       AddInstruction(context);
-      call = PreProcessCall(new HCallNamed(context, name, argument_count));
+      call = PreProcessCall(
+          new(zone()) HCallNamed(context, name, argument_count));
     }
 
   } else {
@@ -4279,7 +4480,7 @@
 
     if (!global_call) {
       ++argument_count;
-      VISIT_FOR_VALUE(expr->expression());
+      CHECK_ALIVE(VisitForValue(expr->expression()));
     }
 
     if (global_call) {
@@ -4287,27 +4488,29 @@
       // If there is a global property cell for the name at compile time
       // and no access check is needed, we assume that the function will
       // not change and generate optimized code for calling it.
-      if (info()->has_global_object() &&
+      LookupResult lookup;
+      GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
+      if (type == kUseCell &&
           !info()->global_object()->IsAccessCheckNeeded()) {
         Handle<GlobalObject> global(info()->global_object());
-        known_global_function = expr->ComputeGlobalTarget(global, var->name());
+        known_global_function = expr->ComputeGlobalTarget(global, &lookup);
       }
       if (known_global_function) {
         // Push the global object instead of the global receiver because
         // code generated by the full code generator expects it.
-        HContext* context = new HContext;
-        HGlobalObject* global_object = new HGlobalObject(context);
+        HContext* context = new(zone()) HContext;
+        HGlobalObject* global_object = new(zone()) HGlobalObject(context);
         AddInstruction(context);
         PushAndAdd(global_object);
-        VisitExpressions(expr->arguments());
-        CHECK_BAILOUT;
+        CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
-        VISIT_FOR_VALUE(expr->expression());
+        CHECK_ALIVE(VisitForValue(expr->expression()));
         HValue* function = Pop();
-        AddInstruction(new HCheckFunction(function, expr->target()));
+        AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
 
         // Replace the global object with the global receiver.
-        HGlobalReceiver* global_receiver = new HGlobalReceiver(global_object);
+        HGlobalReceiver* global_receiver =
+            new(zone()) HGlobalReceiver(global_object);
         // Index of the receiver from the top of the expression stack.
         const int receiver_index = argument_count - 1;
         AddInstruction(global_receiver);
@@ -4315,37 +4518,29 @@
                IsGlobalObject());
         environment()->SetExpressionStackAt(receiver_index, global_receiver);
 
-        if (TryInline(expr)) {
-          return;
-        }
-        // Check for bailout, as trying to inline might fail due to bailout
-        // during hydrogen processing.
-        CHECK_BAILOUT;
-
-        call = PreProcessCall(new HCallKnownGlobal(expr->target(),
-                                                   argument_count));
+        if (TryInline(expr)) return;
+        call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
+                                                           argument_count));
       } else {
-        HContext* context = new HContext;
+        HContext* context = new(zone()) HContext;
         AddInstruction(context);
-        PushAndAdd(new HGlobalObject(context));
-        VisitExpressions(expr->arguments());
-        CHECK_BAILOUT;
+        PushAndAdd(new(zone()) HGlobalObject(context));
+        CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
-        call = PreProcessCall(new HCallGlobal(context,
+        call = PreProcessCall(new(zone()) HCallGlobal(context,
                                               var->name(),
                                               argument_count));
       }
 
     } else {
-      HContext* context = new HContext;
-      HGlobalObject* global_object = new HGlobalObject(context);
+      HContext* context = new(zone()) HContext;
+      HGlobalObject* global_object = new(zone()) HGlobalObject(context);
       AddInstruction(context);
       AddInstruction(global_object);
-      PushAndAdd(new HGlobalReceiver(global_object));
-      VisitExpressions(expr->arguments());
-      CHECK_BAILOUT;
+      PushAndAdd(new(zone()) HGlobalReceiver(global_object));
+      CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
-      call = PreProcessCall(new HCallFunction(context, argument_count));
+      call = PreProcessCall(new(zone()) HCallFunction(context, argument_count));
     }
   }
 
@@ -4355,20 +4550,22 @@
 
 
 void HGraphBuilder::VisitCallNew(CallNew* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   // The constructor function is also used as the receiver argument to the
   // JS construct call builtin.
-  VISIT_FOR_VALUE(expr->expression());
-  VisitExpressions(expr->arguments());
-  CHECK_BAILOUT;
+  CHECK_ALIVE(VisitForValue(expr->expression()));
+  CHECK_ALIVE(VisitExpressions(expr->arguments()));
 
-  HContext* context = new HContext;
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
 
   // The constructor is both an operand to the instruction and an argument
   // to the construct call.
   int arg_count = expr->arguments()->length() + 1;  // Plus constructor.
   HValue* constructor = environment()->ExpressionStackAt(arg_count - 1);
-  HCallNew* call = new HCallNew(context, constructor, arg_count);
+  HCallNew* call = new(zone()) HCallNew(context, constructor, arg_count);
   call->set_position(expr->position());
   PreProcessCall(call);
   ast_context()->ReturnInstruction(call, expr->id());
@@ -4391,8 +4588,11 @@
 
 
 void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   if (expr->is_jsruntime()) {
-    BAILOUT("call to a JavaScript runtime function");
+    return Bailout("call to a JavaScript runtime function");
   }
 
   const Runtime::Function* function = expr->function();
@@ -4412,12 +4612,12 @@
     (this->*generator)(expr);
   } else {
     ASSERT(function->intrinsic_type == Runtime::RUNTIME);
-    VisitArgumentList(expr->arguments());
-    CHECK_BAILOUT;
+    CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
     Handle<String> name = expr->name();
     int argument_count = expr->arguments()->length();
-    HCallRuntime* call = new HCallRuntime(name, function, argument_count);
+    HCallRuntime* call =
+        new(zone()) HCallRuntime(name, function, argument_count);
     call->set_position(RelocInfo::kNoPosition);
     Drop(argument_count);
     ast_context()->ReturnInstruction(call, expr->id());
@@ -4426,9 +4626,12 @@
 
 
 void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   Token::Value op = expr->op();
   if (op == Token::VOID) {
-    VISIT_FOR_EFFECT(expr->expression());
+    CHECK_ALIVE(VisitForEffect(expr->expression()));
     ast_context()->ReturnValue(graph()->GetConstantUndefined());
   } else if (op == Token::DELETE) {
     Property* prop = expr->expression()->AsProperty();
@@ -4436,7 +4639,7 @@
     if (prop == NULL && var == NULL) {
       // The result of deleting a non-property, non-variable reference is
       // true.  Evaluate the subexpression only for its side effects.
-      VISIT_FOR_EFFECT(expr->expression());
+      CHECK_ALIVE(VisitForEffect(expr->expression()));
       ast_context()->ReturnValue(graph()->GetConstantTrue());
     } else if (var != NULL &&
                !var->is_global() &&
@@ -4451,17 +4654,17 @@
         // to accesses on the arguments object.
         ast_context()->ReturnValue(graph()->GetConstantFalse());
       } else {
-        VISIT_FOR_VALUE(prop->obj());
-        VISIT_FOR_VALUE(prop->key());
+        CHECK_ALIVE(VisitForValue(prop->obj()));
+        CHECK_ALIVE(VisitForValue(prop->key()));
         HValue* key = Pop();
         HValue* obj = Pop();
-        HDeleteProperty* instr = new HDeleteProperty(obj, key);
+        HDeleteProperty* instr = new(zone()) HDeleteProperty(obj, key);
         ast_context()->ReturnInstruction(instr, expr->id());
       }
     } else if (var->is_global()) {
-      BAILOUT("delete with global variable");
+      return Bailout("delete with global variable");
     } else {
-      BAILOUT("delete with non-global variable");
+      return Bailout("delete with non-global variable");
     }
   } else if (op == Token::NOT) {
     if (ast_context()->IsTest()) {
@@ -4472,47 +4675,56 @@
     } else if (ast_context()->IsValue()) {
       HBasicBlock* materialize_false = graph()->CreateBasicBlock();
       HBasicBlock* materialize_true = graph()->CreateBasicBlock();
-      VISIT_FOR_CONTROL(expr->expression(),
-                        materialize_false,
-                        materialize_true);
-      materialize_false->SetJoinId(expr->expression()->id());
-      materialize_true->SetJoinId(expr->expression()->id());
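+      // CHECK_BAILOUT rather than CHECK_ALIVE: VisitForControl ends the
+      // current block with a branch, so a NULL current block is the
+      // expected state here and only the bailout flag is checked.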
+      CHECK_BAILOUT(VisitForControl(expr->expression(),
+                                    materialize_false,
+                                    materialize_true));
 
-      set_current_block(materialize_false);
-      Push(graph()->GetConstantFalse());
-      set_current_block(materialize_true);
-      Push(graph()->GetConstantTrue());
+      if (materialize_false->HasPredecessor()) {
+        materialize_false->SetJoinId(expr->expression()->id());
+        set_current_block(materialize_false);
+        Push(graph()->GetConstantFalse());
+      } else {
+        materialize_false = NULL;
+      }
+
+      if (materialize_true->HasPredecessor()) {
+        materialize_true->SetJoinId(expr->expression()->id());
+        set_current_block(materialize_true);
+        Push(graph()->GetConstantTrue());
+      } else {
+        materialize_true = NULL;
+      }
 
       HBasicBlock* join =
           CreateJoin(materialize_false, materialize_true, expr->id());
       set_current_block(join);
-      ast_context()->ReturnValue(Pop());
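+      // CreateJoin tolerates NULL inputs and yields NULL when neither
+      // branch was reachable, in which case there is no value to return.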
+      if (join != NULL) ast_context()->ReturnValue(Pop());
     } else {
       ASSERT(ast_context()->IsEffect());
       VisitForEffect(expr->expression());
     }
 
   } else if (op == Token::TYPEOF) {
-    VISIT_FOR_VALUE(expr->expression());
+    CHECK_ALIVE(VisitForTypeOf(expr->expression()));
     HValue* value = Pop();
-    ast_context()->ReturnInstruction(new HTypeof(value), expr->id());
+    ast_context()->ReturnInstruction(new(zone()) HTypeof(value), expr->id());
 
   } else {
-    VISIT_FOR_VALUE(expr->expression());
+    CHECK_ALIVE(VisitForValue(expr->expression()));
     HValue* value = Pop();
     HInstruction* instr = NULL;
     switch (op) {
       case Token::BIT_NOT:
-        instr = new HBitNot(value);
+        instr = new(zone()) HBitNot(value);
         break;
       case Token::SUB:
-        instr = new HMul(value, graph_->GetConstantMinus1());
+        instr = new(zone()) HMul(value, graph_->GetConstantMinus1());
         break;
       case Token::ADD:
-        instr = new HMul(value, graph_->GetConstant1());
+        instr = new(zone()) HMul(value, graph_->GetConstant1());
         break;
       default:
-        BAILOUT("Value: unsupported unary operation");
+        return Bailout("Value: unsupported unary operation");
         break;
     }
     ast_context()->ReturnInstruction(instr, expr->id());
@@ -4520,26 +4732,21 @@
 }
 
 
-void HGraphBuilder::VisitIncrementOperation(IncrementOperation* expr) {
-  // IncrementOperation is never visited by the visitor. It only
-  // occurs as a subexpression of CountOperation.
-  UNREACHABLE();
-}
-
-
 HInstruction* HGraphBuilder::BuildIncrement(HValue* value, bool increment) {
   HConstant* delta = increment
       ? graph_->GetConstant1()
       : graph_->GetConstantMinus1();
-  HInstruction* instr = new HAdd(value, delta);
+  HInstruction* instr = new(zone()) HAdd(value, delta);
   AssumeRepresentation(instr, Representation::Integer32());
   return instr;
 }
 
 
 void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
-  IncrementOperation* increment = expr->increment();
-  Expression* target = increment->expression();
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  Expression* target = expr->expression();
   VariableProxy* proxy = target->AsVariableProxy();
   Variable* var = proxy->AsVariable();
   Property* prop = target->AsProperty();
@@ -4547,7 +4754,7 @@
   bool inc = expr->op() == Token::INC;
 
   if (var != NULL) {
-    VISIT_FOR_VALUE(target);
+    CHECK_ALIVE(VisitForValue(target));
 
     // Match the full code generator stack by simulating an extra stack
     // element for postfix operations in a non-effect context.
@@ -4567,11 +4774,12 @@
     } else if (var->IsContextSlot()) {
       HValue* context = BuildContextChainWalk(var);
       int index = var->AsSlot()->index();
-      HStoreContextSlot* instr = new HStoreContextSlot(context, index, after);
+      HStoreContextSlot* instr =
+          new(zone()) HStoreContextSlot(context, index, after);
       AddInstruction(instr);
       if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
     } else {
-      BAILOUT("lookup variable in count operation");
+      return Bailout("lookup variable in count operation");
     }
     Drop(has_extra ? 2 : 1);
     ast_context()->ReturnValue(expr->is_postfix() ? before : after);
@@ -4587,7 +4795,7 @@
       bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
       if (has_extra) Push(graph_->GetConstantUndefined());
 
-      VISIT_FOR_VALUE(prop->obj());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
       HValue* obj = Top();
 
       HInstruction* load = NULL;
@@ -4599,7 +4807,7 @@
         load = BuildLoadNamedGeneric(obj, prop);
       }
       PushAndAdd(load);
-      if (load->HasSideEffects()) AddSimulate(increment->id());
+      if (load->HasSideEffects()) AddSimulate(expr->CountId());
 
       HValue* before = Pop();
       // There is no deoptimization to after the increment, so we don't need
@@ -4628,19 +4836,14 @@
       bool has_extra = expr->is_postfix() && !ast_context()->IsEffect();
       if (has_extra) Push(graph_->GetConstantUndefined());
 
-      VISIT_FOR_VALUE(prop->obj());
-      VISIT_FOR_VALUE(prop->key());
+      CHECK_ALIVE(VisitForValue(prop->obj()));
+      CHECK_ALIVE(VisitForValue(prop->key()));
       HValue* obj = environment()->ExpressionStackAt(1);
       HValue* key = environment()->ExpressionStackAt(0);
 
-      bool is_fast_elements = prop->IsMonomorphic() &&
-          prop->GetMonomorphicReceiverType()->has_fast_elements();
-
-      HInstruction* load = is_fast_elements
-          ? BuildLoadKeyedFastElement(obj, key, prop)
-          : BuildLoadKeyedGeneric(obj, key);
+      HInstruction* load = BuildLoadKeyed(obj, key, prop);
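+      // BuildLoadKeyed (and BuildStoreKeyed below) encapsulate the fast-
+      // vs. generic-elements dispatch that was previously open-coded here.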
       PushAndAdd(load);
-      if (load->HasSideEffects()) AddSimulate(increment->id());
+      if (load->HasSideEffects()) AddSimulate(expr->CountId());
 
       HValue* before = Pop();
       // There is no deoptimization to after the increment, so we don't need
@@ -4648,9 +4851,8 @@
       HInstruction* after = BuildIncrement(before, inc);
       AddInstruction(after);
 
-      HInstruction* store = is_fast_elements
-          ? BuildStoreKeyedFastElement(obj, key, after, prop)
-          : BuildStoreKeyedGeneric(obj, key, after);
+      expr->RecordTypeFeedback(oracle());
+      HInstruction* store = BuildStoreKeyed(obj, key, after, expr);
       AddInstruction(store);
 
       // Drop the key from the bailout environment.  Overwrite the receiver
@@ -4666,65 +4868,75 @@
     }
 
   } else {
-    BAILOUT("invalid lhs in count operation");
+    return Bailout("invalid lhs in count operation");
   }
 }
 
 
 HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* string,
                                                         HValue* index) {
-  AddInstruction(new HCheckNonSmi(string));
-  AddInstruction(new HCheckInstanceType(
+  AddInstruction(new(zone()) HCheckNonSmi(string));
+  AddInstruction(new(zone()) HCheckInstanceType(
       string, FIRST_STRING_TYPE, LAST_STRING_TYPE));
-  HStringLength* length = new HStringLength(string);
+  HStringLength* length = new(zone()) HStringLength(string);
   AddInstruction(length);
-  AddInstruction(new HBoundsCheck(index, length));
-  return new HStringCharCodeAt(string, index);
+  AddInstruction(new(zone()) HBoundsCheck(index, length));
+  return new(zone()) HStringCharCodeAt(string, index);
 }
 
 
 HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
                                                   HValue* left,
                                                   HValue* right) {
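+  // Consult type feedback before building the instruction so that string
+  // additions can be specialized to HStringAdd below.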
+  TypeInfo info = oracle()->BinaryType(expr);
   HInstruction* instr = NULL;
   switch (expr->op()) {
     case Token::ADD:
-      instr = new HAdd(left, right);
+      if (info.IsString()) {
+        AddInstruction(new(zone()) HCheckNonSmi(left));
+        AddInstruction(new(zone()) HCheckInstanceType(
+            left, FIRST_STRING_TYPE, LAST_STRING_TYPE));
+        AddInstruction(new(zone()) HCheckNonSmi(right));
+        AddInstruction(new(zone()) HCheckInstanceType(
+            right, FIRST_STRING_TYPE, LAST_STRING_TYPE));
+        instr = new(zone()) HStringAdd(left, right);
+      } else {
+        instr = new(zone()) HAdd(left, right);
+      }
       break;
     case Token::SUB:
-      instr = new HSub(left, right);
+      instr = new(zone()) HSub(left, right);
       break;
     case Token::MUL:
-      instr = new HMul(left, right);
+      instr = new(zone()) HMul(left, right);
       break;
     case Token::MOD:
-      instr = new HMod(left, right);
+      instr = new(zone()) HMod(left, right);
       break;
     case Token::DIV:
-      instr = new HDiv(left, right);
+      instr = new(zone()) HDiv(left, right);
       break;
     case Token::BIT_XOR:
-      instr = new HBitXor(left, right);
+      instr = new(zone()) HBitXor(left, right);
       break;
     case Token::BIT_AND:
-      instr = new HBitAnd(left, right);
+      instr = new(zone()) HBitAnd(left, right);
       break;
     case Token::BIT_OR:
-      instr = new HBitOr(left, right);
+      instr = new(zone()) HBitOr(left, right);
       break;
     case Token::SAR:
-      instr = new HSar(left, right);
+      instr = new(zone()) HSar(left, right);
       break;
     case Token::SHR:
-      instr = new HShr(left, right);
+      instr = new(zone()) HShr(left, right);
       break;
     case Token::SHL:
-      instr = new HShl(left, right);
+      instr = new(zone()) HShl(left, right);
       break;
     default:
       UNREACHABLE();
   }
-  TypeInfo info = oracle()->BinaryType(expr);
   // If we hit an uninitialized binary op stub, we will get type info for
   // a smi operation.  If one of the operands is a constant string, do not
   // generate code assuming it is a smi operation.
@@ -4761,8 +4973,11 @@
 
 
 void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   if (expr->op() == Token::COMMA) {
-    VISIT_FOR_EFFECT(expr->left());
+    CHECK_ALIVE(VisitForEffect(expr->left()));
     // Visit the right subexpression in the same AST context as the entire
     // expression.
     Visit(expr->right());
@@ -4774,32 +4989,38 @@
       // Translate left subexpression.
       HBasicBlock* eval_right = graph()->CreateBasicBlock();
       if (is_logical_and) {
-        VISIT_FOR_CONTROL(expr->left(), eval_right, context->if_false());
+        CHECK_BAILOUT(VisitForControl(expr->left(),
+                                      eval_right,
+                                      context->if_false()));
       } else {
-        VISIT_FOR_CONTROL(expr->left(), context->if_true(), eval_right);
+        CHECK_BAILOUT(VisitForControl(expr->left(),
+                                      context->if_true(),
+                                      eval_right));
       }
-      eval_right->SetJoinId(expr->RightId());
 
       // Translate right subexpression by visiting it in the same AST
       // context as the entire expression.
-      set_current_block(eval_right);
-      Visit(expr->right());
+      if (eval_right->HasPredecessor()) {
+        eval_right->SetJoinId(expr->RightId());
+        set_current_block(eval_right);
+        Visit(expr->right());
+      }
 
     } else if (ast_context()->IsValue()) {
-      VISIT_FOR_VALUE(expr->left());
+      CHECK_ALIVE(VisitForValue(expr->left()));
       ASSERT(current_block() != NULL);
 
       // We need an extra block to maintain edge-split form.
       HBasicBlock* empty_block = graph()->CreateBasicBlock();
       HBasicBlock* eval_right = graph()->CreateBasicBlock();
       HTest* test = is_logical_and
-          ? new HTest(Top(), eval_right, empty_block)
-          : new HTest(Top(), empty_block, eval_right);
+          ? new(zone()) HTest(Top(), eval_right, empty_block)
+          : new(zone()) HTest(Top(), empty_block, eval_right);
       current_block()->Finish(test);
 
       set_current_block(eval_right);
       Drop(1);  // Value of the left subexpression.
-      VISIT_FOR_VALUE(expr->right());
+      CHECK_BAILOUT(VisitForValue(expr->right()));
 
       HBasicBlock* join_block =
           CreateJoin(empty_block, current_block(), expr->id());
@@ -4813,33 +5034,42 @@
       // extra block to maintain edge-split form.
       HBasicBlock* empty_block = graph()->CreateBasicBlock();
       HBasicBlock* right_block = graph()->CreateBasicBlock();
-      HBasicBlock* join_block = graph()->CreateBasicBlock();
       if (is_logical_and) {
-        VISIT_FOR_CONTROL(expr->left(), right_block, empty_block);
+        CHECK_BAILOUT(VisitForControl(expr->left(), right_block, empty_block));
       } else {
-        VISIT_FOR_CONTROL(expr->left(), empty_block, right_block);
+        CHECK_BAILOUT(VisitForControl(expr->left(), empty_block, right_block));
       }
+
       // TODO(kmillikin): Find a way to fix this.  It's ugly that there are
       // actually two empty blocks (one here and one inserted by
       // TestContext::BuildBranch), that they both have an HSimulate even
       // though the second one is not a merge node, and that we really have
       // no good AST ID to put on that first HSimulate.
-      empty_block->SetJoinId(expr->id());
-      right_block->SetJoinId(expr->RightId());
-      set_current_block(right_block);
-      VISIT_FOR_EFFECT(expr->right());
+      if (empty_block->HasPredecessor()) {
+        empty_block->SetJoinId(expr->id());
+      } else {
+        empty_block = NULL;
+      }
 
-      empty_block->Goto(join_block);
-      current_block()->Goto(join_block);
-      join_block->SetJoinId(expr->id());
+      if (right_block->HasPredecessor()) {
+        right_block->SetJoinId(expr->RightId());
+        set_current_block(right_block);
+        CHECK_BAILOUT(VisitForEffect(expr->right()));
+        right_block = current_block();
+      } else {
+        right_block = NULL;
+      }
+
+      HBasicBlock* join_block =
+          CreateJoin(empty_block, right_block, expr->id());
       set_current_block(join_block);
       // We did not materialize any value in the predecessor environments,
       // so there is no need to handle it here.
     }
 
   } else {
-    VISIT_FOR_VALUE(expr->left());
-    VISIT_FOR_VALUE(expr->right());
+    CHECK_ALIVE(VisitForValue(expr->left()));
+    CHECK_ALIVE(VisitForValue(expr->right()));
 
     HValue* right = Pop();
     HValue* left = Pop();
@@ -4878,13 +5108,16 @@
 
 
 void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
   if (IsClassOfTest(expr)) {
     CallRuntime* call = expr->left()->AsCallRuntime();
-    VISIT_FOR_VALUE(call->arguments()->at(0));
+    CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
     HValue* value = Pop();
     Literal* literal = expr->right()->AsLiteral();
     Handle<String> rhs = Handle<String>::cast(literal->handle());
-    HInstruction* instr = new HClassOfTest(value, rhs);
+    HInstruction* instr = new(zone()) HClassOfTest(value, rhs);
     instr->set_position(expr->position());
     ast_context()->ReturnInstruction(instr, expr->id());
     return;
@@ -4896,17 +5129,17 @@
   if ((expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT) &&
       left_unary != NULL && left_unary->op() == Token::TYPEOF &&
       right_literal != NULL && right_literal->handle()->IsString()) {
-    VISIT_FOR_VALUE(left_unary->expression());
+    CHECK_ALIVE(VisitForTypeOf(left_unary->expression()));
     HValue* left = Pop();
-    HInstruction* instr = new HTypeofIs(left,
+    HInstruction* instr = new(zone()) HTypeofIs(left,
         Handle<String>::cast(right_literal->handle()));
     instr->set_position(expr->position());
     ast_context()->ReturnInstruction(instr, expr->id());
     return;
   }
 
-  VISIT_FOR_VALUE(expr->left());
-  VISIT_FOR_VALUE(expr->right());
+  CHECK_ALIVE(VisitForValue(expr->left()));
+  CHECK_ALIVE(VisitForValue(expr->right()));
 
   HValue* right = Pop();
   HValue* left = Pop();
@@ -4943,32 +5176,32 @@
     // If the target is not null we have found a known global function that is
     // assumed to stay the same for this instanceof.
     if (target.is_null()) {
-      HContext* context = new HContext;
+      HContext* context = new(zone()) HContext;
       AddInstruction(context);
-      instr = new HInstanceOf(context, left, right);
+      instr = new(zone()) HInstanceOf(context, left, right);
     } else {
-      AddInstruction(new HCheckFunction(right, target));
-      instr = new HInstanceOfKnownGlobal(left, target);
+      AddInstruction(new(zone()) HCheckFunction(right, target));
+      instr = new(zone()) HInstanceOfKnownGlobal(left, target);
     }
   } else if (op == Token::IN) {
-    BAILOUT("Unsupported comparison: in");
+    return Bailout("Unsupported comparison: in");
   } else if (type_info.IsNonPrimitive()) {
     switch (op) {
       case Token::EQ:
       case Token::EQ_STRICT: {
-        AddInstruction(new HCheckNonSmi(left));
+        AddInstruction(new(zone()) HCheckNonSmi(left));
         AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(left));
-        AddInstruction(new HCheckNonSmi(right));
+        AddInstruction(new(zone()) HCheckNonSmi(right));
         AddInstruction(HCheckInstanceType::NewIsJSObjectOrJSFunction(right));
-        instr = new HCompareJSObjectEq(left, right);
+        instr = new(zone()) HCompareJSObjectEq(left, right);
         break;
       }
       default:
-        BAILOUT("Unsupported non-primitive compare");
+        return Bailout("Unsupported non-primitive compare");
         break;
     }
   } else {
-    HCompare* compare = new HCompare(left, right, op);
+    HCompare* compare = new(zone()) HCompare(left, right, op);
     Representation r = ToRepresentation(type_info);
     compare->SetInputRepresentation(r);
     instr = compare;
@@ -4979,16 +5212,22 @@
 
 
 void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
-  VISIT_FOR_VALUE(expr->expression());
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  CHECK_ALIVE(VisitForValue(expr->expression()));
 
   HValue* value = Pop();
-  HIsNull* compare = new HIsNull(value, expr->is_strict());
+  HIsNull* compare = new(zone()) HIsNull(value, expr->is_strict());
   ast_context()->ReturnInstruction(compare, expr->id());
 }
 
 
 void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
-  BAILOUT("ThisFunction");
+  ASSERT(!HasStackOverflow());
+  ASSERT(current_block() != NULL);
+  ASSERT(current_block()->HasPredecessor());
+  return Bailout("ThisFunction");
 }
 
 
@@ -5003,7 +5242,7 @@
       (slot != NULL && slot->type() == Slot::LOOKUP) ||
       decl->mode() == Variable::CONST ||
       decl->fun() != NULL) {
-    BAILOUT("unsupported declaration");
+    return Bailout("unsupported declaration");
   }
 }
 
@@ -5012,107 +5251,118 @@
 // Support for types.
 void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HIsSmi* result = new HIsSmi(value);
+  HIsSmi* result = new(zone()) HIsSmi(value);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasInstanceType* result =
-      new HHasInstanceType(value, FIRST_JS_OBJECT_TYPE, LAST_TYPE);
+      new(zone()) HHasInstanceType(value, FIRST_JS_OBJECT_TYPE, LAST_TYPE);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HHasInstanceType* result = new HHasInstanceType(value, JS_FUNCTION_TYPE);
+  HHasInstanceType* result =
+      new(zone()) HHasInstanceType(value, JS_FUNCTION_TYPE);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HHasCachedArrayIndex* result = new HHasCachedArrayIndex(value);
+  HHasCachedArrayIndex* result = new(zone()) HHasCachedArrayIndex(value);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HHasInstanceType* result = new HHasInstanceType(value, JS_ARRAY_TYPE);
+  HHasInstanceType* result = new(zone()) HHasInstanceType(value, JS_ARRAY_TYPE);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HHasInstanceType* result = new HHasInstanceType(value, JS_REGEXP_TYPE);
+  HHasInstanceType* result =
+      new(zone()) HHasInstanceType(value, JS_REGEXP_TYPE);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HIsObject* test = new HIsObject(value);
+  HIsObject* test = new(zone()) HIsObject(value);
   ast_context()->ReturnInstruction(test, call->id());
 }
 
 
 void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
-  BAILOUT("inlined runtime function: IsNonNegativeSmi");
+  return Bailout("inlined runtime function: IsNonNegativeSmi");
 }
 
 
 void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
-  BAILOUT("inlined runtime function: IsUndetectableObject");
+  return Bailout("inlined runtime function: IsUndetectableObject");
 }
 
 
 void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
     CallRuntime* call) {
-  BAILOUT("inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
+  return Bailout(
+      "inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
 }
 
 
 // Support for construct call checks.
 void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 0);
-  ast_context()->ReturnInstruction(new HIsConstructCall, call->id());
+  if (function_state()->outer() != NULL) {
+    // We are generating the graph for an inlined function.  Constructor
+    // inlining is currently not supported, so we can simply return false
+    // from %_IsConstructCall().
+    ast_context()->ReturnValue(graph()->GetConstantFalse());
+  } else {
+    ast_context()->ReturnInstruction(new(zone()) HIsConstructCall, call->id());
+  }
 }
 
 
 // Support for arguments.length and arguments[?].
 void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 0);
-  HInstruction* elements = AddInstruction(new HArgumentsElements);
-  HArgumentsLength* result = new HArgumentsLength(elements);
+  HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+  HArgumentsLength* result = new(zone()) HArgumentsLength(elements);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateArguments(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* index = Pop();
-  HInstruction* elements = AddInstruction(new HArgumentsElements);
-  HInstruction* length = AddInstruction(new HArgumentsLength(elements));
-  HAccessArgumentsAt* result = new HAccessArgumentsAt(elements, length, index);
+  HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+  HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
+  HAccessArgumentsAt* result =
+      new(zone()) HAccessArgumentsAt(elements, length, index);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
@@ -5121,29 +5371,29 @@
 void HGraphBuilder::GenerateClassOf(CallRuntime* call) {
   // The special form recognized by IsClassOfTest is handled before we get
   // here and does not cause a bailout.
-  BAILOUT("inlined runtime function: ClassOf");
+  return Bailout("inlined runtime function: ClassOf");
 }
 
 
 void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HValueOf* result = new HValueOf(value);
+  HValueOf* result = new(zone()) HValueOf(value);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
-  BAILOUT("inlined runtime function: SetValueOf");
+  return Bailout("inlined runtime function: SetValueOf");
 }
 
 
 // Fast support for charCodeAt(n).
 void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
-  VISIT_FOR_VALUE(call->arguments()->at(1));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* index = Pop();
   HValue* string = Pop();
   HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
@@ -5154,9 +5404,9 @@
 // Fast support for string.charAt(n) and string[n].
 void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* char_code = Pop();
-  HStringCharFromCode* result = new HStringCharFromCode(char_code);
+  HStringCharFromCode* result = new(zone()) HStringCharFromCode(char_code);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
@@ -5164,13 +5414,13 @@
 // Fast support for string.charAt(n) and string[n].
 void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
-  VISIT_FOR_VALUE(call->arguments()->at(1));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* index = Pop();
   HValue* string = Pop();
   HStringCharCodeAt* char_code = BuildStringCharCodeAt(string, index);
   AddInstruction(char_code);
-  HStringCharFromCode* result = new HStringCharFromCode(char_code);
+  HStringCharFromCode* result = new(zone()) HStringCharFromCode(char_code);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
@@ -5178,11 +5428,11 @@
 // Fast support for object equality testing.
 void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 2);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
-  VISIT_FOR_VALUE(call->arguments()->at(1));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* right = Pop();
   HValue* left = Pop();
-  HCompareJSObjectEq* result = new HCompareJSObjectEq(left, right);
+  HCompareJSObjectEq* result = new(zone()) HCompareJSObjectEq(left, right);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
@@ -5195,18 +5445,17 @@
 
 // Fast support for Math.random().
 void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
-  BAILOUT("inlined runtime function: RandomHeapNumber");
+  return Bailout("inlined runtime function: RandomHeapNumber");
 }
 
 
 // Fast support for StringAdd.
 void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new HContext;
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  HCallStub* result = new HCallStub(context, CodeStub::StringAdd, 2);
+  HCallStub* result = new(zone()) HCallStub(context, CodeStub::StringAdd, 2);
   Drop(2);
   ast_context()->ReturnInstruction(result, call->id());
 }
@@ -5215,11 +5464,10 @@
 // Fast support for SubString.
 void HGraphBuilder::GenerateSubString(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new HContext;
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  HCallStub* result = new HCallStub(context, CodeStub::SubString, 3);
+  HCallStub* result = new(zone()) HCallStub(context, CodeStub::SubString, 3);
   Drop(3);
   ast_context()->ReturnInstruction(result, call->id());
 }
@@ -5228,11 +5476,11 @@
 // Fast support for StringCompare.
 void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new HContext;
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  HCallStub* result = new HCallStub(context, CodeStub::StringCompare, 2);
+  HCallStub* result =
+      new(zone()) HCallStub(context, CodeStub::StringCompare, 2);
   Drop(2);
   ast_context()->ReturnInstruction(result, call->id());
 }
@@ -5241,11 +5489,10 @@
 // Support for direct calls from JavaScript to native RegExp code.
 void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
   ASSERT_EQ(4, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new HContext;
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  HCallStub* result = new HCallStub(context, CodeStub::RegExpExec, 4);
+  HCallStub* result = new(zone()) HCallStub(context, CodeStub::RegExpExec, 4);
   Drop(4);
   ast_context()->ReturnInstruction(result, call->id());
 }
@@ -5254,12 +5501,11 @@
 // Construct a RegExp exec result with two in-object properties.
 void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
   ASSERT_EQ(3, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new HContext;
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
   HCallStub* result =
-      new HCallStub(context, CodeStub::RegExpConstructResult, 3);
+      new(zone()) HCallStub(context, CodeStub::RegExpConstructResult, 3);
   Drop(3);
   ast_context()->ReturnInstruction(result, call->id());
 }
@@ -5267,18 +5513,18 @@
 
 // Support for fast native caches.
 void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
-  BAILOUT("inlined runtime function: GetFromCache");
+  return Bailout("inlined runtime function: GetFromCache");
 }
 
 
 // Fast support for number to string.
 void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new HContext;
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  HCallStub* result = new HCallStub(context, CodeStub::NumberToString, 1);
+  HCallStub* result =
+      new(zone()) HCallStub(context, CodeStub::NumberToString, 1);
   Drop(1);
   ast_context()->ReturnInstruction(result, call->id());
 }
@@ -5288,35 +5534,49 @@
 // indices. This should only be used if the indices are known to be
 // non-negative and within bounds of the elements array at the call site.
 void HGraphBuilder::GenerateSwapElements(CallRuntime* call) {
-  BAILOUT("inlined runtime function: SwapElements");
+  return Bailout("inlined runtime function: SwapElements");
 }
 
 
 // Fast call for custom callbacks.
 void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
-  BAILOUT("inlined runtime function: CallFunction");
+  // Subtract 1: the function to call is not itself an argument to the call.
+  int arg_count = call->arguments()->length() - 1;
+  ASSERT(arg_count >= 1);  // There's always at least a receiver.
+
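+  // Push everything except the trailing function operand as arguments,
+  // then evaluate the function itself to a value for HInvokeFunction.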
+  for (int i = 0; i < arg_count; ++i) {
+    CHECK_ALIVE(VisitArgument(call->arguments()->at(i)));
+  }
+  CHECK_ALIVE(VisitForValue(call->arguments()->last()));
+  HValue* function = Pop();
+  HContext* context = new(zone()) HContext;
+  AddInstruction(context);
+  HInvokeFunction* result =
+      new(zone()) HInvokeFunction(context, function, arg_count);
+  Drop(arg_count);
+  ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 // Fast call to math functions.
 void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
   ASSERT_EQ(2, call->arguments()->length());
-  VISIT_FOR_VALUE(call->arguments()->at(0));
-  VISIT_FOR_VALUE(call->arguments()->at(1));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* right = Pop();
   HValue* left = Pop();
-  HPower* result = new HPower(left, right);
+  HPower* result = new(zone()) HPower(left, right);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new HContext;
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1);
+  HCallStub* result =
+      new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
   result->set_transcendental_type(TranscendentalCache::SIN);
   Drop(1);
   ast_context()->ReturnInstruction(result, call->id());
@@ -5325,11 +5585,11 @@
 
 void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new HContext;
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1);
+  HCallStub* result =
+      new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
   result->set_transcendental_type(TranscendentalCache::COS);
   Drop(1);
   ast_context()->ReturnInstruction(result, call->id());
@@ -5338,11 +5598,11 @@
 
 void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
-  VisitArgumentList(call->arguments());
-  CHECK_BAILOUT;
-  HContext* context = new HContext;
+  CHECK_ALIVE(VisitArgumentList(call->arguments()));
+  HContext* context = new(zone()) HContext;
   AddInstruction(context);
-  HCallStub* result = new HCallStub(context, CodeStub::TranscendentalCache, 1);
+  HCallStub* result =
+      new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
   result->set_transcendental_type(TranscendentalCache::LOG);
   Drop(1);
   ast_context()->ReturnInstruction(result, call->id());
@@ -5350,35 +5610,32 @@
 
 
 void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
-  BAILOUT("inlined runtime function: MathSqrt");
+  return Bailout("inlined runtime function: MathSqrt");
 }
 
 
 // Check whether two RegExps are equivalent.
 void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
-  BAILOUT("inlined runtime function: IsRegExpEquivalent");
+  return Bailout("inlined runtime function: IsRegExpEquivalent");
 }
 
 
 void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 1);
-  VISIT_FOR_VALUE(call->arguments()->at(0));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
-  HGetCachedArrayIndex* result = new HGetCachedArrayIndex(value);
+  HGetCachedArrayIndex* result = new(zone()) HGetCachedArrayIndex(value);
   ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
-  BAILOUT("inlined runtime function: FastAsciiArrayJoin");
+  return Bailout("inlined runtime function: FastAsciiArrayJoin");
 }
 
 
-#undef BAILOUT
 #undef CHECK_BAILOUT
-#undef VISIT_FOR_EFFECT
-#undef VISIT_FOR_VALUE
-#undef ADD_TO_SUBGRAPH
+#undef CHECK_ALIVE
 
 
 HEnvironment::HEnvironment(HEnvironment* outer,
@@ -5453,7 +5710,7 @@
     } else if (values_[i] != other->values_[i]) {
       // There is a fresh value on the incoming edge, a phi is needed.
       ASSERT(values_[i] != NULL && other->values_[i] != NULL);
-      HPhi* phi = new HPhi(i);
+      HPhi* phi = new(block->zone()) HPhi(i);
       HValue* old_value = values_[i];
       for (int j = 0; j < block->predecessors()->length(); j++) {
         phi->AddInput(old_value);
@@ -5510,7 +5767,7 @@
 
 
 HEnvironment* HEnvironment::Copy() const {
-  return new HEnvironment(this);
+  return new(closure()->GetIsolate()->zone()) HEnvironment(this);
 }
 
 
@@ -5524,7 +5781,7 @@
 HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
   HEnvironment* new_env = Copy();
   for (int i = 0; i < values_.length(); ++i) {
-    HPhi* phi = new HPhi(i);
+    HPhi* phi = new(loop_header->zone()) HPhi(i);
     phi->AddInput(values_[i]);
     new_env->values_[i] = phi;
     loop_header->AddPhi(phi);
@@ -5543,7 +5800,9 @@
   HEnvironment* outer = Copy();
   outer->Drop(arity + 1);  // Including receiver.
   outer->ClearHistory();
-  HEnvironment* inner = new HEnvironment(outer, function->scope(), target);
+  Zone* zone = closure()->GetIsolate()->zone();
+  HEnvironment* inner =
+      new(zone) HEnvironment(outer, function->scope(), target);
   // Get the argument values from the original environment.
   if (is_speculative) {
     for (int i = 0; i <= arity; ++i) {  // Include receiver.
@@ -5563,7 +5822,7 @@
     inner->SetValueAt(local_base + i, undefined);
   }
 
-  inner->set_ast_id(function->id());
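+  // Tag the inlined environment with the canonical function-entry AST id
+  // rather than the function literal's own id.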
+  inner->set_ast_id(AstNode::kFunctionEntryId);
   return inner;
 }
 
diff --git a/src/hydrogen.h b/src/hydrogen.h
index e14799a..74c119a 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -141,6 +141,8 @@
   bool IsInlineReturnTarget() const { return is_inline_return_target_; }
   void MarkAsInlineReturnTarget() { is_inline_return_target_ = true; }
 
+  inline Zone* zone();
+
 #ifdef DEBUG
   void Verify();
 #endif
@@ -201,6 +203,9 @@
  public:
   explicit HGraph(CompilationInfo* info);
 
+  Isolate* isolate() { return isolate_; }
+  Zone* zone() { return isolate_->zone(); }
+
   const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
   const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
   HBasicBlock* entry_block() const { return entry_block_; }
@@ -281,8 +286,6 @@
   void InitializeInferredTypes(int from_inclusive, int to_inclusive);
   void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
 
-  Isolate* isolate() { return isolate_; }
-
   Isolate* isolate_;
   int next_block_id_;
   HBasicBlock* entry_block_;
@@ -301,6 +304,9 @@
 };
 
 
+Zone* HBasicBlock::zone() { return graph_->zone(); }
+
+
 class HEnvironment: public ZoneObject {
  public:
   HEnvironment(HEnvironment* outer,
@@ -453,12 +459,17 @@
   // the instruction as value.
   virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
 
+  void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
+  bool is_for_typeof() { return for_typeof_; }
+
  protected:
   AstContext(HGraphBuilder* owner, Expression::Context kind);
   virtual ~AstContext();
 
   HGraphBuilder* owner() const { return owner_; }
 
+  inline Zone* zone();
+
   // We want to be able to assert, in a context-specific way, that the stack
   // height makes sense when the context is filled.
 #ifdef DEBUG
@@ -469,6 +480,7 @@
   HGraphBuilder* owner_;
   Expression::Context kind_;
   AstContext* outer_;
+  bool for_typeof_;
 };
 
 
@@ -544,6 +556,8 @@
     test_context_ = NULL;
   }
 
+  FunctionState* outer() { return outer_; }
+
  private:
   HGraphBuilder* owner_;
 
@@ -624,7 +638,8 @@
         break_scope_(NULL),
         graph_(NULL),
         current_block_(NULL),
-        inlined_count_(0) {
+        inlined_count_(0),
+        zone_(info->isolate()->zone()) {
     // This is not initialized in the initializer list because the
     // constructor for the initial state relies on function_state_ == NULL
     // to know it's the initial state.
@@ -694,6 +709,9 @@
   void ClearInlinedTestContext() {
     function_state()->ClearInlinedTestContext();
   }
+  bool function_strict_mode() {
+    return function_state()->compilation_info()->is_strict_mode();
+  }
 
   // Generators for inline runtime functions.
 #define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize)      \
@@ -735,6 +753,7 @@
   void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
 
   void VisitForValue(Expression* expr);
+  void VisitForTypeOf(Expression* expr);
   void VisitForEffect(Expression* expr);
   void VisitForControl(Expression* expr,
                        HBasicBlock* true_block,
@@ -770,9 +789,13 @@
   HBasicBlock* CreateLoopHeaderBlock();
 
   // Helpers for flow graph construction.
-  void LookupGlobalPropertyCell(Variable* var,
-                                LookupResult* lookup,
-                                bool is_store);
+  enum GlobalPropertyAccess {
+    kUseCell,
+    kUseGeneric
+  };
+  GlobalPropertyAccess LookupGlobalProperty(Variable* var,
+                                            LookupResult* lookup,
+                                            bool is_store);
 
   bool TryArgumentsAccess(Property* expr);
   bool TryCallApply(Call* expr);
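Note: replacing LookupGlobalPropertyCell's bool out-parameter contract with an enum-returning LookupGlobalProperty makes every call site read as an explicit two-way decision: embed the global's property cell, or fall back to the generic IC. Sketch of the refactoring shape, with illustrative placeholder parameters in place of the real LookupResult checks:

    enum GlobalPropertyAccess { kUseCell, kUseGeneric };

    GlobalPropertyAccess LookupGlobalProperty(bool lookup_succeeded,
                                              bool is_configurable) {
      // A found, non-configurable property cell can be baked into
      // optimized code; anything else must take the generic path.
      if (lookup_succeeded && !is_configurable) return kUseCell;
      return kUseGeneric;
    }

    // Call sites become self-documenting:
    //   if (LookupGlobalProperty(...) == kUseCell) { /* load from cell */ }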
@@ -825,6 +848,10 @@
   HInstruction* BuildLoadKeyedGeneric(HValue* object,
                                       HValue* key);
 
+  HInstruction* BuildLoadKeyed(HValue* obj,
+                               HValue* key,
+                               Property* prop);
+
   HInstruction* BuildLoadNamed(HValue* object,
                                Property* prop,
                                Handle<Map> map,
@@ -854,7 +881,12 @@
       HValue* object,
       HValue* key,
       HValue* val,
-      Assignment* expr);
+      Expression* expr);
+
+  HInstruction* BuildStoreKeyed(HValue* object,
+                                HValue* key,
+                                HValue* value,
+                                Expression* assignment);
 
   HValue* BuildContextChainWalk(Variable* var);
 
@@ -863,6 +895,7 @@
                                 Handle<Map> receiver_map,
                                 bool smi_and_map_check);
 
+  Zone* zone() { return zone_; }
 
   // The translation state of the currently-being-translated function.
   FunctionState* function_state_;
@@ -882,6 +915,8 @@
 
   int inlined_count_;
 
+  Zone* zone_;
+
   friend class FunctionState;  // Pushes and pops the state stack.
   friend class AstContext;  // Pushes and pops the AST context stack.
 
@@ -889,6 +924,9 @@
 };
 
 
+Zone* AstContext::zone() { return owner_->zone(); }
+
+
 class HValueMap: public ZoneObject {
  public:
   HValueMap()
@@ -911,7 +949,10 @@
   }
 
   HValue* Lookup(HValue* value) const;
-  HValueMap* Copy() const { return new HValueMap(this); }
+
+  HValueMap* Copy(Zone* zone) const {
+    return new(zone) HValueMap(this);
+  }
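Note: Copy() growing an explicit Zone* parameter follows the same discipline as the hydrogen.cc changes above: the caller names the arena rather than the callee reaching for an ambient "current" zone, which is what makes multiple isolates safe. A tiny sketch, reusing the Zone/ZoneObject stand-ins from the earlier note:

    struct ValueMapLike : public ZoneObject {
      int count = 0;
      // The allocation context is an argument, not global state.
      ValueMapLike* Copy(Zone* zone) const {
        return new (zone) ValueMapLike(*this);
      }
    };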
 
  private:
   // A linked list of HValue* values.  Stored in arrays.
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 1da3f81..a9247f4 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -225,9 +225,9 @@
     StaticVisitor::VisitPointer(heap, target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
-    StaticVisitor::VisitCodeTarget(this);
+    StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
-    StaticVisitor::VisitGlobalPropertyCell(this);
+    StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
     CPU::FlushICache(pc_, sizeof(Address));
@@ -237,7 +237,7 @@
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
-    StaticVisitor::VisitDebugTarget(this);
+    StaticVisitor::VisitDebugTarget(heap, this);
 #endif
   } else if (mode == RelocInfo::RUNTIME_ENTRY) {
     StaticVisitor::VisitRuntimeEntry(this);
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index e6d245e..9273037 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -48,24 +48,37 @@
 // -----------------------------------------------------------------------------
 // Implementation of CpuFeatures
 
-CpuFeatures::CpuFeatures()
-    : supported_(0),
-      enabled_(0),
-      found_by_runtime_probing_(0) {
-}
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+uint64_t CpuFeatures::supported_ = 0;
+uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 
-// The Probe method needs executable memory, so it uses Heap::CreateCode.
-// Allocation failure is silent and leads to safe default.
-void CpuFeatures::Probe(bool portable) {
-  ASSERT(HEAP->HasBeenSetup());
+void CpuFeatures::Probe() {
+  ASSERT(!initialized_);
   ASSERT(supported_ == 0);
-  if (portable && Serializer::enabled()) {
+#ifdef DEBUG
+  initialized_ = true;
+#endif
+  if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
 
-  Assembler assm(NULL, 0);
+  const int kBufferSize = 4 * KB;
+  VirtualMemory* memory = new VirtualMemory(kBufferSize);
+  if (!memory->IsReserved()) {
+    delete memory;
+    return;
+  }
+  ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
+  if (!memory->Commit(memory->address(), kBufferSize, true /* executable */)) {
+    delete memory;
+    return;
+  }
+
+  Assembler assm(NULL, memory->address(), kBufferSize);
   Label cpuid, done;
 #define __ assm.
   // Save old esp, since we are going to modify the stack.
@@ -119,27 +132,15 @@
   __ ret(0);
 #undef __
 
-  CodeDesc desc;
-  assm.GetCode(&desc);
-  Object* code;
-  { MaybeObject* maybe_code =
-        assm.isolate()->heap()->CreateCode(desc,
-                                           Code::ComputeFlags(Code::STUB),
-                                           Handle<Code>::null());
-    if (!maybe_code->ToObject(&code)) return;
-  }
-  if (!code->IsCode()) return;
-
-  PROFILE(ISOLATE,
-          CodeCreateEvent(Logger::BUILTIN_TAG,
-                          Code::cast(code), "CpuFeatures::Probe"));
   typedef uint64_t (*F0)();
-  F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+  F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
   supported_ = probe();
   found_by_runtime_probing_ = supported_;
   uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
   supported_ |= os_guarantees;
-  found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
+  found_by_runtime_probing_ &= ~os_guarantees;
+
+  delete memory;
 }
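Note: Probe() previously baked its detection stub into a heap Code object, which required a booted heap and had to tolerate allocation failure; the rewrite assembles the CPUID stub into a freshly reserved and committed executable VirtualMemory buffer and calls it through a raw function pointer, so probing can run before any heap exists. (The Isolate* threaded through the Assembler constructor below is part of the same de-globalization.) A minimal POSIX sketch of the run-from-executable-buffer pattern, with mmap standing in for VirtualMemory (not the V8 API):

    #include <cstdint>
    #include <cstring>
    #include <sys/mman.h>

    typedef uint64_t (*F0)();  // same signature Probe() casts to

    uint64_t RunProbeStub(const uint8_t* code, size_t size) {
      void* mem = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (mem == MAP_FAILED) return 0;  // silent failure => safe defaults
      memcpy(mem, code, size);          // copy the generated machine code
      // On W^X platforms, mprotect() the buffer from RW to RX here
      // instead of mapping it RWX up front.
      uint64_t features = ((F0)mem)();  // call it, like supported_ = probe()
      munmap(mem, size);
      return features;
    }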
 
 
@@ -297,8 +298,8 @@
 static void InitCoverageLog();
 #endif
 
-Assembler::Assembler(void* buffer, int buffer_size)
-    : AssemblerBase(Isolate::Current()),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+    : AssemblerBase(arg_isolate),
       positions_recorder_(this),
       emit_debug_code_(FLAG_debug_code) {
   if (buffer == NULL) {
@@ -386,7 +387,7 @@
 
 
 void Assembler::cpuid() {
-  ASSERT(isolate()->cpu_features()->IsEnabled(CPUID));
+  ASSERT(CpuFeatures::IsEnabled(CPUID));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x0F);
@@ -747,7 +748,7 @@
 
 
 void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(CMOV));
+  ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   UNIMPLEMENTED();
@@ -758,7 +759,7 @@
 
 
 void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(CMOV));
+  ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   UNIMPLEMENTED();
@@ -769,7 +770,7 @@
 
 
 void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(CMOV));
+  ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   // Opcode: 0f 40 + cc /r.
@@ -1450,7 +1451,7 @@
 
 
 void Assembler::rdtsc() {
-  ASSERT(isolate()->cpu_features()->IsEnabled(RDTSC));
+  ASSERT(CpuFeatures::IsEnabled(RDTSC));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x0F);
@@ -1856,7 +1857,7 @@
 
 
 void Assembler::fisttp_s(const Operand& adr) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
+  ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xDB);
@@ -1865,7 +1866,7 @@
 
 
 void Assembler::fisttp_d(const Operand& adr) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
+  ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xDD);
@@ -2134,7 +2135,7 @@
 
 
 void Assembler::cvttss2si(Register dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);
@@ -2145,7 +2146,7 @@
 
 
 void Assembler::cvttsd2si(Register dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2156,7 +2157,7 @@
 
 
 void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2167,7 +2168,7 @@
 
 
 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);
@@ -2178,7 +2179,7 @@
 
 
 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2189,7 +2190,7 @@
 
 
 void Assembler::addsd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2200,7 +2201,7 @@
 
 
 void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2211,7 +2212,7 @@
 
 
 void Assembler::subsd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2222,7 +2223,7 @@
 
 
 void Assembler::divsd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2233,7 +2234,7 @@
 
 
 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2264,7 +2265,7 @@
 
 
 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2275,7 +2276,7 @@
 
 
 void Assembler::movmskpd(Register dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2286,7 +2287,7 @@
 
 
 void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2298,7 +2299,7 @@
 
 
 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x0F);
@@ -2308,7 +2309,7 @@
 
 
 void Assembler::movdqa(const Operand& dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2319,7 +2320,7 @@
 
 
 void Assembler::movdqa(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2330,7 +2331,7 @@
 
 
 void Assembler::movdqu(const Operand& dst, XMMRegister src ) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);
@@ -2341,7 +2342,7 @@
 
 
 void Assembler::movdqu(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);
@@ -2352,7 +2353,7 @@
 
 
 void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2364,7 +2365,7 @@
 
 
 void Assembler::movntdq(const Operand& dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2400,7 +2401,7 @@
 
 
 void Assembler::movsd(const Operand& dst, XMMRegister src ) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);  // double
@@ -2411,7 +2412,7 @@
 
 
 void Assembler::movsd(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);  // double
@@ -2422,7 +2423,7 @@
 
 
 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF2);
@@ -2433,7 +2434,7 @@
 
 
 void Assembler::movss(const Operand& dst, XMMRegister src ) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);  // float
@@ -2444,7 +2445,7 @@
 
 
 void Assembler::movss(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);  // float
@@ -2455,7 +2456,7 @@
 
 
 void Assembler::movss(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0xF3);
@@ -2466,7 +2467,7 @@
 
 
 void Assembler::movd(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2477,7 +2478,7 @@
 
 
 void Assembler::movd(const Operand& dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2488,7 +2489,7 @@
 
 
 void Assembler::pand(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2499,7 +2500,7 @@
 
 
 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2510,7 +2511,7 @@
 
 
 void Assembler::por(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2521,7 +2522,7 @@
 
 
 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2533,7 +2534,7 @@
 
 
 void Assembler::psllq(XMMRegister reg, int8_t shift) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2545,7 +2546,7 @@
 
 
 void Assembler::psllq(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2556,7 +2557,7 @@
 
 
 void Assembler::psrlq(XMMRegister reg, int8_t shift) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2568,7 +2569,7 @@
 
 
 void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2579,7 +2580,7 @@
 
 
 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+  ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2591,7 +2592,7 @@
 
 
 void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
@@ -2604,7 +2605,7 @@
 
 
 void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE4_1));
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   EMIT(0x66);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 8e0c762..079dca7 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -446,16 +446,15 @@
 //   } else {
 //     // Generate standard x87 floating point code.
 //   }
-class CpuFeatures {
+class CpuFeatures : public AllStatic {
  public:
-  // Detect features of the target CPU. If the portable flag is set,
-  // the method sets safe defaults if the serializer is enabled
-  // (snapshots must be portable).
-  void Probe(bool portable);
-  void Clear() { supported_ = 0; }
+  // Detect features of the target CPU. Set safe defaults if the serializer
+  // is enabled (snapshots must be portable).
+  static void Probe();
 
   // Check whether a feature is supported by the target CPU.
-  bool IsSupported(CpuFeature f) const {
+  static bool IsSupported(CpuFeature f) {
+    ASSERT(initialized_);
     if (f == SSE2 && !FLAG_enable_sse2) return false;
     if (f == SSE3 && !FLAG_enable_sse3) return false;
     if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
@@ -463,46 +462,85 @@
     if (f == RDTSC && !FLAG_enable_rdtsc) return false;
     return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
   }
+
+#ifdef DEBUG
   // Check whether a feature is currently enabled.
-  bool IsEnabled(CpuFeature f) const {
-    return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
+  static bool IsEnabled(CpuFeature f) {
+    ASSERT(initialized_);
+    Isolate* isolate = Isolate::UncheckedCurrent();
+    if (isolate == NULL) {
+      // When no isolate is available, work as if we're running in
+      // release mode.
+      return IsSupported(f);
+    }
+    uint64_t enabled = isolate->enabled_cpu_features();
+    return (enabled & (static_cast<uint64_t>(1) << f)) != 0;
   }
+#endif
+
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
    public:
-    explicit Scope(CpuFeature f)
-        : cpu_features_(Isolate::Current()->cpu_features()),
-          isolate_(Isolate::Current()) {
+    explicit Scope(CpuFeature f) {
       uint64_t mask = static_cast<uint64_t>(1) << f;
-      ASSERT(cpu_features_->IsSupported(f));
+      ASSERT(CpuFeatures::IsSupported(f));
       ASSERT(!Serializer::enabled() ||
-          (cpu_features_->found_by_runtime_probing_ & mask) == 0);
-      old_enabled_ = cpu_features_->enabled_;
-      cpu_features_->enabled_ |= mask;
+             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+      isolate_ = Isolate::UncheckedCurrent();
+      old_enabled_ = 0;
+      if (isolate_ != NULL) {
+        old_enabled_ = isolate_->enabled_cpu_features();
+        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+      }
     }
     ~Scope() {
-      ASSERT_EQ(Isolate::Current(), isolate_);
-      cpu_features_->enabled_ = old_enabled_;
+      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+      if (isolate_ != NULL) {
+        isolate_->set_enabled_cpu_features(old_enabled_);
+      }
     }
    private:
-    uint64_t old_enabled_;
-    CpuFeatures* cpu_features_;
     Isolate* isolate_;
+    uint64_t old_enabled_;
 #else
    public:
     explicit Scope(CpuFeature f) {}
 #endif
   };
 
+  class TryForceFeatureScope BASE_EMBEDDED {
+   public:
+    explicit TryForceFeatureScope(CpuFeature f)
+        : old_supported_(CpuFeatures::supported_) {
+      if (CanForce()) {
+        CpuFeatures::supported_ |= (static_cast<uint64_t>(1) << f);
+      }
+    }
+
+    ~TryForceFeatureScope() {
+      if (CanForce()) {
+        CpuFeatures::supported_ = old_supported_;
+      }
+    }
+
+   private:
+    static bool CanForce() {
+      // It's only safe to temporarily force support of CPU features
+      // when there's only a single isolate, which is guaranteed when
+      // the serializer is enabled.
+      return Serializer::enabled();
+    }
+
+    const uint64_t old_supported_;
+  };
+
  private:
-  CpuFeatures();
-
-  uint64_t supported_;
-  uint64_t enabled_;
-  uint64_t found_by_runtime_probing_;
-
-  friend class Isolate;
+#ifdef DEBUG
+  static bool initialized_;
+#endif
+  static uint64_t supported_;
+  static uint64_t found_by_runtime_probing_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
 };
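Note: with CpuFeatures now AllStatic, the *supported* mask becomes a process-wide static probed once, while the debug-only *enabled* mask moves onto the isolate; Scope is a classic save/set/restore RAII guard over that per-isolate mask, and TryForceFeatureScope widens supported_ only when Serializer::enabled() guarantees a single isolate. A self-contained sketch of the RAII shape (simplified stand-ins, not the real classes):

    #include <cstdint>

    struct IsolateLike {
      uint64_t enabled_cpu_features = 0;
    };

    class FeatureScope {
     public:
      FeatureScope(IsolateLike* isolate, int feature)
          : isolate_(isolate),
            old_enabled_(isolate->enabled_cpu_features) {
        // Enable the feature for the dynamic extent of this scope.
        isolate_->enabled_cpu_features |=
            static_cast<uint64_t>(1) << feature;
      }
      ~FeatureScope() {
        // Restore the saved mask even on early returns or exceptions.
        isolate_->enabled_cpu_features = old_enabled_;
      }
     private:
      IsolateLike* isolate_;
      uint64_t old_enabled_;
    };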
@@ -535,7 +573,8 @@
   // for code generation and assumes its size to be buffer_size. If the buffer
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
-  Assembler(void* buffer, int buffer_size);
+  // TODO(vitalyr): the assembler does not need an isolate.
+  Assembler(Isolate* isolate, void* buffer, int buffer_size);
   ~Assembler();
 
   // Overrides the default provided by FLAG_debug_code.
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 2970a0e..29c67b5 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "deoptimizer.h"
 #include "full-codegen.h"
 
@@ -1523,12 +1523,8 @@
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  // We shouldn't be performing on-stack replacement in the first
-  // place if the CPU features we need for the optimized Crankshaft
-  // code aren't supported.
-  CpuFeatures* cpu_features = masm->isolate()->cpu_features();
-  cpu_features->Probe(false);
-  if (!cpu_features->IsSupported(SSE2)) {
+  CpuFeatures::TryForceFeatureScope scope(SSE2);
+  if (!CpuFeatures::IsSupported(SSE2)) {
     __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
     return;
   }
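Note: this is the intended use of TryForceFeatureScope: on-stack replacement into Crankshaft code requires SSE2, so the builtin briefly forces the feature (a no-op outside serialization) and aborts if it is still unsupported. A usage example for the FeatureScope sketch in the assembler-ia32.h note above, reusing its IsolateLike/FeatureScope types (bit 26 is SSE2's CPUID bit, used here purely for illustration):

    #include <cassert>

    int main() {
      IsolateLike isolate;
      {
        FeatureScope use_sse2(&isolate, /*feature=*/26);
        assert(isolate.enabled_cpu_features != 0);  // enabled inside
      }
      assert(isolate.enabled_cpu_features == 0);    // restored outside
      return 0;
    }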
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 96faae9..275e8e2 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -291,166 +291,6 @@
 }
 
 
-const char* GenericBinaryOpStub::GetName() {
-  if (name_ != NULL) return name_;
-  const int kMaxNameLength = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
-      kMaxNameLength);
-  if (name_ == NULL) return "OOM";
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name;
-  switch (mode_) {
-    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
-    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
-    default: overwrite_name = "UnknownOverwrite"; break;
-  }
-
-  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
-               op_name,
-               overwrite_name,
-               (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
-               args_in_registers_ ? "RegArgs" : "StackArgs",
-               args_reversed_ ? "_R" : "",
-               static_operands_type_.ToString(),
-               BinaryOpIC::GetName(runtime_operands_type_));
-  return name_;
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Register left,
-    Register right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ push(left);
-    __ push(right);
-  } else {
-    // The calling convention with registers is left in edx and right in eax.
-    Register left_arg = edx;
-    Register right_arg = eax;
-    if (!(left.is(left_arg) && right.is(right_arg))) {
-      if (left.is(right_arg) && right.is(left_arg)) {
-        if (IsOperationCommutative()) {
-          SetArgsReversed();
-        } else {
-          __ xchg(left, right);
-        }
-      } else if (left.is(left_arg)) {
-        __ mov(right_arg, right);
-      } else if (right.is(right_arg)) {
-        __ mov(left_arg, left);
-      } else if (left.is(right_arg)) {
-        if (IsOperationCommutative()) {
-          __ mov(left_arg, right);
-          SetArgsReversed();
-        } else {
-          // Order of moves important to avoid destroying left argument.
-          __ mov(left_arg, left);
-          __ mov(right_arg, right);
-        }
-      } else if (right.is(left_arg)) {
-        if (IsOperationCommutative()) {
-          __ mov(right_arg, left);
-          SetArgsReversed();
-        } else {
-          // Order of moves important to avoid destroying right argument.
-          __ mov(right_arg, right);
-          __ mov(left_arg, left);
-        }
-      } else {
-        // Order of moves is not important.
-        __ mov(left_arg, left);
-        __ mov(right_arg, right);
-      }
-    }
-
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-    __ IncrementCounter(
-        masm->isolate()->counters()->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Register left,
-    Smi* right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ push(left);
-    __ push(Immediate(right));
-  } else {
-    // The calling convention with registers is left in edx and right in eax.
-    Register left_arg = edx;
-    Register right_arg = eax;
-    if (left.is(left_arg)) {
-      __ mov(right_arg, Immediate(right));
-    } else if (left.is(right_arg) && IsOperationCommutative()) {
-      __ mov(left_arg, Immediate(right));
-      SetArgsReversed();
-    } else {
-      // For non-commutative operations, left and right_arg might be
-      // the same register.  Therefore, the order of the moves is
-      // important here in order to not overwrite left before moving
-      // it to left_arg.
-      __ mov(left_arg, left);
-      __ mov(right_arg, Immediate(right));
-    }
-
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-    __ IncrementCounter(
-        masm->isolate()->counters()->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Smi* left,
-    Register right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ push(Immediate(left));
-    __ push(right);
-  } else {
-    // The calling convention with registers is left in edx and right in eax.
-    Register left_arg = edx;
-    Register right_arg = eax;
-    if (right.is(right_arg)) {
-      __ mov(left_arg, Immediate(left));
-    } else if (right.is(left_arg) && IsOperationCommutative()) {
-      __ mov(right_arg, Immediate(left));
-      SetArgsReversed();
-    } else {
-      // For non-commutative operations, right and left_arg might be
-      // the same register.  Therefore, the order of the moves is
-      // important here in order to not overwrite right before moving
-      // it to right_arg.
-      __ mov(right_arg, right);
-      __ mov(left_arg, Immediate(left));
-    }
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-    Counters* counters = masm->isolate()->counters();
-    __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
 class FloatingPointHelper : public AllStatic {
  public:
 
@@ -534,762 +374,6 @@
 };
 
 
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
-  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
-  // dividend in eax and edx free for the division.  Use eax, ebx for those.
-  Comment load_comment(masm, "-- Load arguments");
-  Register left = edx;
-  Register right = eax;
-  if (op_ == Token::DIV || op_ == Token::MOD) {
-    left = eax;
-    right = ebx;
-    if (HasArgsInRegisters()) {
-      __ mov(ebx, eax);
-      __ mov(eax, edx);
-    }
-  }
-  if (!HasArgsInRegisters()) {
-    __ mov(right, Operand(esp, 1 * kPointerSize));
-    __ mov(left, Operand(esp, 2 * kPointerSize));
-  }
-
-  if (static_operands_type_.IsSmi()) {
-    if (FLAG_debug_code) {
-      __ AbortIfNotSmi(left);
-      __ AbortIfNotSmi(right);
-    }
-    if (op_ == Token::BIT_OR) {
-      __ or_(right, Operand(left));
-      GenerateReturn(masm);
-      return;
-    } else if (op_ == Token::BIT_AND) {
-      __ and_(right, Operand(left));
-      GenerateReturn(masm);
-      return;
-    } else if (op_ == Token::BIT_XOR) {
-      __ xor_(right, Operand(left));
-      GenerateReturn(masm);
-      return;
-    }
-  }
-
-  // 2. Prepare the smi check of both operands by oring them together.
-  Comment smi_check_comment(masm, "-- Smi check arguments");
-  Label not_smis;
-  Register combined = ecx;
-  ASSERT(!left.is(combined) && !right.is(combined));
-  switch (op_) {
-    case Token::BIT_OR:
-      // Perform the operation into eax and smi check the result.  Preserve
-      // eax in case the result is not a smi.
-      ASSERT(!left.is(ecx) && !right.is(ecx));
-      __ mov(ecx, right);
-      __ or_(right, Operand(left));  // Bitwise or is commutative.
-      combined = right;
-      break;
-
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-      __ mov(combined, right);
-      __ or_(combined, Operand(left));
-      break;
-
-    case Token::SHL:
-    case Token::SAR:
-    case Token::SHR:
-      // Move the right operand into ecx for the shift operation, use eax
-      // for the smi check register.
-      ASSERT(!left.is(ecx) && !right.is(ecx));
-      __ mov(ecx, right);
-      __ or_(right, Operand(left));
-      combined = right;
-      break;
-
-    default:
-      break;
-  }
-
-  // 3. Perform the smi check of the operands.
-  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
-  __ test(combined, Immediate(kSmiTagMask));
-  __ j(not_zero, &not_smis, not_taken);
-
-  // 4. Operands are both smis, perform the operation leaving the result in
-  // eax and check the result if necessary.
-  Comment perform_smi(masm, "-- Perform smi operation");
-  Label use_fp_on_smis;
-  switch (op_) {
-    case Token::BIT_OR:
-      // Nothing to do.
-      break;
-
-    case Token::BIT_XOR:
-      ASSERT(right.is(eax));
-      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
-      break;
-
-    case Token::BIT_AND:
-      ASSERT(right.is(eax));
-      __ and_(right, Operand(left));  // Bitwise and is commutative.
-      break;
-
-    case Token::SHL:
-      // Remove tags from operands (but keep sign).
-      __ SmiUntag(left);
-      __ SmiUntag(ecx);
-      // Perform the operation.
-      __ shl_cl(left);
-      // Check that the *signed* result fits in a smi.
-      __ cmp(left, 0xc0000000);
-      __ j(sign, &use_fp_on_smis, not_taken);
-      // Tag the result and store it in register eax.
-      __ SmiTag(left);
-      __ mov(eax, left);
-      break;
-
-    case Token::SAR:
-      // Remove tags from operands (but keep sign).
-      __ SmiUntag(left);
-      __ SmiUntag(ecx);
-      // Perform the operation.
-      __ sar_cl(left);
-      // Tag the result and store it in register eax.
-      __ SmiTag(left);
-      __ mov(eax, left);
-      break;
-
-    case Token::SHR:
-      // Remove tags from operands (but keep sign).
-      __ SmiUntag(left);
-      __ SmiUntag(ecx);
-      // Perform the operation.
-      __ shr_cl(left);
-      // Check that the *unsigned* result fits in a smi.
-      // Neither of the two high-order bits can be set:
-      // - 0x80000000: high bit would be lost when smi tagging.
-      // - 0x40000000: this number would convert to negative when
-      // Smi tagging these two cases can only happen with shifts
-      // by 0 or 1 when handed a valid smi.
-      __ test(left, Immediate(0xc0000000));
-      __ j(not_zero, slow, not_taken);
-      // Tag the result and store it in register eax.
-      __ SmiTag(left);
-      __ mov(eax, left);
-      break;
-
-    case Token::ADD:
-      ASSERT(right.is(eax));
-      __ add(right, Operand(left));  // Addition is commutative.
-      __ j(overflow, &use_fp_on_smis, not_taken);
-      break;
-
-    case Token::SUB:
-      __ sub(left, Operand(right));
-      __ j(overflow, &use_fp_on_smis, not_taken);
-      __ mov(eax, left);
-      break;
-
-    case Token::MUL:
-      // If the smi tag is 0 we can just leave the tag on one operand.
-      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
-      // We can't revert the multiplication if the result is not a smi
-      // so save the right operand.
-      __ mov(ebx, right);
-      // Remove tag from one of the operands (but keep sign).
-      __ SmiUntag(right);
-      // Do multiplication.
-      __ imul(right, Operand(left));  // Multiplication is commutative.
-      __ j(overflow, &use_fp_on_smis, not_taken);
-      // Check for negative zero result.  Use combined = left | right.
-      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
-      break;
-
-    case Token::DIV:
-      // We can't revert the division if the result is not a smi so
-      // save the left operand.
-      __ mov(edi, left);
-      // Check for 0 divisor.
-      __ test(right, Operand(right));
-      __ j(zero, &use_fp_on_smis, not_taken);
-      // Sign extend left into edx:eax.
-      ASSERT(left.is(eax));
-      __ cdq();
-      // Divide edx:eax by right.
-      __ idiv(right);
-      // Check for the corner case of dividing the most negative smi by
-      // -1. We cannot use the overflow flag, since it is not set by idiv
-      // instruction.
-      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-      __ cmp(eax, 0x40000000);
-      __ j(equal, &use_fp_on_smis);
-      // Check for negative zero result.  Use combined = left | right.
-      __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
-      // Check that the remainder is zero.
-      __ test(edx, Operand(edx));
-      __ j(not_zero, &use_fp_on_smis);
-      // Tag the result and store it in register eax.
-      __ SmiTag(eax);
-      break;
-
-    case Token::MOD:
-      // Check for 0 divisor.
-      __ test(right, Operand(right));
-      __ j(zero, &not_smis, not_taken);
-
-      // Sign extend left into edx:eax.
-      ASSERT(left.is(eax));
-      __ cdq();
-      // Divide edx:eax by right.
-      __ idiv(right);
-      // Check for negative zero result.  Use combined = left | right.
-      __ NegativeZeroTest(edx, combined, slow);
-      // Move remainder to register eax.
-      __ mov(eax, edx);
-      break;
-
-    default:
-      UNREACHABLE();
-  }
-
-  // 5. Emit return of result in eax.
-  GenerateReturn(masm);
-
-  // 6. For some operations emit inline code to perform floating point
-  // operations on known smis (e.g., if the result of the operation
-  // overflowed the smi range).
-  switch (op_) {
-    case Token::SHL: {
-      Comment perform_float(masm, "-- Perform float operation on smis");
-      __ bind(&use_fp_on_smis);
-      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
-        // Result we want is in left == edx, so we can put the allocated heap
-        // number in eax.
-        __ AllocateHeapNumber(eax, ecx, ebx, slow);
-        // Store the result in the HeapNumber and return.
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
-          CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(left));
-          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-        } else {
-          // It's OK to overwrite the right argument on the stack because we
-          // are about to return.
-          __ mov(Operand(esp, 1 * kPointerSize), left);
-          __ fild_s(Operand(esp, 1 * kPointerSize));
-          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-        }
-        GenerateReturn(masm);
-      } else {
-        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
-        __ jmp(slow);
-      }
-      break;
-    }
-
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV: {
-      Comment perform_float(masm, "-- Perform float operation on smis");
-      __ bind(&use_fp_on_smis);
-      // Restore arguments to edx, eax.
-      switch (op_) {
-        case Token::ADD:
-          // Revert right = right + left.
-          __ sub(right, Operand(left));
-          break;
-        case Token::SUB:
-          // Revert left = left - right.
-          __ add(left, Operand(right));
-          break;
-        case Token::MUL:
-          // Right was clobbered but a copy is in ebx.
-          __ mov(right, ebx);
-          break;
-        case Token::DIV:
-          // Left was clobbered but a copy is in edi.  Right is in ebx for
-          // division.
-          __ mov(edx, edi);
-          __ mov(eax, right);
-          break;
-        default: UNREACHABLE();
-          break;
-      }
-      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
-        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
-          CpuFeatures::Scope use_sse2(SSE2);
-          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
-          switch (op_) {
-            case Token::ADD: __ addsd(xmm0, xmm1); break;
-            case Token::SUB: __ subsd(xmm0, xmm1); break;
-            case Token::MUL: __ mulsd(xmm0, xmm1); break;
-            case Token::DIV: __ divsd(xmm0, xmm1); break;
-            default: UNREACHABLE();
-          }
-          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
-        } else {  // SSE2 not available, use FPU.
-          FloatingPointHelper::LoadFloatSmis(masm, ebx);
-          switch (op_) {
-            case Token::ADD: __ faddp(1); break;
-            case Token::SUB: __ fsubp(1); break;
-            case Token::MUL: __ fmulp(1); break;
-            case Token::DIV: __ fdivp(1); break;
-            default: UNREACHABLE();
-          }
-          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
-        }
-        __ mov(eax, ecx);
-        GenerateReturn(masm);
-      } else {
-        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
-        __ jmp(slow);
-      }
-      break;
-    }
-
-    default:
-      break;
-  }
-
-  // 7. Non-smi operands, fall out to the non-smi code with the operands in
-  // edx and eax.
-  Comment done_comment(masm, "-- Enter non-smi code");
-  __ bind(&not_smis);
-  switch (op_) {
-    case Token::BIT_OR:
-    case Token::SHL:
-    case Token::SAR:
-    case Token::SHR:
-      // Right operand is saved in ecx and eax was destroyed by the smi
-      // check.
-      __ mov(eax, ecx);
-      break;
-
-    case Token::DIV:
-    case Token::MOD:
-      // Operands are in eax, ebx at this point.
-      __ mov(edx, eax);
-      __ mov(eax, ebx);
-      break;
-
-    default:
-      break;
-  }
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
-  Label call_runtime;
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
-
-  if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) {
-    Label slow;
-    if (ShouldGenerateSmiCode()) GenerateSmiCode(masm, &slow);
-    __ bind(&slow);
-    GenerateTypeTransition(masm);
-  }
-
-  // Generate fast case smi code if requested. This flag is set when the fast
-  // case smi code is not generated by the caller. Generating it here will speed
-  // up common operations.
-  if (ShouldGenerateSmiCode()) {
-    GenerateSmiCode(masm, &call_runtime);
-  } else if (op_ != Token::MOD) {  // MOD goes straight to runtime.
-    if (!HasArgsInRegisters()) {
-      GenerateLoadArguments(masm);
-    }
-  }
-
-  // Floating point case.
-  if (ShouldGenerateFPCode()) {
-    switch (op_) {
-      case Token::ADD:
-      case Token::SUB:
-      case Token::MUL:
-      case Token::DIV: {
-        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
-            HasSmiCodeInStub()) {
-          // Execution reaches this point when the first non-smi argument occurs
-          // (and only if smi code is generated). This is the right moment to
-          // patch to HEAP_NUMBERS state. The transition is attempted only for
-          // the four basic operations. The stub stays in the DEFAULT state
-          // forever for all other operations (also if smi code is skipped).
-          GenerateTypeTransition(masm);
-          break;
-        }
-
-        Label not_floats;
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
-          CpuFeatures::Scope use_sse2(SSE2);
-          if (static_operands_type_.IsNumber()) {
-            if (FLAG_debug_code) {
-              // Assert at runtime that inputs are only numbers.
-              __ AbortIfNotNumber(edx);
-              __ AbortIfNotNumber(eax);
-            }
-            if (static_operands_type_.IsSmi()) {
-              if (FLAG_debug_code) {
-                __ AbortIfNotSmi(edx);
-                __ AbortIfNotSmi(eax);
-              }
-              FloatingPointHelper::LoadSSE2Smis(masm, ecx);
-            } else {
-              FloatingPointHelper::LoadSSE2Operands(masm);
-            }
-          } else {
-            FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
-          }
-
-          switch (op_) {
-            case Token::ADD: __ addsd(xmm0, xmm1); break;
-            case Token::SUB: __ subsd(xmm0, xmm1); break;
-            case Token::MUL: __ mulsd(xmm0, xmm1); break;
-            case Token::DIV: __ divsd(xmm0, xmm1); break;
-            default: UNREACHABLE();
-          }
-          GenerateHeapResultAllocation(masm, &call_runtime);
-          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-          GenerateReturn(masm);
-        } else {  // SSE2 not available, use FPU.
-          if (static_operands_type_.IsNumber()) {
-            if (FLAG_debug_code) {
-              // Assert at runtime that inputs are only numbers.
-              __ AbortIfNotNumber(edx);
-              __ AbortIfNotNumber(eax);
-            }
-          } else {
-            FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
-          }
-          FloatingPointHelper::LoadFloatOperands(
-              masm,
-              ecx,
-              FloatingPointHelper::ARGS_IN_REGISTERS);
-          switch (op_) {
-            case Token::ADD: __ faddp(1); break;
-            case Token::SUB: __ fsubp(1); break;
-            case Token::MUL: __ fmulp(1); break;
-            case Token::DIV: __ fdivp(1); break;
-            default: UNREACHABLE();
-          }
-          Label after_alloc_failure;
-          GenerateHeapResultAllocation(masm, &after_alloc_failure);
-          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-          GenerateReturn(masm);
-          __ bind(&after_alloc_failure);
-          __ ffree();
-          __ jmp(&call_runtime);
-        }
-        __ bind(&not_floats);
-        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
-            !HasSmiCodeInStub()) {
-          // Execution reaches this point when the first non-number argument
-          // occurs (and only if smi code is skipped from the stub, otherwise
-          // the patching has already been done earlier in this case branch).
-          // Try patching to STRINGS for ADD operation.
-          if (op_ == Token::ADD) {
-            GenerateTypeTransition(masm);
-          }
-        }
-        break;
-      }
-      case Token::MOD: {
-        // For MOD we go directly to runtime in the non-smi case.
-        break;
-      }
-      case Token::BIT_OR:
-      case Token::BIT_AND:
-      case Token::BIT_XOR:
-      case Token::SAR:
-      case Token::SHL:
-      case Token::SHR: {
-        Label non_smi_result;
-        FloatingPointHelper::LoadAsIntegers(masm,
-                                            static_operands_type_,
-                                            use_sse3_,
-                                            &call_runtime);
-        switch (op_) {
-          case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-          case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-          case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
-          case Token::SAR: __ sar_cl(eax); break;
-          case Token::SHL: __ shl_cl(eax); break;
-          case Token::SHR: __ shr_cl(eax); break;
-          default: UNREACHABLE();
-        }
-        if (op_ == Token::SHR) {
-          // Check if result is non-negative and fits in a smi.
-          __ test(eax, Immediate(0xc0000000));
-          __ j(not_zero, &call_runtime);
-        } else {
-          // Check if result fits in a smi.
-          __ cmp(eax, 0xc0000000);
-          __ j(negative, &non_smi_result);
-        }
-        // Tag smi result and return.
-        __ SmiTag(eax);
-        GenerateReturn(masm);
-
-        // All ops except SHR return a signed int32 that we load in
-        // a HeapNumber.
-        if (op_ != Token::SHR) {
-          __ bind(&non_smi_result);
-          // Allocate a heap number if needed.
-          __ mov(ebx, Operand(eax));  // ebx: result
-          NearLabel skip_allocation;
-          switch (mode_) {
-            case OVERWRITE_LEFT:
-            case OVERWRITE_RIGHT:
-              // If the operand was an object, we skip the
-              // allocation of a heap number.
-              __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
-                                  1 * kPointerSize : 2 * kPointerSize));
-              __ test(eax, Immediate(kSmiTagMask));
-              __ j(not_zero, &skip_allocation, not_taken);
-              // Fall through!
-            case NO_OVERWRITE:
-              __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
-              __ bind(&skip_allocation);
-              break;
-            default: UNREACHABLE();
-          }
-          // Store the result in the HeapNumber and return.
-          if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
-            CpuFeatures::Scope use_sse2(SSE2);
-            __ cvtsi2sd(xmm0, Operand(ebx));
-            __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-          } else {
-            __ mov(Operand(esp, 1 * kPointerSize), ebx);
-            __ fild_s(Operand(esp, 1 * kPointerSize));
-            __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-          }
-          GenerateReturn(masm);
-        }
-        break;
-      }
-      default: UNREACHABLE(); break;
-    }
-  }
-
-  // If all else fails, use the runtime system to get the correct
-  // result. If arguments was passed in registers now place them on the
-  // stack in the correct order below the return address.
-
-  // Avoid hitting the string ADD code below when allocation fails in
-  // the floating point code above.
-  if (op_ != Token::ADD) {
-    __ bind(&call_runtime);
-  }
-
-  if (HasArgsInRegisters()) {
-    GenerateRegisterArgsPush(masm);
-  }
-
-  switch (op_) {
-    case Token::ADD: {
-      // Test for string arguments before calling runtime.
-
-      // If this stub has already generated FP-specific code then the arguments
-      // are already in edx, eax
-      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
-        GenerateLoadArguments(masm);
-      }
-
-      // Registers containing left and right operands respectively.
-      Register lhs, rhs;
-      if (HasArgsReversed()) {
-        lhs = eax;
-        rhs = edx;
-      } else {
-        lhs = edx;
-        rhs = eax;
-      }
-
-      // Test if left operand is a string.
-      NearLabel lhs_not_string;
-      __ test(lhs, Immediate(kSmiTagMask));
-      __ j(zero, &lhs_not_string);
-      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
-      __ j(above_equal, &lhs_not_string);
-
-      StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
-      __ TailCallStub(&string_add_left_stub);
-
-      NearLabel call_runtime_with_args;
-      // Left operand is not a string, test right.
-      __ bind(&lhs_not_string);
-      __ test(rhs, Immediate(kSmiTagMask));
-      __ j(zero, &call_runtime_with_args);
-      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
-      __ j(above_equal, &call_runtime_with_args);
-
-      StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
-      __ TailCallStub(&string_add_right_stub);
-
-      // Neither argument is a string.
-      __ bind(&call_runtime);
-      if (HasArgsInRegisters()) {
-        GenerateRegisterArgsPush(masm);
-      }
-      __ bind(&call_runtime_with_args);
-      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
-      break;
-    }
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
-      break;
-    case Token::MUL:
-      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
-      break;
-    case Token::DIV:
-      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
-      break;
-    case Token::MOD:
-      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
-      break;
-    case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
-      break;
-    case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
-      break;
-    case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
-      break;
-    case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
-      break;
-    case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
-      break;
-    case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
-                                                       Label* alloc_failure) {
-  Label skip_allocation;
-  OverwriteMode mode = mode_;
-  if (HasArgsReversed()) {
-    if (mode == OVERWRITE_RIGHT) {
-      mode = OVERWRITE_LEFT;
-    } else if (mode == OVERWRITE_LEFT) {
-      mode = OVERWRITE_RIGHT;
-    }
-  }
-  switch (mode) {
-    case OVERWRITE_LEFT: {
-      // If the argument in edx is already an object, we skip the
-      // allocation of a heap number.
-      __ test(edx, Immediate(kSmiTagMask));
-      __ j(not_zero, &skip_allocation, not_taken);
-      // Allocate a heap number for the result. Keep eax and edx intact
-      // for the possible runtime call.
-      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
-      // Now edx can be overwritten losing one of the arguments as we are
-      // now done and will not need it any more.
-      __ mov(edx, Operand(ebx));
-      __ bind(&skip_allocation);
-      // Use object in edx as a result holder
-      __ mov(eax, Operand(edx));
-      break;
-    }
-    case OVERWRITE_RIGHT:
-      // If the argument in eax is already an object, we skip the
-      // allocation of a heap number.
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(not_zero, &skip_allocation, not_taken);
-      // Fall through!
-    case NO_OVERWRITE:
-      // Allocate a heap number for the result. Keep eax and edx intact
-      // for the possible runtime call.
-      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
-      // Now eax can be overwritten losing one of the arguments as we are
-      // now done and will not need it any more.
-      __ mov(eax, ebx);
-      __ bind(&skip_allocation);
-      break;
-    default: UNREACHABLE();
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
-  // If arguments are not passed in registers read them from the stack.
-  ASSERT(!HasArgsInRegisters());
-  __ mov(eax, Operand(esp, 1 * kPointerSize));
-  __ mov(edx, Operand(esp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
-  // If arguments are not passed in registers remove them from the stack before
-  // returning.
-  if (!HasArgsInRegisters()) {
-    __ ret(2 * kPointerSize);  // Remove both operands
-  } else {
-    __ ret(0);
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
-  ASSERT(HasArgsInRegisters());
-  __ pop(ecx);
-  if (HasArgsReversed()) {
-    __ push(eax);
-    __ push(edx);
-  } else {
-    __ push(edx);
-    __ push(eax);
-  }
-  __ push(ecx);
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  // Ensure the operands are on the stack.
-  if (HasArgsInRegisters()) {
-    GenerateRegisterArgsPush(masm);
-  }
-
-  __ pop(ecx);  // Save return address.
-
-  // Left and right arguments are now on top.
-  // Push this stub's key. Although the operation and the type info are
-  // encoded into the key, the encoding is opaque, so push them too.
-  __ push(Immediate(Smi::FromInt(MinorKey())));
-  __ push(Immediate(Smi::FromInt(op_)));
-  __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
-
-  __ push(ecx);  // Push return address.
-
-  // Patch the caller to an appropriate specialized stub and return the
-  // operation result to the caller of the stub.
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
-      5,
-      1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
-  return stub.GetCode();
-}
-
-
 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
     TRBinaryOpIC::TypeInfo type_info,
     TRBinaryOpIC::TypeInfo result_type_info) {
@@ -1362,6 +446,9 @@
     case TRBinaryOpIC::ODDBALL:
       GenerateOddballStub(masm);
       break;
+    case TRBinaryOpIC::BOTH_STRING:
+      GenerateBothStringStub(masm);
+      break;
     case TRBinaryOpIC::STRING:
       GenerateStringStub(masm);
       break;
@@ -1660,7 +747,7 @@
         // number in eax.
         __ AllocateHeapNumber(eax, ecx, ebx, slow);
         // Store the result in the HeapNumber and return.
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+        if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           __ cvtsi2sd(xmm0, Operand(left));
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
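This hunk is the first of a mechanical change running through the rest of the merge: CPU feature queries move from the per-isolate cpu_features() object to a static CpuFeatures::IsSupported() call. A minimal sketch of the static pattern, assuming the feature bits are probed once (e.g. via CPUID) and cached in a process-wide bitmask; this is an illustration, not V8's actual implementation:

    #include <cstdint>

    enum CpuFeature { SSE2, SSE3, SSE4_1, CMOV };

    class CpuFeatures {
     public:
      // Answer feature queries with a bit test against the cached mask.
      static bool IsSupported(CpuFeature f) {
        return (supported_ & (1ull << f)) != 0;
      }
      // Called once at startup with bits detected from CPUID.
      static void Probe(uint64_t detected) { supported_ = detected; }
     private:
      static uint64_t supported_;
    };
    uint64_t CpuFeatures::supported_ = 0;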
@@ -1705,7 +792,7 @@
             break;
         }
         __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+        if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           FloatingPointHelper::LoadSSE2Smis(masm, ebx);
           switch (op_) {
@@ -1825,6 +912,38 @@
 }
 
 
+void TypeRecordingBinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::BOTH_STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = edx;
+  Register right = eax;
+
+  // Test if left operand is a string.
+  __ test(left, Immediate(kSmiTagMask));
+  __ j(zero, &call_runtime);
+  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &call_runtime);
+
+  // Test if right operand is a string.
+  __ test(right, Immediate(kSmiTagMask));
+  __ j(zero, &call_runtime);
+  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
+  __ j(above_equal, &call_runtime);
+
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
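Each operand passes through the same two-step string test: bail out if it is a smi (small integers carry a clear low tag bit and are never strings), then bail out if its instance type sorts at or above FIRST_NONSTRING_TYPE. A sketch of the predicate, assuming kSmiTag == 0 with a one-bit tag and string instance types ordered first, both of which hold in this tree:

    // Hypothetical standalone version of the per-operand check.
    bool OperandIsString(bool is_smi, unsigned instance_type,
                         unsigned first_nonstring_type) {
      if (is_smi) return false;                      // j(zero, &call_runtime)
      return instance_type < first_nonstring_type;   // j(above_equal, ...)
    }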
+
+
 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
   Label call_runtime;
   ASSERT(operands_type_ == TRBinaryOpIC::INT32);
@@ -1837,7 +956,7 @@
     case Token::DIV: {
       Label not_floats;
       Label not_int32;
-      if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+      if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
         FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
@@ -1958,7 +1077,7 @@
           default: UNREACHABLE();
         }
         // Store the result in the HeapNumber and return.
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+        if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           __ cvtsi2sd(xmm0, Operand(ebx));
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -2036,23 +1155,25 @@
     GenerateAddStrings(masm);
   }
 
+  Factory* factory = masm->isolate()->factory();
+
   // Convert odd ball arguments to numbers.
   NearLabel check, done;
-  __ cmp(edx, FACTORY->undefined_value());
+  __ cmp(edx, factory->undefined_value());
   __ j(not_equal, &check);
   if (Token::IsBitOp(op_)) {
     __ xor_(edx, Operand(edx));
   } else {
-    __ mov(edx, Immediate(FACTORY->nan_value()));
+    __ mov(edx, Immediate(factory->nan_value()));
   }
   __ jmp(&done);
   __ bind(&check);
-  __ cmp(eax, FACTORY->undefined_value());
+  __ cmp(eax, factory->undefined_value());
   __ j(not_equal, &done);
   if (Token::IsBitOp(op_)) {
     __ xor_(eax, Operand(eax));
   } else {
-    __ mov(eax, Immediate(FACTORY->nan_value()));
+    __ mov(eax, Immediate(factory->nan_value()));
   }
   __ bind(&done);
 
@@ -2070,7 +1191,7 @@
     case Token::MUL:
     case Token::DIV: {
       Label not_floats;
-      if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+      if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
 
@@ -2173,7 +1294,7 @@
           default: UNREACHABLE();
         }
         // Store the result in the HeapNumber and return.
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+        if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           __ cvtsi2sd(xmm0, Operand(ebx));
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -2275,7 +1396,7 @@
     case Token::MUL:
     case Token::DIV: {
       Label not_floats;
-      if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+      if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
 
@@ -2373,7 +1494,7 @@
           default: UNREACHABLE();
         }
         // Store the result in the HeapNumber and return.
-        if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+        if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           __ cvtsi2sd(xmm0, Operand(ebx));
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -2572,7 +1693,7 @@
 
     __ bind(&loaded);
   } else {  // UNTAGGED.
-    if (masm->isolate()->cpu_features()->IsSupported(SSE4_1)) {
+    if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope sse4_scope(SSE4_1);
       __ pextrd(Operand(edx), xmm1, 0x1);  // copy xmm1[63..32] to edx.
     } else {
@@ -2826,8 +1947,7 @@
   Label done, right_exponent, normal_exponent;
   Register scratch = ebx;
   Register scratch2 = edi;
-  if (type_info.IsInteger32() &&
-      masm->isolate()->cpu_features()->IsEnabled(SSE2)) {
+  if (type_info.IsInteger32() && CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope scope(SSE2);
     __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
     return;
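When the type info proves the heap number holds a 32-bit integer, the single truncating conversion cvttsd2si replaces the manual exponent handling this function otherwise performs (see the labels declared above). The same instruction is reachable from C++ through the SSE2 intrinsics; a sketch:

    #include <emmintrin.h>
    #include <cstdint>

    int32_t TruncateDoubleToInt32(double value) {
      // cvttsd2si truncates toward zero; out-of-range inputs produce
      // 0x80000000, the x86 "integer indefinite" value.
      return _mm_cvttsd_si32(_mm_set_sd(value));
    }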
@@ -3375,7 +2495,7 @@
     IntegerConvert(masm,
                    eax,
                    TypeInfo::Unknown(),
-                   masm->isolate()->cpu_features()->IsSupported(SSE3),
+                   CpuFeatures::IsSupported(SSE3),
                    &slow);
 
     // Do the bitwise operation and check if the result fits in a smi.
@@ -3398,7 +2518,7 @@
       __ AllocateHeapNumber(ebx, edx, edi, &slow);
       __ mov(eax, Operand(ebx));
     }
-    if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+    if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatures::Scope use_sse2(SSE2);
       __ cvtsi2sd(xmm0, Operand(ecx));
       __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
@@ -4270,7 +3390,7 @@
                         FixedArray::kHeaderSize));
     __ test(probe, Immediate(kSmiTagMask));
     __ j(zero, not_found);
-    if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+    if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatures::Scope fscope(SSE2);
       __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
       __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
@@ -4509,7 +3629,7 @@
   if (include_number_compare_) {
     Label non_number_comparison;
     Label unordered;
-    if (masm->isolate()->cpu_features()->IsSupported(SSE2)) {
+    if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatures::Scope use_sse2(SSE2);
       CpuFeatures::Scope use_cmov(CMOV);
 
@@ -6455,8 +5575,7 @@
 
   // Inline the double comparison, falling back to the general compare
   // stub if NaN is involved or SSE2 or CMOV is unsupported.
-  CpuFeatures* cpu_features = masm->isolate()->cpu_features();
-  if (cpu_features->IsSupported(SSE2) && cpu_features->IsSupported(CMOV)) {
+  if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
     CpuFeatures::Scope scope1(SSE2);
     CpuFeatures::Scope scope2(CMOV);
 
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 31fa645..cf73682 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -72,161 +72,6 @@
 };
 
 
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
-  NO_GENERIC_BINARY_FLAGS = 0,
-  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
-  GenericBinaryOpStub(Token::Value op,
-                      OverwriteMode mode,
-                      GenericBinaryFlags flags,
-                      TypeInfo operands_type)
-      : op_(op),
-        mode_(mode),
-        flags_(flags),
-        args_in_registers_(false),
-        args_reversed_(false),
-        static_operands_type_(operands_type),
-        runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
-        name_(NULL) {
-    if (static_operands_type_.IsSmi()) {
-      mode_ = NO_OVERWRITE;
-    }
-    use_sse3_ = Isolate::Current()->cpu_features()->IsSupported(SSE3);
-    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
-  }
-
-  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
-      : op_(OpBits::decode(key)),
-        mode_(ModeBits::decode(key)),
-        flags_(FlagBits::decode(key)),
-        args_in_registers_(ArgsInRegistersBits::decode(key)),
-        args_reversed_(ArgsReversedBits::decode(key)),
-        use_sse3_(SSE3Bits::decode(key)),
-        static_operands_type_(TypeInfo::ExpandedRepresentation(
-            StaticTypeInfoBits::decode(key))),
-        runtime_operands_type_(runtime_operands_type),
-        name_(NULL) {
-  }
-
-  // Generate code to call the stub with the supplied arguments. This will add
-  // code at the call site to prepare arguments either in registers or on the
-  // stack together with the actual call.
-  void GenerateCall(MacroAssembler* masm, Register left, Register right);
-  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
-  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
-  bool ArgsInRegistersSupported() {
-    return op_ == Token::ADD || op_ == Token::SUB
-        || op_ == Token::MUL || op_ == Token::DIV;
-  }
-
-  void SetArgsInRegisters() {
-    ASSERT(ArgsInRegistersSupported());
-    args_in_registers_ = true;
-  }
-
- private:
-  Token::Value op_;
-  OverwriteMode mode_;
-  GenericBinaryFlags flags_;
-  bool args_in_registers_;  // Arguments passed in registers, not on the stack.
-  bool args_reversed_;  // Left and right arguments are swapped.
-  bool use_sse3_;
-
-  // Number type information of operands, determined by code generator.
-  TypeInfo static_operands_type_;
-
-  // Operand type information determined at runtime.
-  BinaryOpIC::TypeInfo runtime_operands_type_;
-
-  char* name_;
-
-  const char* GetName();
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("GenericBinaryOpStub %d (op %s), "
-           "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
-           MinorKey(),
-           Token::String(op_),
-           static_cast<int>(mode_),
-           static_cast<int>(flags_),
-           static_cast<int>(args_in_registers_),
-           static_cast<int>(args_reversed_),
-           static_operands_type_.ToString());
-  }
-#endif
-
-  // Minor key encoding in 18 bits RRNNNFRASOOOOOOOMM.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 7> {};
-  class SSE3Bits: public BitField<bool, 9, 1> {};
-  class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
-  class ArgsReversedBits: public BitField<bool, 11, 1> {};
-  class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
-  class StaticTypeInfoBits: public BitField<int, 13, 3> {};
-  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
-
-  Major MajorKey() { return GenericBinaryOp; }
-  int MinorKey() {
-    // Encode the parameters in a unique 18 bit value.
-    return OpBits::encode(op_)
-           | ModeBits::encode(mode_)
-           | FlagBits::encode(flags_)
-           | SSE3Bits::encode(use_sse3_)
-           | ArgsInRegistersBits::encode(args_in_registers_)
-           | ArgsReversedBits::encode(args_reversed_)
-           | StaticTypeInfoBits::encode(
-                 static_operands_type_.ThreeBitRepresentation())
-           | RuntimeTypeInfoBits::encode(runtime_operands_type_);
-  }
-
-  void Generate(MacroAssembler* masm);
-  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
-  void GenerateLoadArguments(MacroAssembler* masm);
-  void GenerateReturn(MacroAssembler* masm);
-  void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
-  void GenerateRegisterArgsPush(MacroAssembler* masm);
-  void GenerateTypeTransition(MacroAssembler* masm);
-
-  bool IsOperationCommutative() {
-    return (op_ == Token::ADD) || (op_ == Token::MUL);
-  }
-
-  void SetArgsReversed() { args_reversed_ = true; }
-  bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
-  bool HasArgsInRegisters() { return args_in_registers_; }
-  bool HasArgsReversed() { return args_reversed_; }
-
-  bool ShouldGenerateSmiCode() {
-    return HasSmiCodeInStub() &&
-        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-        runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  bool ShouldGenerateFPCode() {
-    return runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
-  virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(runtime_operands_type_);
-  }
-
-  virtual void FinishCode(Code* code) {
-    code->set_binary_op_type(runtime_operands_type_);
-  }
-
-  friend class CodeGenerator;
-};
-
-
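The deleted MinorKey() above is worth a note for readers following the diff: it packs every stub parameter into one integer through BitField<T, shift, size> helpers, and the second constructor decodes the same key back into fields. A minimal sketch of the idiom (a hypothetical standalone version, not V8's actual template):

    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        // Callers guarantee the value fits in 'size' bits.
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> shift);
      }
    };

    // Two adjacent fields, mirroring the deleted key layout:
    typedef BitField<int, 0, 2> ModeBits;  // bits 0..1
    typedef BitField<int, 2, 7> OpBits;    // bits 2..8
    // key = ModeBits::encode(mode) | OpBits::encode(op);
    // op  = OpBits::decode(key);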
 class TypeRecordingBinaryOpStub: public CodeStub {
  public:
   TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
@@ -235,7 +80,7 @@
         operands_type_(TRBinaryOpIC::UNINITIALIZED),
         result_type_(TRBinaryOpIC::UNINITIALIZED),
         name_(NULL) {
-    use_sse3_ = Isolate::Current()->cpu_features()->IsSupported(SSE3);
+    use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
@@ -308,6 +153,7 @@
   void GenerateHeapNumberStub(MacroAssembler* masm);
   void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
+  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
   void GenerateAddStrings(MacroAssembler* masm);
 
diff --git a/src/ia32/codegen-ia32-inl.h b/src/ia32/codegen-ia32-inl.h
deleted file mode 100644
index 49c706d..0000000
--- a/src/ia32/codegen-ia32-inl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_IA32_CODEGEN_IA32_INL_H_
-#define V8_IA32_CODEGEN_IA32_INL_H_
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_CODEGEN_IA32_INL_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index cf990a0..572c36c 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,81 +29,15 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 
-#include "codegen-inl.h"
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm)
-
-// -------------------------------------------------------------------------
-// Platform-specific FrameRegisterState functions.
-
-void FrameRegisterState::Save(MacroAssembler* masm) const {
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int action = registers_[i];
-    if (action == kPush) {
-      __ push(RegisterAllocator::ToRegister(i));
-    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
-      __ mov(Operand(ebp, action), RegisterAllocator::ToRegister(i));
-    }
-  }
-}
-
-
-void FrameRegisterState::Restore(MacroAssembler* masm) const {
-  // Restore registers in reverse order, since they were pushed on the stack.
-  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
-    int action = registers_[i];
-    if (action == kPush) {
-      __ pop(RegisterAllocator::ToRegister(i));
-    } else if (action != kIgnore) {
-      action &= ~kSyncedFlag;
-      __ mov(RegisterAllocator::ToRegister(i), Operand(ebp, action));
-    }
-  }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
-  frame_state_.Save(masm_);
-}
-
-
-void DeferredCode::RestoreRegisters() {
-  frame_state_.Restore(masm_);
-}
-
 
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  frame_state_->Save(masm);
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  frame_state_->Restore(masm);
-}
-
-
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
@@ -114,10069 +48,21 @@
 }
 
 
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
-    : owner_(owner),
-      destination_(NULL),
-      previous_(NULL) {
-  owner_->set_state(this);
-}
-
-
-CodeGenState::CodeGenState(CodeGenerator* owner,
-                           ControlDestination* destination)
-    : owner_(owner),
-      destination_(destination),
-      previous_(owner->state()) {
-  owner_->set_state(this);
-}
-
-
-CodeGenState::~CodeGenState() {
-  ASSERT(owner_->state() == this);
-  owner_->set_state(previous_);
-}
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
-    : deferred_(8),
-      masm_(masm),
-      info_(NULL),
-      frame_(NULL),
-      allocator_(NULL),
-      state_(NULL),
-      loop_nesting_(0),
-      in_safe_int32_mode_(false),
-      safe_int32_mode_enabled_(true),
-      function_return_is_shadowed_(false),
-      in_spilled_code_(false),
-      jit_cookie_((FLAG_mask_constants_with_cookie) ?
-                  V8::RandomPrivate(Isolate::Current()) : 0) {
-}
-
-
-// Calling conventions:
-// ebp: caller's frame pointer
-// esp: stack pointer
-// edi: called JS function
-// esi: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
-  // Record the position for debugging purposes.
-  CodeForFunctionPosition(info->function());
-  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
-  // Initialize state.
-  info_ = info;
-  ASSERT(allocator_ == NULL);
-  RegisterAllocator register_allocator(this);
-  allocator_ = &register_allocator;
-  ASSERT(frame_ == NULL);
-  frame_ = new VirtualFrame();
-  set_in_spilled_code(false);
-
-  // Adjust for function-level loop nesting.
-  ASSERT_EQ(0, loop_nesting_);
-  loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
-  masm()->isolate()->set_jump_target_compiling_deferred_code(false);
-
-  {
-    CodeGenState state(this);
-
-    // Entry:
-    // Stack: receiver, arguments, return address.
-    // ebp: caller's frame pointer
-    // esp: stack pointer
-    // edi: called JS function
-    // esi: callee's context
-    allocator_->Initialize();
-
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-      frame_->SpillAll();
-      __ int3();
-    }
-#endif
-
-    frame_->Enter();
-
-    // Allocate space for locals and initialize them.
-    frame_->AllocateStackSlots();
-
-    // Allocate the local context if needed.
-    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-    if (heap_slots > 0) {
-      Comment cmnt(masm_, "[ allocate local context");
-      // Allocate local context.
-      // Get outer context and create a new context based on it.
-      frame_->PushFunction();
-      Result context;
-      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-        FastNewContextStub stub(heap_slots);
-        context = frame_->CallStub(&stub, 1);
-      } else {
-        context = frame_->CallRuntime(Runtime::kNewContext, 1);
-      }
-
-      // Update context local.
-      frame_->SaveContextRegister();
-
-      // Verify that the runtime call result and esi agree.
-      if (FLAG_debug_code) {
-        __ cmp(context.reg(), Operand(esi));
-        __ Assert(equal, "Runtime::NewContext should end up in esi");
-      }
-    }
-
-    // TODO(1241774): Improve this code:
-    // 1) only needed if we have a context
-    // 2) no need to recompute context ptr every single time
-    // 3) don't copy parameter operand code from SlotOperand!
-    {
-      Comment cmnt2(masm_, "[ copy context parameters into .context");
-      // Note that iteration order is relevant here! If we have the same
-      // parameter twice (e.g., function (x, y, x)), and that parameter
-      // needs to be copied into the context, the value that ends up in the
-      // context must be that of the last argument passed under that name.
-      // This is a rare case, so we don't check for it; instead we rely on
-      // the copying order: such a parameter is copied repeatedly into the
-      // context location and thus the last value is what is seen inside
-      // the function.
-      for (int i = 0; i < scope()->num_parameters(); i++) {
-        Variable* par = scope()->parameter(i);
-        Slot* slot = par->AsSlot();
-        if (slot != NULL && slot->type() == Slot::CONTEXT) {
-          // The use of SlotOperand below is safe in unspilled code
-          // because the slot is guaranteed to be a context slot.
-          //
-          // There are no parameters in the global scope.
-          ASSERT(!scope()->is_global_scope());
-          frame_->PushParameterAt(i);
-          Result value = frame_->Pop();
-          value.ToRegister();
-
-          // SlotOperand loads context.reg() with the context object
-          // stored to, used below in RecordWrite.
-          Result context = allocator_->Allocate();
-          ASSERT(context.is_valid());
-          __ mov(SlotOperand(slot, context.reg()), value.reg());
-          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-          Result scratch = allocator_->Allocate();
-          ASSERT(scratch.is_valid());
-          frame_->Spill(context.reg());
-          frame_->Spill(value.reg());
-          __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
-        }
-      }
-    }
-
-    // Store the arguments object.  This must happen after context
-    // initialization because the arguments object may be stored in
-    // the context.
-    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
-      StoreArgumentsObject(true);
-    }
-
-    // Initialize ThisFunction reference if present.
-    if (scope()->is_function_scope() && scope()->function() != NULL) {
-      frame_->Push(FACTORY->the_hole_value());
-      StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
-    }
-
-
-    // Initialize the function return target after the locals are set
-    // up, because it needs the expected frame height from the frame.
-    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
-    function_return_is_shadowed_ = false;
-
-    // Generate code to 'execute' declarations and initialize functions
-    // (source elements). In case of an illegal redeclaration we need to
-    // handle that instead of processing the declarations.
-    if (scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ illegal redeclarations");
-      scope()->VisitIllegalRedeclaration(this);
-    } else {
-      Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope()->declarations());
-      // Bail out if a stack-overflow exception occurred when processing
-      // declarations.
-      if (HasStackOverflow()) return;
-    }
-
-    if (FLAG_trace) {
-      frame_->CallRuntime(Runtime::kTraceEnter, 0);
-      // Ignore the return value.
-    }
-    CheckStack();
-
-    // Compile the body of the function in a vanilla state. Don't
-    // bother compiling all the code if the scope has an illegal
-    // redeclaration.
-    if (!scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
-      bool is_builtin = info->isolate()->bootstrapper()->IsActive();
-      bool should_trace =
-          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
-      if (should_trace) {
-        frame_->CallRuntime(Runtime::kDebugTrace, 0);
-        // Ignore the return value.
-      }
-#endif
-      VisitStatements(info->function()->body());
-
-      // Handle the return from the function.
-      if (has_valid_frame()) {
-        // If there is a valid frame, control flow can fall off the end of
-        // the body.  In that case there is an implicit return statement.
-        ASSERT(!function_return_is_shadowed_);
-        CodeForReturnPosition(info->function());
-        frame_->PrepareForReturn();
-        Result undefined(FACTORY->undefined_value());
-        if (function_return_.is_bound()) {
-          function_return_.Jump(&undefined);
-        } else {
-          function_return_.Bind(&undefined);
-          GenerateReturnSequence(&undefined);
-        }
-      } else if (function_return_.is_linked()) {
-        // If the return target has dangling jumps to it, then we have not
-        // yet generated the return sequence.  This can happen when (a)
-        // control does not flow off the end of the body so we did not
-        // compile an artificial return statement just above, and (b) there
-        // are return statements in the body but (c) they are all shadowed.
-        Result return_value;
-        function_return_.Bind(&return_value);
-        GenerateReturnSequence(&return_value);
-      }
-    }
-  }
-
-  // Adjust for function-level loop nesting.
-  ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0);
-  loop_nesting_ = 0;
-
-  // Code generation state must be reset.
-  ASSERT(state_ == NULL);
-  ASSERT(!function_return_is_shadowed_);
-  function_return_.Unuse();
-  DeleteFrame();
-
-  // Process any deferred code using the register allocator.
-  if (!HasStackOverflow()) {
-    info->isolate()->set_jump_target_compiling_deferred_code(true);
-    ProcessDeferred();
-    info->isolate()->set_jump_target_compiling_deferred_code(false);
-  }
-
-  // There is no need to delete the register allocator, it is a
-  // stack-allocated local.
-  allocator_ = NULL;
-}
-
-
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
-  // Currently, this assertion will fail if we try to assign to
-  // a variable that is constant because it is read-only
-  // (such as the variable referring to a named function expression).
-  // We need to implement assignments to read-only variables.
-  // Ideally, we should do this during AST generation (by converting
-  // such assignments into expression statements); however, in general
-  // we may not be able to make the decision until past AST generation,
-  // that is when the entire program is known.
-  ASSERT(slot != NULL);
-  int index = slot->index();
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-      return frame_->ParameterAt(index);
-
-    case Slot::LOCAL:
-      return frame_->LocalAt(index);
-
-    case Slot::CONTEXT: {
-      // Follow the context chain if necessary.
-      ASSERT(!tmp.is(esi));  // do not overwrite context register
-      Register context = esi;
-      int chain_length = scope()->ContextChainLength(slot->var()->scope());
-      for (int i = 0; i < chain_length; i++) {
-        // Load the closure.
-        // (All contexts, even 'with' contexts, have a closure,
-        // and it is the same for all contexts inside a function.
-        // There is no need to go to the function context first.)
-        __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
-        // Load the function context (which is the incoming, outer context).
-        __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
-        context = tmp;
-      }
-      // We may have a 'with' context now. Get the function context.
-      // (In fact this mov may never be needed, since the scope analysis
-      // may not permit a direct context access in this case and thus we are
-      // always at a function context. However, it is safe to dereference
-      // because the function context of a function context is itself.
-      // Before deleting this mov we should try to create a counter-example
-      // first, though...)
-      __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
-      return ContextOperand(tmp, index);
-    }
-
-    default:
-      UNREACHABLE();
-      return Operand(eax);
-  }
-}
-
-
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
-                                                         Result tmp,
-                                                         JumpTarget* slow) {
-  ASSERT(slot->type() == Slot::CONTEXT);
-  ASSERT(tmp.is_register());
-  Register context = esi;
-
-  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
-               Immediate(0));
-        slow->Branch(not_equal, not_taken);
-      }
-      __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
-      __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-      context = tmp.reg();
-    }
-  }
-  // Check that last extension is NULL.
-  __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
-  slow->Branch(not_equal, not_taken);
-  __ mov(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
-  return ContextOperand(tmp.reg(), slot->index());
-}
-
-
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* expr,
-                                  ControlDestination* dest,
-                                  bool force_control) {
-  ASSERT(!in_spilled_code());
-  int original_height = frame_->height();
-
-  { CodeGenState new_state(this, dest);
-    Visit(expr);
-
-    // If we hit a stack overflow, we may not have actually visited
-    // the expression.  In that case, we ensure that we have a
-    // valid-looking frame state because we will continue to generate
-    // code as we unwind the C++ stack.
-    //
-    // It's possible to have both a stack overflow and a valid frame
-    // state (e.g., a subexpression overflowed, visiting it returned
-    // with a dummied frame state, and visiting this expression
-    // returned with a normal-looking state).
-    if (HasStackOverflow() &&
-        !dest->is_used() &&
-        frame_->height() == original_height) {
-      dest->Goto(true);
-    }
-  }
-
-  if (force_control && !dest->is_used()) {
-    // Convert the TOS value into flow to the control destination.
-    ToBoolean(dest);
-  }
-
-  ASSERT(!(force_control && !dest->is_used()));
-  ASSERT(dest->is_used() || frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  Load(expression);
-  frame_->SpillAll();
-  set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::LoadInSafeInt32Mode(Expression* expr,
-                                         BreakTarget* unsafe_bailout) {
-  set_unsafe_bailout(unsafe_bailout);
-  set_in_safe_int32_mode(true);
-  Load(expr);
-  Result value = frame_->Pop();
-  ASSERT(frame_->HasNoUntaggedInt32Elements());
-  if (expr->GuaranteedSmiResult()) {
-    ConvertInt32ResultToSmi(&value);
-  } else {
-    ConvertInt32ResultToNumber(&value);
-  }
-  set_in_safe_int32_mode(false);
-  set_unsafe_bailout(NULL);
-  frame_->Push(&value);
-}
-
-
-void CodeGenerator::LoadWithSafeInt32ModeDisabled(Expression* expr) {
-  set_safe_int32_mode_enabled(false);
-  Load(expr);
-  set_safe_int32_mode_enabled(true);
-}
-
-
-void CodeGenerator::ConvertInt32ResultToSmi(Result* value) {
-  ASSERT(value->is_untagged_int32());
-  if (value->is_register()) {
-    __ add(value->reg(), Operand(value->reg()));
-  } else {
-    ASSERT(value->is_constant());
-    ASSERT(value->handle()->IsSmi());
-  }
-  value->set_untagged_int32(false);
-  value->set_type_info(TypeInfo::Smi());
-}
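The add(value->reg(), value->reg()) above is the ia32 smi-tagging idiom: with a zero tag in a one-bit field, tagging a 31-bit integer is a left shift by one, and doubling overflows exactly when the value needs more than 31 signed bits. ConvertInt32ResultToNumber below uses the overflow flag from the same add to fall back to a heap number. A sketch, assuming GCC/Clang's __builtin_add_overflow:

    #include <cstdint>
    #include <optional>

    std::optional<int32_t> TagAsSmi(int32_t value) {
      int32_t tagged;
      if (__builtin_add_overflow(value, value, &tagged)) {
        return std::nullopt;  // does not fit; needs a heap number
      }
      return tagged;  // low bit 0 marks a smi; payload lives in bits 1..31
    }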
-
-
-void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
-  ASSERT(value->is_untagged_int32());
-  if (value->is_register()) {
-    Register val = value->reg();
-    JumpTarget done;
-    __ add(val, Operand(val));
-    done.Branch(no_overflow, value);
-    __ sar(val, 1);
-    // If there was an overflow, bits 30 and 31 of the original number disagree.
-    __ xor_(val, 0x80000000u);
-    if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
-      CpuFeatures::Scope fscope(SSE2);
-      __ cvtsi2sd(xmm0, Operand(val));
-    } else {
-      // Move val to ST[0] in the FPU
-      // Push and pop are safe with respect to the virtual frame because
-      // all synced elements are below the actual stack pointer.
-      __ push(val);
-      __ fild_s(Operand(esp, 0));
-      __ pop(val);
-    }
-    Result scratch = allocator_->Allocate();
-    ASSERT(scratch.is_register());
-    Label allocation_failed;
-    __ AllocateHeapNumber(val, scratch.reg(),
-                          no_reg, &allocation_failed);
-    VirtualFrame* clone = new VirtualFrame(frame_);
-    scratch.Unuse();
-    if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
-      CpuFeatures::Scope fscope(SSE2);
-      __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
-    } else {
-      __ fstp_d(FieldOperand(val, HeapNumber::kValueOffset));
-    }
-    done.Jump(value);
-
-    // Establish the virtual frame, cloned from where AllocateHeapNumber
-    // jumped to allocation_failed.
-    RegisterFile empty_regs;
-    SetFrame(clone, &empty_regs);
-    __ bind(&allocation_failed);
-    if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
-      // Pop the value from the floating point stack.
-      __ fstp(0);
-    }
-    unsafe_bailout_->Jump();
-
-    done.Bind(value);
-  } else {
-    ASSERT(value->is_constant());
-  }
-  value->set_untagged_int32(false);
-  value->set_type_info(TypeInfo::Integer32());
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(!in_spilled_code());
-
-  // If the expression should be a side-effect-free 32-bit int computation,
-  // compile that SafeInt32 path, and a bailout path.
-  if (!in_safe_int32_mode() &&
-      safe_int32_mode_enabled() &&
-      expr->side_effect_free() &&
-      expr->num_bit_ops() > 2 &&
-      masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
-    BreakTarget unsafe_bailout;
-    JumpTarget done;
-    unsafe_bailout.set_expected_height(frame_->height());
-    LoadInSafeInt32Mode(expr, &unsafe_bailout);
-    done.Jump();
-
-    if (unsafe_bailout.is_linked()) {
-      unsafe_bailout.Bind();
-      LoadWithSafeInt32ModeDisabled(expr);
-    }
-    done.Bind();
-  } else {
-    JumpTarget true_target;
-    JumpTarget false_target;
-    ControlDestination dest(&true_target, &false_target, true);
-    LoadCondition(expr, &dest, false);
-
-    if (dest.false_was_fall_through()) {
-      // The false target was just bound.
-      JumpTarget loaded;
-      frame_->Push(FACTORY->false_value());
-      // There may be dangling jumps to the true target.
-      if (true_target.is_linked()) {
-        loaded.Jump();
-        true_target.Bind();
-        frame_->Push(FACTORY->true_value());
-        loaded.Bind();
-      }
-
-    } else if (dest.is_used()) {
-      // There is true, and possibly false, control flow (with true as
-      // the fall through).
-      JumpTarget loaded;
-      frame_->Push(FACTORY->true_value());
-      if (false_target.is_linked()) {
-        loaded.Jump();
-        false_target.Bind();
-        frame_->Push(FACTORY->false_value());
-        loaded.Bind();
-      }
-
-    } else {
-      // We have a valid value on top of the frame, but we still may
-      // have dangling jumps to the true and false targets from nested
-      // subexpressions (e.g., the left subexpressions of the
-      // short-circuited boolean operators).
-      ASSERT(has_valid_frame());
-      if (true_target.is_linked() || false_target.is_linked()) {
-        JumpTarget loaded;
-        loaded.Jump();  // Don't lose the current TOS.
-        if (true_target.is_linked()) {
-          true_target.Bind();
-          frame_->Push(FACTORY->true_value());
-          if (false_target.is_linked()) {
-            loaded.Jump();
-          }
-        }
-        if (false_target.is_linked()) {
-          false_target.Bind();
-          frame_->Push(FACTORY->false_value());
-        }
-        loaded.Bind();
-      }
-    }
-  }
-  ASSERT(has_valid_frame());
-  ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadGlobal() {
-  if (in_spilled_code()) {
-    frame_->EmitPush(GlobalObjectOperand());
-  } else {
-    Result temp = allocator_->Allocate();
-    __ mov(temp.reg(), GlobalObjectOperand());
-    frame_->Push(&temp);
-  }
-}
-
-
-void CodeGenerator::LoadGlobalReceiver() {
-  Result temp = allocator_->Allocate();
-  Register reg = temp.reg();
-  __ mov(reg, GlobalObjectOperand());
-  __ mov(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
-  frame_->Push(&temp);
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
-  // Special handling of identifiers as subexpressions of typeof.
-  Variable* variable = expr->AsVariableProxy()->AsVariable();
-  if (variable != NULL && !variable->is_this() && variable->is_global()) {
-    // For a global variable we build the property reference
-    // <global>.<variable> and perform a (regular non-contextual) property
-    // load to make sure we do not get reference errors.
-    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
-    Literal key(variable->name());
-    Property property(&global, &key, RelocInfo::kNoPosition);
-    Reference ref(this, &property);
-    ref.GetValue();
-  } else if (variable != NULL && variable->AsSlot() != NULL) {
-    // For a variable that rewrites to a slot, we signal it is the immediate
-    // subexpression of a typeof.
-    LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
-  } else {
-    // Anything else can be handled normally.
-    Load(expr);
-  }
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
-  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-
-  // In strict mode there is no need for shadow arguments.
-  ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
-
-  // We don't want to do lazy arguments allocation for functions that
-  // have heap-allocated contexts, because it interferes with the
-  // uninitialized const tracking in the context objects.
-  return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
-      ? EAGER_ARGUMENTS_ALLOCATION
-      : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
-  ArgumentsAllocationMode mode = ArgumentsMode();
-  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
-  Comment cmnt(masm_, "[ store arguments object");
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
-    // When using lazy arguments allocation, we store the arguments marker value
-    // as a sentinel indicating that the arguments object hasn't been
-    // allocated yet.
-    frame_->Push(FACTORY->arguments_marker());
-  } else {
-    ArgumentsAccessStub stub(is_strict_mode()
-        ? ArgumentsAccessStub::NEW_STRICT
-        : ArgumentsAccessStub::NEW_NON_STRICT);
-    frame_->PushFunction();
-    frame_->PushReceiverSlotAddress();
-    frame_->Push(Smi::FromInt(scope()->num_parameters()));
-    Result result = frame_->CallStub(&stub, 3);
-    frame_->Push(&result);
-  }
-
-  Variable* arguments = scope()->arguments();
-  Variable* shadow = scope()->arguments_shadow();
-
-  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
-  ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
-         scope()->is_strict_mode());
-
-  JumpTarget done;
-  bool skip_arguments = false;
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
-    // We have to skip storing into the arguments slot if it has
-    // already been written to. This can happen if a function
-    // has a local variable named 'arguments'.
-    LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
-    Result probe = frame_->Pop();
-    if (probe.is_constant()) {
-      // We have to skip updating the arguments object if it has
-      // been assigned a proper value.
-      skip_arguments = !probe.handle()->IsArgumentsMarker();
-    } else {
-      __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
-      probe.Unuse();
-      done.Branch(not_equal);
-    }
-  }
-  if (!skip_arguments) {
-    StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
-    if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
-  }
-  if (shadow != NULL) {
-    StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
-  }
-  return frame_->Pop();
-}
-
-//------------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
-
-Reference::Reference(CodeGenerator* cgen,
-                     Expression* expression,
-                     bool persist_after_get)
-    : cgen_(cgen),
-      expression_(expression),
-      type_(ILLEGAL),
-      persist_after_get_(persist_after_get) {
-  cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
-  ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
-  // References are loaded from both spilled and unspilled code.  Set the
-  // state to unspilled to allow that (and explicitly spill after
-  // construction at the construction sites).
-  bool was_in_spilled_code = in_spilled_code_;
-  in_spilled_code_ = false;
-
-  Comment cmnt(masm_, "[ LoadReference");
-  Expression* e = ref->expression();
-  Property* property = e->AsProperty();
-  Variable* var = e->AsVariableProxy()->AsVariable();
-
-  if (property != NULL) {
-    // The expression is either a property or a variable proxy that rewrites
-    // to a property.
-    Load(property->obj());
-    if (property->key()->IsPropertyName()) {
-      ref->set_type(Reference::NAMED);
-    } else {
-      Load(property->key());
-      ref->set_type(Reference::KEYED);
-    }
-  } else if (var != NULL) {
-    // The expression is a variable proxy that does not rewrite to a
-    // property.  Global variables are treated as named property references.
-    if (var->is_global()) {
-      // If eax is free, the register allocator prefers it.  Thus the code
-      // generator will load the global object into eax, which is where
-      // LoadIC wants it.  Most uses of Reference call LoadIC directly
-      // after the reference is created.
-      frame_->Spill(eax);
-      LoadGlobal();
-      ref->set_type(Reference::NAMED);
-    } else {
-      ASSERT(var->AsSlot() != NULL);
-      ref->set_type(Reference::SLOT);
-    }
-  } else {
-    // Anything else is a runtime error.
-    Load(e);
-    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
-  }
-
-  in_spilled_code_ = was_in_spilled_code;
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
-  Comment cmnt(masm_, "[ ToBoolean");
-
-  // The value to convert should be popped from the frame.
-  Result value = frame_->Pop();
-  value.ToRegister();
-
-  if (value.is_integer32()) {  // Also takes Smi case.
-    Comment cmnt(masm_, "ONLY_INTEGER_32");
-    if (FLAG_debug_code) {
-      Label ok;
-      __ AbortIfNotNumber(value.reg());
-      __ test(value.reg(), Immediate(kSmiTagMask));
-      __ j(zero, &ok);
-      __ fldz();
-      __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
-      __ FCmp();
-      __ j(not_zero, &ok);
-      __ Abort("Smi was wrapped in HeapNumber in output from bitop");
-      __ bind(&ok);
-    }
-    // In the integer32 case there are no Smis hidden in heap numbers, so we
-    // need only test for Smi zero.
-    __ test(value.reg(), Operand(value.reg()));
-    dest->false_target()->Branch(zero);
-    value.Unuse();
-    dest->Split(not_zero);
-  } else if (value.is_number()) {
-    Comment cmnt(masm_, "ONLY_NUMBER");
-    // Fast case if TypeInfo indicates only numbers.
-    if (FLAG_debug_code) {
-      __ AbortIfNotNumber(value.reg());
-    }
-    // Smi => false iff zero.
-    STATIC_ASSERT(kSmiTag == 0);
-    __ test(value.reg(), Operand(value.reg()));
-    dest->false_target()->Branch(zero);
-    __ test(value.reg(), Immediate(kSmiTagMask));
-    dest->true_target()->Branch(zero);
-    __ fldz();
-    __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
-    __ FCmp();
-    value.Unuse();
-    dest->Split(not_zero);
-  } else {
-    // Fast case checks.
-    // 'false' => false.
-    __ cmp(value.reg(), FACTORY->false_value());
-    dest->false_target()->Branch(equal);
-
-    // 'true' => true.
-    __ cmp(value.reg(), FACTORY->true_value());
-    dest->true_target()->Branch(equal);
-
-    // 'undefined' => false.
-    __ cmp(value.reg(), FACTORY->undefined_value());
-    dest->false_target()->Branch(equal);
-
-    // Smi => false iff zero.
-    STATIC_ASSERT(kSmiTag == 0);
-    __ test(value.reg(), Operand(value.reg()));
-    dest->false_target()->Branch(zero);
-    __ test(value.reg(), Immediate(kSmiTagMask));
-    dest->true_target()->Branch(zero);
-
-    // Call the stub for all other cases.
-    frame_->Push(&value);  // Undo the Pop() from above.
-    ToBooleanStub stub;
-    Result temp = frame_->CallStub(&stub, 1);
-    // Convert the result to a condition code.
-    __ test(temp.reg(), Operand(temp.reg()));
-    temp.Unuse();
-    dest->Split(not_equal);
-  }
-}
-
-
-// Perform or call the specialized stub for a binary operation.  Requires the
-// three registers left, right and dst to be distinct and spilled.  This
-// deferred operation has up to three entry points:  The main one calls the
-// runtime system.  The second is for when the result is a non-Smi.  The
-// third is for when at least one of the inputs is non-Smi and we have SSE2.
-class DeferredInlineBinaryOperation: public DeferredCode {
- public:
-  DeferredInlineBinaryOperation(Token::Value op,
-                                Register dst,
-                                Register left,
-                                Register right,
-                                TypeInfo left_info,
-                                TypeInfo right_info,
-                                OverwriteMode mode)
-      : op_(op), dst_(dst), left_(left), right_(right),
-        left_info_(left_info), right_info_(right_info), mode_(mode) {
-    set_comment("[ DeferredInlineBinaryOperation");
-    ASSERT(!left.is(right));
-  }
-
-  virtual void Generate();
-
-  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
-  // Exit().
-  virtual bool AutoSaveAndRestore() { return false; }
-
-  void JumpToAnswerOutOfRange(Condition cond);
-  void JumpToConstantRhs(Condition cond, Smi* smi_value);
-  Label* NonSmiInputLabel();
-
- private:
-  void GenerateAnswerOutOfRange();
-  void GenerateNonSmiInput();
-
-  Token::Value op_;
-  Register dst_;
-  Register left_;
-  Register right_;
-  TypeInfo left_info_;
-  TypeInfo right_info_;
-  OverwriteMode mode_;
-  Label answer_out_of_range_;
-  Label non_smi_input_;
-  Label constant_rhs_;
-  Smi* smi_value_;
-};
-
-
-Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
-  if (Token::IsBitOp(op_) &&
-      masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
-    return &non_smi_input_;
-  } else {
-    return entry_label();
-  }
-}
-
-
-void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) {
-  __ j(cond, &answer_out_of_range_);
-}
-
-
-void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
-                                                      Smi* smi_value) {
-  smi_value_ = smi_value;
-  __ j(cond, &constant_rhs_);
-}
-
-
-void DeferredInlineBinaryOperation::Generate() {
-  // Registers are not saved implicitly for this stub, so we should not
-  // tread on the registers that were not passed to us.
-  if (masm()->isolate()->cpu_features()->IsSupported(SSE2) &&
-      ((op_ == Token::ADD) ||
-       (op_ == Token::SUB) ||
-       (op_ == Token::MUL) ||
-       (op_ == Token::DIV))) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    Label call_runtime, after_alloc_failure;
-    Label left_smi, right_smi, load_right, do_op;
-    if (!left_info_.IsSmi()) {
-      __ test(left_, Immediate(kSmiTagMask));
-      __ j(zero, &left_smi);
-      if (!left_info_.IsNumber()) {
-        __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
-               FACTORY->heap_number_map());
-        __ j(not_equal, &call_runtime);
-      }
-      __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
-      if (mode_ == OVERWRITE_LEFT) {
-        __ mov(dst_, left_);
-      }
-      __ jmp(&load_right);
-
-      __ bind(&left_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(left_);
-    }
-    __ SmiUntag(left_);
-    __ cvtsi2sd(xmm0, Operand(left_));
-    __ SmiTag(left_);
-    if (mode_ == OVERWRITE_LEFT) {
-      Label alloc_failure;
-      __ push(left_);
-      __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
-      __ pop(left_);
-    }
-
-    __ bind(&load_right);
-    if (!right_info_.IsSmi()) {
-      __ test(right_, Immediate(kSmiTagMask));
-      __ j(zero, &right_smi);
-      if (!right_info_.IsNumber()) {
-        __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
-               FACTORY->heap_number_map());
-        __ j(not_equal, &call_runtime);
-      }
-      __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
-      if (mode_ == OVERWRITE_RIGHT) {
-        __ mov(dst_, right_);
-      } else if (mode_ == NO_OVERWRITE) {
-        Label alloc_failure;
-        __ push(left_);
-        __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
-        __ pop(left_);
-      }
-      __ jmp(&do_op);
-
-      __ bind(&right_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(right_);
-    }
-    __ SmiUntag(right_);
-    __ cvtsi2sd(xmm1, Operand(right_));
-    __ SmiTag(right_);
-    if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
-      __ push(left_);
-      __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
-      __ pop(left_);
-    }
-
-    __ bind(&do_op);
-    switch (op_) {
-      case Token::ADD: __ addsd(xmm0, xmm1); break;
-      case Token::SUB: __ subsd(xmm0, xmm1); break;
-      case Token::MUL: __ mulsd(xmm0, xmm1); break;
-      case Token::DIV: __ divsd(xmm0, xmm1); break;
-      default: UNREACHABLE();
-    }
-    __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
-    Exit();
-
-
-    __ bind(&after_alloc_failure);
-    __ pop(left_);
-    __ bind(&call_runtime);
-  }
-  // Register spilling is not done implicitly for this stub.
-  // We can't postpone it any longer, though.
-  SaveRegisters();
-
-  GenericBinaryOpStub stub(op_,
-                           mode_,
-                           NO_SMI_CODE_IN_STUB,
-                           TypeInfo::Combine(left_info_, right_info_));
-  stub.GenerateCall(masm_, left_, right_);
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-  RestoreRegisters();
-  Exit();
-
-  if (non_smi_input_.is_linked() || constant_rhs_.is_linked()) {
-    GenerateNonSmiInput();
-  }
-  if (answer_out_of_range_.is_linked()) {
-    GenerateAnswerOutOfRange();
-  }
-}
-
-
-void DeferredInlineBinaryOperation::GenerateNonSmiInput() {
-  // We know at least one of the inputs was not a Smi.
-  // This is a third entry point into the deferred code.
-  // We may not overwrite left_ because we want to be able to call the
-  // handling code for a non-smi answer, and that code might want to
-  // overwrite the heap number in left_.
-  ASSERT(!right_.is(dst_));
-  ASSERT(!left_.is(dst_));
-  ASSERT(!left_.is(right_));
-  // This entry point is used for bit ops where the right hand side
-  // is a constant Smi and the left hand side is a heap object.  It
-  // is also used for bit ops where both sides are unknown, but where
-  // at least one of them is a heap object.
-  bool rhs_is_constant = constant_rhs_.is_linked();
-  // We can't generate code for both cases.
-  ASSERT(!non_smi_input_.is_linked() || !constant_rhs_.is_linked());
-
-  if (FLAG_debug_code) {
-    __ int3();  // We don't fall through into this code.
-  }
-
-  __ bind(&non_smi_input_);
-
-  if (rhs_is_constant) {
-    __ bind(&constant_rhs_);
-    // In this case the input is a heap object and it is in the dst_ register.
-    // The left_ and right_ registers have not been initialized yet.
-    __ mov(right_, Immediate(smi_value_));
-    __ mov(left_, Operand(dst_));
-    if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
-      __ jmp(entry_label());
-      return;
-    } else {
-      CpuFeatures::Scope use_sse2(SSE2);
-      __ JumpIfNotNumber(dst_, left_info_, entry_label());
-      __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
-      __ SmiUntag(right_);
-    }
-  } else {
-    // We know we have SSE2 here because otherwise the label is not linked (see
-    // NonSmiInputLabel).
-    CpuFeatures::Scope use_sse2(SSE2);
-    // Handle the non-constant right hand side situation:
-    if (left_info_.IsSmi()) {
-      // Right is a heap object.
-      __ JumpIfNotNumber(right_, right_info_, entry_label());
-      __ ConvertToInt32(right_, right_, dst_, right_info_, entry_label());
-      __ mov(dst_, Operand(left_));
-      __ SmiUntag(dst_);
-    } else if (right_info_.IsSmi()) {
-      // Left is a heap object.
-      __ JumpIfNotNumber(left_, left_info_, entry_label());
-      __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
-      __ SmiUntag(right_);
-    } else {
-      // Here we don't know whether one or both operands are heap objects.
-      Label only_right_is_heap_object, got_both;
-      __ mov(dst_, Operand(left_));
-      __ SmiUntag(dst_, &only_right_is_heap_object);
-      // Left was a heap object.
-      __ JumpIfNotNumber(left_, left_info_, entry_label());
-      __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
-      __ SmiUntag(right_, &got_both);
-      // Both were heap objects.
-      __ rcl(right_, 1);  // Put tag back.
-      __ JumpIfNotNumber(right_, right_info_, entry_label());
-      __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
-      __ jmp(&got_both);
-      __ bind(&only_right_is_heap_object);
-      __ JumpIfNotNumber(right_, right_info_, entry_label());
-      __ ConvertToInt32(right_, right_, no_reg, right_info_, entry_label());
-      __ bind(&got_both);
-    }
-  }
-  ASSERT(op_ == Token::BIT_AND ||
-         op_ == Token::BIT_OR ||
-         op_ == Token::BIT_XOR ||
-         right_.is(ecx));
-  switch (op_) {
-    case Token::BIT_AND: __ and_(dst_, Operand(right_));  break;
-    case Token::BIT_OR:   __ or_(dst_, Operand(right_));  break;
-    case Token::BIT_XOR: __ xor_(dst_, Operand(right_));  break;
-    case Token::SHR:     __ shr_cl(dst_);  break;
-    case Token::SAR:     __ sar_cl(dst_);  break;
-    case Token::SHL:     __ shl_cl(dst_);  break;
-    default: UNREACHABLE();
-  }
-  if (op_ == Token::SHR) {
-    // Check that the *unsigned* result fits in a smi.  Neither of
-    // the two high-order bits can be set:
-    //  * 0x80000000: high bit would be lost when smi tagging.
-    //  * 0x40000000: this number would convert to negative when smi
-    //    tagging.
-    __ test(dst_, Immediate(0xc0000000));
-    __ j(not_zero, &answer_out_of_range_);
-  } else {
-    // Check that the *signed* result fits in a smi.
-    __ cmp(dst_, 0xc0000000);
-    __ j(negative, &answer_out_of_range_);
-  }
-  __ SmiTag(dst_);
-  Exit();
-}
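
The two 0xc0000000 checks above encode when a 32-bit answer survives smi
tagging on ia32 (31-bit payload, tag in bit 0): an unsigned result must have
neither of the two top bits set, and a signed result must lie in
[-2^30, 2^30).  A minimal standalone sketch of the same predicates in C++
(the helper names are illustrative, not V8 API):

    #include <cassert>
    #include <cstdint>

    // Signed check, mirroring `cmp dst, 0xc0000000; j negative`:
    // dst - 0xc0000000 == dst + 0x40000000 (mod 2^32), and the sign bit of
    // that sum is clear exactly when dst is in [-2^30, 2^30).
    static bool SignedFitsInSmi(int32_t v) {
      return static_cast<uint32_t>(v) + 0x40000000u < 0x80000000u;
    }

    // Unsigned check, mirroring `test dst, 0xc0000000`: 0x80000000 would
    // lose its top bit when tagged, 0x40000000 would turn negative.
    static bool UnsignedFitsInSmi(uint32_t v) {
      return (v & 0xc0000000u) == 0;
    }

    int main() {
      assert(SignedFitsInSmi(-0x40000000) && !SignedFitsInSmi(0x40000000));
      assert(UnsignedFitsInSmi(0x3fffffffu) && !UnsignedFitsInSmi(0x40000000u));
      return 0;
    }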
-
-
-void DeferredInlineBinaryOperation::GenerateAnswerOutOfRange() {
-  Label after_alloc_failure2;
-  Label allocation_ok;
-  __ bind(&after_alloc_failure2);
-  // We have to allocate a heap number, which may trigger a GC, while
-  // keeping hold of the answer in dst_.  The answer is not a Smi.  We
-  // can't just call the runtime shift function here because we already
-  // threw away the inputs.
-  __ xor_(left_, Operand(left_));
-  __ shl(dst_, 1);  // Put top bit in carry flag and Smi tag the low bits.
-  __ rcr(left_, 1);  // Rotate with carry.
-  __ push(dst_);   // Smi tagged low 31 bits.
-  __ push(left_);  // 0 or 0x80000000, which is Smi tagged in both cases.
-  __ CallRuntime(Runtime::kNumberAlloc, 0);
-  if (!left_.is(eax)) {
-    __ mov(left_, eax);
-  }
-  __ pop(right_);   // High bit.
-  __ pop(dst_);     // Low 31 bits.
-  __ shr(dst_, 1);  // Put 0 in top bit.
-  __ or_(dst_, Operand(right_));
-  __ jmp(&allocation_ok);
-
-  // This is the second entry point to the deferred code.  It is used only by
-  // the bit operations.
-  // The dst_ register has the answer.  It is not Smi tagged.  If mode_ is
-  // OVERWRITE_LEFT then left_ must contain either an overwritable heap number
-  // or a Smi.
-  // Put a heap number pointer in left_.
-  __ bind(&answer_out_of_range_);
-  SaveRegisters();
-  if (mode_ == OVERWRITE_LEFT) {
-    __ test(left_, Immediate(kSmiTagMask));
-    __ j(not_zero, &allocation_ok);
-  }
-  // This trashes right_.
-  __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
-  __ bind(&allocation_ok);
-  if (masm()->isolate()->cpu_features()->IsSupported(SSE2) &&
-      op_ != Token::SHR) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    ASSERT(Token::IsBitOp(op_));
-    // Signed conversion.
-    __ cvtsi2sd(xmm0, Operand(dst_));
-    __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0);
-  } else {
-    if (op_ == Token::SHR) {
-      __ push(Immediate(0));  // High word of unsigned value.
-      __ push(dst_);
-      __ fild_d(Operand(esp, 0));
-      __ Drop(2);
-    } else {
-      ASSERT(Token::IsBitOp(op_));
-      __ push(dst_);
-      __ fild_s(Operand(esp, 0));  // Signed conversion.
-      __ pop(dst_);
-    }
-    __ fstp_d(FieldOperand(left_, HeapNumber::kValueOffset));
-  }
-  __ mov(dst_, left_);
-  RestoreRegisters();
-  Exit();
-}
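
The shl/rcr/push sequence above keeps a raw 32-bit answer alive across the
allocating runtime call by splitting it into two GC-safe tagged words: the
low 31 bits, smi-tagged by the left shift, and the high bit, which ends up
as 0 or 0x80000000 (both valid smi encodings).  A hedged C++ model of the
round trip:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t answer = 0xdeadbeefu;         // raw, untagged 32-bit answer
      // Split (shl dst,1 / rcr left,1): tag the low 31 bits, park the
      // high bit in a separate word.
      uint32_t low  = answer << 1;           // smi-tagged low 31 bits
      uint32_t high = answer & 0x80000000u;  // 0 or 0x80000000, also tagged
      // ... Runtime::kNumberAlloc may trigger a GC here; both words are
      // valid smis, so the GC leaves them alone ...
      // Restore (shr dst,1 / or dst,right): untag, then put the top bit back.
      uint32_t restored = (low >> 1) | high;
      assert(restored == answer);
      return 0;
    }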
-
-
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
-                                  Token::Value op,
-                                  const Result& right,
-                                  const Result& left) {
-  // Set TypeInfo of result according to the operation performed.
-  // Rely on the fact that smis have a 31 bit payload on ia32.
-  STATIC_ASSERT(kSmiValueSize == 31);
-  switch (op) {
-    case Token::COMMA:
-      return right.type_info();
-    case Token::OR:
-    case Token::AND:
-      // Result type can be either of the two input types.
-      return operands_type;
-    case Token::BIT_AND: {
-      // AND-ing with a non-negative Smi always gives a Smi.
-      if (right.is_constant() && right.handle()->IsSmi() &&
-          Smi::cast(*right.handle())->value() >= 0) {
-        return TypeInfo::Smi();
-      } else if (left.is_constant() && left.handle()->IsSmi() &&
-          Smi::cast(*left.handle())->value() >= 0) {
-        return TypeInfo::Smi();
-      }
-      return (operands_type.IsSmi())
-          ? TypeInfo::Smi()
-          : TypeInfo::Integer32();
-    }
-    case Token::BIT_OR: {
-      // OR-ing with a negative Smi always gives a Smi.
-      if (right.is_constant() && right.handle()->IsSmi() &&
-          Smi::cast(*right.handle())->value() < 0) {
-        return TypeInfo::Smi();
-      } else if (left.is_constant() && left.handle()->IsSmi() &&
-          Smi::cast(*left.handle())->value() < 0) {
-        return TypeInfo::Smi();
-      }
-      return (operands_type.IsSmi())
-          ? TypeInfo::Smi()
-          : TypeInfo::Integer32();
-    }
-    case Token::BIT_XOR:
-      // Result is always a 32 bit integer. Smi property of inputs is preserved.
-      return (operands_type.IsSmi())
-          ? TypeInfo::Smi()
-          : TypeInfo::Integer32();
-    case Token::SAR:
-      if (left.is_smi()) return TypeInfo::Smi();
-      // Result is a smi if we shift by a constant >= 1, otherwise an integer32.
-      // Shift amount is masked with 0x1F (ECMA standard 11.7.2).
-      return (right.is_constant() && right.handle()->IsSmi()
-              && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
-          ? TypeInfo::Smi()
-          : TypeInfo::Integer32();
-    case Token::SHR:
-      // Result is a smi if we shift by a constant >= 2, an integer32 if
-      // we shift by 1, and otherwise a number, since an unsigned 32-bit
-      // result may not fit in the signed integer32 range.
-      if (right.is_constant() && right.handle()->IsSmi()) {
-        int shift_amount = Smi::cast(*right.handle())->value() & 0x1F;
-        if (shift_amount > 1) {
-          return TypeInfo::Smi();
-        } else if (shift_amount > 0) {
-          return TypeInfo::Integer32();
-        }
-      }
-      return TypeInfo::Number();
-    case Token::ADD:
-      if (operands_type.IsSmi()) {
-        // The Integer32 range is big enough to take the sum of any two Smis.
-        return TypeInfo::Integer32();
-      } else if (operands_type.IsNumber()) {
-        return TypeInfo::Number();
-      } else if (left.type_info().IsString() || right.type_info().IsString()) {
-        return TypeInfo::String();
-      } else {
-        return TypeInfo::Unknown();
-      }
-    case Token::SHL:
-      return TypeInfo::Integer32();
-    case Token::SUB:
-      // The Integer32 range is big enough to take the difference of any two
-      // Smis.
-      return (operands_type.IsSmi()) ?
-                    TypeInfo::Integer32() :
-                    TypeInfo::Number();
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-      // Result is always a number.
-      return TypeInfo::Number();
-    default:
-      UNREACHABLE();
-  }
-  UNREACHABLE();
-  return TypeInfo::Unknown();
-}
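
The BIT_AND and BIT_OR rules above follow from two's-complement sign
behaviour: AND with a non-negative constant can only clear bits, pinning the
result into [0, constant], while OR with a negative constant forces the sign
bit (and every other set bit), pinning the result into [constant, -1]; either
interval sits inside the smi range whenever the constant is a smi.  A quick
standalone check (constants mirror the ia32 smi layout):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t kSmiMax = 0x3fffffff;   // 2^30 - 1 (31-bit smi payload)
      const int32_t kSmiMin = -0x40000000;  // -2^30
      int32_t any = 0x7fffffff;             // arbitrary int32, not a smi
      // AND with a non-negative smi: result is in [0, kSmiMax].
      assert((any & kSmiMax) >= 0 && (any & kSmiMax) <= kSmiMax);
      // OR with a negative smi: result is in [kSmiMin, -1].
      assert((any | kSmiMin) >= kSmiMin && (any | kSmiMin) < 0);
      return 0;
    }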
-
-
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
-                                           OverwriteMode overwrite_mode) {
-  Comment cmnt(masm_, "[ BinaryOperation");
-  Token::Value op = expr->op();
-  Comment cmnt_token(masm_, Token::String(op));
-
-  if (op == Token::COMMA) {
-    // Simply discard left value.
-    frame_->Nip(1);
-    return;
-  }
-
-  Result right = frame_->Pop();
-  Result left = frame_->Pop();
-
-  if (op == Token::ADD) {
-    const bool left_is_string = left.type_info().IsString();
-    const bool right_is_string = right.type_info().IsString();
-    // Make sure constant strings have string type info.
-    ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
-           left_is_string);
-    ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
-           right_is_string);
-    if (left_is_string || right_is_string) {
-      frame_->Push(&left);
-      frame_->Push(&right);
-      Result answer;
-      if (left_is_string) {
-        if (right_is_string) {
-          StringAddStub stub(NO_STRING_CHECK_IN_STUB);
-          answer = frame_->CallStub(&stub, 2);
-        } else {
-          StringAddStub stub(NO_STRING_CHECK_LEFT_IN_STUB);
-          answer = frame_->CallStub(&stub, 2);
-        }
-      } else if (right_is_string) {
-        StringAddStub stub(NO_STRING_CHECK_RIGHT_IN_STUB);
-        answer = frame_->CallStub(&stub, 2);
-      }
-      answer.set_type_info(TypeInfo::String());
-      frame_->Push(&answer);
-      return;
-    }
-    // Neither operand is known to be a string.
-  }
-
-  bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
-  bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
-  bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
-  bool right_is_non_smi_constant =
-      right.is_constant() && !right.handle()->IsSmi();
-
-  if (left_is_smi_constant && right_is_smi_constant) {
-    // Compute the constant result at compile time, and leave it on the frame.
-    int left_int = Smi::cast(*left.handle())->value();
-    int right_int = Smi::cast(*right.handle())->value();
-    if (FoldConstantSmis(op, left_int, right_int)) return;
-  }
-
-  // Get number type of left and right sub-expressions.
-  TypeInfo operands_type =
-      TypeInfo::Combine(left.type_info(), right.type_info());
-
-  TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
-
-  Result answer;
-  if (left_is_non_smi_constant || right_is_non_smi_constant) {
-    // Go straight to the slow case, with no smi code.
-    GenericBinaryOpStub stub(op,
-                             overwrite_mode,
-                             NO_SMI_CODE_IN_STUB,
-                             operands_type);
-    answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
-  } else if (right_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
-                                        false, overwrite_mode);
-  } else if (left_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
-                                        true, overwrite_mode);
-  } else {
-    // Set the flags based on the operation, type and loop nesting level.
-    // Inline smi-check code is only generated when the operation is part
-    // of a loop.  Within a loop it is generated for bit operations (which
-    // are assumed to likely operate on smis), for integer32 operands, and
-    // for operations whose result is a likely smi.
-    if (loop_nesting() > 0 &&
-        (Token::IsBitOp(op) ||
-         operands_type.IsInteger32() ||
-         expr->type()->IsLikelySmi())) {
-      answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
-    } else {
-      GenericBinaryOpStub stub(op,
-                               overwrite_mode,
-                               NO_GENERIC_BINARY_FLAGS,
-                               operands_type);
-      answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
-    }
-  }
-
-  answer.set_type_info(result_type);
-  frame_->Push(&answer);
-}
-
-
-Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
-                                                      Result* left,
-                                                      Result* right) {
-  if (stub->ArgsInRegistersSupported()) {
-    stub->SetArgsInRegisters();
-    return frame_->CallStub(stub, left, right);
-  } else {
-    frame_->Push(left);
-    frame_->Push(right);
-    return frame_->CallStub(stub, 2);
-  }
-}
-
-
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
-  Object* answer_object = HEAP->undefined_value();
-  switch (op) {
-    case Token::ADD:
-      if (Smi::IsValid(left + right)) {
-        answer_object = Smi::FromInt(left + right);
-      }
-      break;
-    case Token::SUB:
-      if (Smi::IsValid(left - right)) {
-        answer_object = Smi::FromInt(left - right);
-      }
-      break;
-    case Token::MUL: {
-        double answer = static_cast<double>(left) * right;
-        if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
-          // If the product is zero and the non-zero factor is negative,
-          // the spec requires us to return floating point negative zero.
-          if (answer != 0 || (left >= 0 && right >= 0)) {
-            answer_object = Smi::FromInt(static_cast<int>(answer));
-          }
-        }
-      }
-      break;
-    case Token::DIV:
-    case Token::MOD:
-      break;
-    case Token::BIT_OR:
-      answer_object = Smi::FromInt(left | right);
-      break;
-    case Token::BIT_AND:
-      answer_object = Smi::FromInt(left & right);
-      break;
-    case Token::BIT_XOR:
-      answer_object = Smi::FromInt(left ^ right);
-      break;
-
-    case Token::SHL: {
-        int shift_amount = right & 0x1F;
-        if (Smi::IsValid(left << shift_amount)) {
-          answer_object = Smi::FromInt(left << shift_amount);
-        }
-        break;
-      }
-    case Token::SHR: {
-        int shift_amount = right & 0x1F;
-        unsigned int unsigned_left = left;
-        unsigned_left >>= shift_amount;
-        if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
-          answer_object = Smi::FromInt(unsigned_left);
-        }
-        break;
-      }
-    case Token::SAR: {
-        int shift_amount = right & 0x1F;
-        unsigned int unsigned_left = left;
-        if (left < 0) {
-          // Perform an arithmetic shift of a negative number by
-          // complementing it, shifting logically, and complementing again.
-          unsigned_left = ~unsigned_left;
-          unsigned_left >>= shift_amount;
-          unsigned_left = ~unsigned_left;
-        } else {
-          unsigned_left >>= shift_amount;
-        }
-        ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
-        answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
-        break;
-      }
-    default:
-      UNREACHABLE();
-      break;
-  }
-  if (answer_object->IsUndefined()) {
-    return false;
-  }
-  frame_->Push(Handle<Object>(answer_object));
-  return true;
-}
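
Two details of the constant folding above deserve a worked example: a zero
product is not folded when the non-zero factor is negative, because ECMA-262
requires -0.0 there and a smi can only represent +0; and the SAR case
sidesteps the implementation-defined right shift of a negative int by
complementing, shifting logically, and complementing back.  A hedged
reproduction in plain C++:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Portable arithmetic shift right, as in the SAR folding case above.
    static int32_t ArithmeticShiftRight(int32_t left, int shift) {
      uint32_t u = static_cast<uint32_t>(left);
      return (left < 0) ? static_cast<int32_t>(~(~u >> shift))
                        : static_cast<int32_t>(u >> shift);
    }

    int main() {
      // 0 * -5 must evaluate to -0.0, which no smi can represent.
      double product = 0.0 * -5.0;
      assert(product == 0.0 && std::signbit(product));
      assert(ArithmeticShiftRight(-9, 1) == -5);  // rounds toward -infinity
      assert(ArithmeticShiftRight(9, 1) == 4);
      return 0;
    }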
-
-
-void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
-                                               Result* right,
-                                               JumpTarget* both_smi) {
-  TypeInfo left_info = left->type_info();
-  TypeInfo right_info = right->type_info();
-  if (left_info.IsDouble() || left_info.IsString() ||
-      right_info.IsDouble() || right_info.IsString()) {
-    // We know that left and right are not both smis.  Don't do any tests.
-    return;
-  }
-
-  if (left->reg().is(right->reg())) {
-    if (!left_info.IsSmi()) {
-      __ test(left->reg(), Immediate(kSmiTagMask));
-      both_smi->Branch(zero);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
-      left->Unuse();
-      right->Unuse();
-      both_smi->Jump();
-    }
-  } else if (!left_info.IsSmi()) {
-    if (!right_info.IsSmi()) {
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ mov(temp.reg(), left->reg());
-      __ or_(temp.reg(), Operand(right->reg()));
-      __ test(temp.reg(), Immediate(kSmiTagMask));
-      temp.Unuse();
-      both_smi->Branch(zero);
-    } else {
-      __ test(left->reg(), Immediate(kSmiTagMask));
-      both_smi->Branch(zero);
-    }
-  } else {
-    if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
-    if (!right_info.IsSmi()) {
-      __ test(right->reg(), Immediate(kSmiTagMask));
-      both_smi->Branch(zero);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
-      left->Unuse();
-      right->Unuse();
-      both_smi->Jump();
-    }
-  }
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                                  Register right,
-                                                  Register scratch,
-                                                  TypeInfo left_info,
-                                                  TypeInfo right_info,
-                                                  DeferredCode* deferred) {
-  JumpIfNotBothSmiUsingTypeInfo(left,
-                                right,
-                                scratch,
-                                left_info,
-                                right_info,
-                                deferred->entry_label());
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                                  Register right,
-                                                  Register scratch,
-                                                  TypeInfo left_info,
-                                                  TypeInfo right_info,
-                                                  Label* on_not_smi) {
-  if (left.is(right)) {
-    if (!left_info.IsSmi()) {
-      __ test(left, Immediate(kSmiTagMask));
-      __ j(not_zero, on_not_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(left);
-    }
-  } else if (!left_info.IsSmi()) {
-    if (!right_info.IsSmi()) {
-      __ mov(scratch, left);
-      __ or_(scratch, Operand(right));
-      __ test(scratch, Immediate(kSmiTagMask));
-      __ j(not_zero, on_not_smi);
-    } else {
-      __ test(left, Immediate(kSmiTagMask));
-      __ j(not_zero, on_not_smi);
-      if (FLAG_debug_code) __ AbortIfNotSmi(right);
-    }
-  } else {
-    if (FLAG_debug_code) __ AbortIfNotSmi(left);
-    if (!right_info.IsSmi()) {
-      __ test(right, Immediate(kSmiTagMask));
-      __ j(not_zero, on_not_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(right);
-    }
-  }
-}
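
The mov/or/test sequence above checks both tags with a single test: smis
carry tag 0 in the low bit, so the OR of two words has a set low bit exactly
when at least one operand is a heap object.  A standalone sketch (the
constants mirror the ia32 tagging scheme; the helper is illustrative):

    #include <cassert>
    #include <cstdint>

    const uint32_t kSmiTagMask = 1;  // low bit: 0 for smis, 1 for heap objects

    static bool BothSmi(uint32_t left, uint32_t right) {
      // One test instead of two: (left | right) has a clear low bit
      // iff both low bits are clear.
      return ((left | right) & kSmiTagMask) == 0;
    }

    int main() {
      uint32_t smi_three = 3u << 1;    // tagged smi
      uint32_t heap_object = 0x1001u;  // heap pointers have the low bit set
      assert(BothSmi(smi_three, smi_three));
      assert(!BothSmi(smi_three, heap_object));
      return 0;
    }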
-
-
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
-                                               Result* left,
-                                               Result* right,
-                                               OverwriteMode overwrite_mode) {
-  // Copy the type info because left and right may be overwritten.
-  TypeInfo left_type_info = left->type_info();
-  TypeInfo right_type_info = right->type_info();
-  Token::Value op = expr->op();
-  Result answer;
-  // Special handling of div and mod because they use fixed registers.
-  if (op == Token::DIV || op == Token::MOD) {
-    // We need eax as the quotient register, edx as the remainder
-    // register, neither left nor right in eax or edx, and left copied
-    // to eax.
-    Result quotient;
-    Result remainder;
-    bool left_is_in_eax = false;
-    // Step 1: get eax for quotient.
-    if ((left->is_register() && left->reg().is(eax)) ||
-        (right->is_register() && right->reg().is(eax))) {
-      // One or both operands are in eax.  Use a fresh register that is
-      // not edx for them.
-      Result fresh = allocator_->Allocate();
-      ASSERT(fresh.is_valid());
-      if (fresh.reg().is(edx)) {
-        remainder = fresh;
-        fresh = allocator_->Allocate();
-        ASSERT(fresh.is_valid());
-      }
-      if (left->is_register() && left->reg().is(eax)) {
-        quotient = *left;
-        *left = fresh;
-        left_is_in_eax = true;
-      }
-      if (right->is_register() && right->reg().is(eax)) {
-        quotient = *right;
-        *right = fresh;
-      }
-      __ mov(fresh.reg(), eax);
-    } else {
-      // Neither left nor right is in eax.
-      quotient = allocator_->Allocate(eax);
-    }
-    ASSERT(quotient.is_register() && quotient.reg().is(eax));
-    ASSERT(!(left->is_register() && left->reg().is(eax)));
-    ASSERT(!(right->is_register() && right->reg().is(eax)));
-
-    // Step 2: get edx for remainder if necessary.
-    if (!remainder.is_valid()) {
-      if ((left->is_register() && left->reg().is(edx)) ||
-          (right->is_register() && right->reg().is(edx))) {
-        Result fresh = allocator_->Allocate();
-        ASSERT(fresh.is_valid());
-        if (left->is_register() && left->reg().is(edx)) {
-          remainder = *left;
-          *left = fresh;
-        }
-        if (right->is_register() && right->reg().is(edx)) {
-          remainder = *right;
-          *right = fresh;
-        }
-        __ mov(fresh.reg(), edx);
-      } else {
-        // Neither left nor right is in edx.
-        remainder = allocator_->Allocate(edx);
-      }
-    }
-    ASSERT(remainder.is_register() && remainder.reg().is(edx));
-    ASSERT(!(left->is_register() && left->reg().is(edx)));
-    ASSERT(!(right->is_register() && right->reg().is(edx)));
-
-    left->ToRegister();
-    right->ToRegister();
-    frame_->Spill(eax);
-    frame_->Spill(edx);
-    // DeferredInlineBinaryOperation requires all the registers that it is
-    // told about to be spilled and distinct.
-    Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
-
-    // Check that left and right are smi tagged.
-    DeferredInlineBinaryOperation* deferred =
-        new DeferredInlineBinaryOperation(op,
-                                          (op == Token::DIV) ? eax : edx,
-                                          left->reg(),
-                                          distinct_right.reg(),
-                                          left_type_info,
-                                          right_type_info,
-                                          overwrite_mode);
-    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), edx,
-                                  left_type_info, right_type_info, deferred);
-    if (!left_is_in_eax) {
-      __ mov(eax, left->reg());
-    }
-    // Sign extend eax into edx:eax.
-    __ cdq();
-    // Check for 0 divisor.
-    __ test(right->reg(), Operand(right->reg()));
-    deferred->Branch(zero);
-    // Divide edx:eax by the right operand.
-    __ idiv(right->reg());
-
-    // Complete the operation.
-    if (op == Token::DIV) {
-      // Check for negative zero result.  If result is zero, and divisor
-      // is negative, return a floating point negative zero.  The
-      // virtual frame is unchanged in this block, so local control flow
-      // can use a Label rather than a JumpTarget.  If the context of this
-      // expression will treat -0 like 0, do not do this test.
-      if (!expr->no_negative_zero()) {
-        Label non_zero_result;
-        __ test(left->reg(), Operand(left->reg()));
-        __ j(not_zero, &non_zero_result);
-        __ test(right->reg(), Operand(right->reg()));
-        deferred->Branch(negative);
-        __ bind(&non_zero_result);
-      }
-      // Check for the corner case of dividing the most negative smi by
-      // -1.  We cannot use the overflow flag, since it is not set by
-      // the idiv instruction.
-      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-      __ cmp(eax, 0x40000000);
-      deferred->Branch(equal);
-      // Check that the remainder is zero.
-      __ test(edx, Operand(edx));
-      deferred->Branch(not_zero);
-      // Tag the result and store it in the quotient register.
-      __ SmiTag(eax);
-      deferred->BindExit();
-      left->Unuse();
-      right->Unuse();
-      answer = quotient;
-    } else {
-      ASSERT(op == Token::MOD);
-      // Check for a negative zero result.  If the result is zero, and
-      // the dividend is negative, return a floating point negative
-      // zero.  The frame is unchanged in this block, so local control
-      // flow can use a Label rather than a JumpTarget.
-      if (!expr->no_negative_zero()) {
-        Label non_zero_result;
-        __ test(edx, Operand(edx));
-        __ j(not_zero, &non_zero_result, taken);
-        __ test(left->reg(), Operand(left->reg()));
-        deferred->Branch(negative);
-        __ bind(&non_zero_result);
-      }
-      deferred->BindExit();
-      left->Unuse();
-      right->Unuse();
-      answer = remainder;
-    }
-    ASSERT(answer.is_valid());
-    return answer;
-  }
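
To summarize the bailouts in the division path just above: the deferred code
is reached for a zero divisor, for a zero quotient with a negative divisor
(the result must be -0.0), for Smi::kMinValue / -1 (the quotient 2^30 is one
past Smi::kMaxValue, and idiv does not set the overflow flag), and for a
non-zero remainder.  The same conditions in plain C++, as a hedged sketch on
untagged values (the helper name is illustrative):

    #include <cassert>
    #include <cstdint>

    static bool DivNeedsSlowPath(int32_t left, int32_t right) {
      const int32_t kSmiMin = -0x40000000;          // ia32: 31-bit payload
      if (right == 0) return true;                  // division by zero
      if (left == 0 && right < 0) return true;      // 0 / -n is -0.0
      if (left == kSmiMin && right == -1) return true;  // quotient is 2^30
      return left % right != 0;                     // non-smi quotient
    }

    int main() {
      assert(DivNeedsSlowPath(0, -3));
      assert(DivNeedsSlowPath(-0x40000000, -1));
      assert(!DivNeedsSlowPath(6, -3));
      return 0;
    }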
-
-  // Special handling of shift operations because they use fixed
-  // registers.
-  if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
-    // Move left out of ecx if necessary.
-    if (left->is_register() && left->reg().is(ecx)) {
-      *left = allocator_->Allocate();
-      ASSERT(left->is_valid());
-      __ mov(left->reg(), ecx);
-    }
-    right->ToRegister(ecx);
-    left->ToRegister();
-    ASSERT(left->is_register() && !left->reg().is(ecx));
-    ASSERT(right->is_register() && right->reg().is(ecx));
-    if (left_type_info.IsSmi()) {
-      if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
-    }
-    if (right_type_info.IsSmi()) {
-      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
-    }
-
-    // We will modify right; it must be spilled.
-    frame_->Spill(ecx);
-    // DeferredInlineBinaryOperation requires all the registers that it is told
-    // about to be spilled and distinct.  We know that right is ecx and left is
-    // not ecx.
-    frame_->Spill(left->reg());
-
-    // Use a fresh answer register to avoid spilling the left operand.
-    answer = allocator_->Allocate();
-    ASSERT(answer.is_valid());
-
-    DeferredInlineBinaryOperation* deferred =
-        new DeferredInlineBinaryOperation(op,
-                                          answer.reg(),
-                                          left->reg(),
-                                          ecx,
-                                          left_type_info,
-                                          right_type_info,
-                                          overwrite_mode);
-    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
-                                  left_type_info, right_type_info,
-                                  deferred->NonSmiInputLabel());
-
-    // Untag both operands.
-    __ mov(answer.reg(), left->reg());
-    __ SmiUntag(answer.reg());
-    __ SmiUntag(right->reg());  // Right is ecx.
-
-    // Perform the operation.
-    ASSERT(right->reg().is(ecx));
-    switch (op) {
-      case Token::SAR: {
-        __ sar_cl(answer.reg());
-        if (!left_type_info.IsSmi()) {
-          // Check that the *signed* result fits in a smi.
-          __ cmp(answer.reg(), 0xc0000000);
-          deferred->JumpToAnswerOutOfRange(negative);
-        }
-        break;
-      }
-      case Token::SHR: {
-        __ shr_cl(answer.reg());
-        // Check that the *unsigned* result fits in a smi.  Neither of
-        // the two high-order bits can be set:
-        //  * 0x80000000: high bit would be lost when smi tagging.
-        //  * 0x40000000: this number would convert to negative when smi
-        //    tagging.
-        // These two cases can only happen with shifts by 0 or 1 when
-        // handed a valid smi.  If the answer cannot be represented by a
-        // smi, restore the left and right arguments, and jump to slow
-        // case.  The low bit of the left argument may be lost, but only
-        // in a case where it is dropped anyway.
-        __ test(answer.reg(), Immediate(0xc0000000));
-        deferred->JumpToAnswerOutOfRange(not_zero);
-        break;
-      }
-      case Token::SHL: {
-        __ shl_cl(answer.reg());
-        // Check that the *signed* result fits in a smi.
-        __ cmp(answer.reg(), 0xc0000000);
-        deferred->JumpToAnswerOutOfRange(negative);
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
-    // Smi-tag the result in answer.
-    __ SmiTag(answer.reg());
-    deferred->BindExit();
-    left->Unuse();
-    right->Unuse();
-    ASSERT(answer.is_valid());
-    return answer;
-  }
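
The SHR comment above notes that an out-of-range answer can only arise for
shifts by 0 or 1 when the input was a valid smi; a shift by 2 or more always
clears both offending bits.  A concrete check in plain C++ (not V8 code),
using the worst case, the untagged smi minimum -2^30:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t worst = 0xc0000000u;  // -2^30, the untagged smi minimum
      assert((worst >> 0) & 0xc0000000u);         // shift by 0: out of range
      assert((worst >> 1) & 0xc0000000u);         // shift by 1: still out
      assert(((worst >> 2) & 0xc0000000u) == 0);  // shift by 2: fits a smi
      return 0;
    }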
-
-  // Handle the other binary operations.
-  left->ToRegister();
-  right->ToRegister();
-  // DeferredInlineBinaryOperation requires all the registers that it is told
-  // about to be spilled.
-  Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
-  // A newly allocated register, answer, is used to hold the result.  The
-  // registers containing left and right are not modified, so they don't
-  // need to be spilled in the fast case.
-  answer = allocator_->Allocate();
-  ASSERT(answer.is_valid());
-
-  // Perform the smi tag check.
-  DeferredInlineBinaryOperation* deferred =
-      new DeferredInlineBinaryOperation(op,
-                                        answer.reg(),
-                                        left->reg(),
-                                        distinct_right.reg(),
-                                        left_type_info,
-                                        right_type_info,
-                                        overwrite_mode);
-  Label non_smi_bit_op;
-  if (op != Token::BIT_OR) {
-    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
-                                  left_type_info, right_type_info,
-                                  deferred->NonSmiInputLabel());
-  }
-
-  __ mov(answer.reg(), left->reg());
-  switch (op) {
-    case Token::ADD:
-      __ add(answer.reg(), Operand(right->reg()));
-      deferred->Branch(overflow);
-      break;
-
-    case Token::SUB:
-      __ sub(answer.reg(), Operand(right->reg()));
-      deferred->Branch(overflow);
-      break;
-
-    case Token::MUL: {
-      // If the smi tag is 0 we can just leave the tag on one operand.
-      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
-      // Remove smi tag from the left operand (but keep sign).
-      // Left-hand operand has been copied into answer.
-      __ SmiUntag(answer.reg());
-      // Do multiplication of smis, leaving result in answer.
-      __ imul(answer.reg(), Operand(right->reg()));
-      // Go slow on overflows.
-      deferred->Branch(overflow);
-      // Check for negative zero result.  If product is zero, and one
-      // argument is negative, go to slow case.  The frame is unchanged
-      // in this block, so local control flow can use a Label rather
-      // than a JumpTarget.
-      if (!expr->no_negative_zero()) {
-        Label non_zero_result;
-        __ test(answer.reg(), Operand(answer.reg()));
-        __ j(not_zero, &non_zero_result, taken);
-        __ mov(answer.reg(), left->reg());
-        __ or_(answer.reg(), Operand(right->reg()));
-        deferred->Branch(negative);
-        __ xor_(answer.reg(), Operand(answer.reg()));  // Positive 0 is correct.
-        __ bind(&non_zero_result);
-      }
-      break;
-    }
-
-    case Token::BIT_OR:
-      __ or_(answer.reg(), Operand(right->reg()));
-      __ test(answer.reg(), Immediate(kSmiTagMask));
-      __ j(not_zero, deferred->NonSmiInputLabel());
-      break;
-
-    case Token::BIT_AND:
-      __ and_(answer.reg(), Operand(right->reg()));
-      break;
-
-    case Token::BIT_XOR:
-      __ xor_(answer.reg(), Operand(right->reg()));
-      break;
-
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  deferred->BindExit();
-  left->Unuse();
-  right->Unuse();
-  ASSERT(answer.is_valid());
-  return answer;
-}
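
In the MUL case above, the tag (zero) is stripped from only one operand,
since (2a)*b = 2(a*b) keeps the product correctly tagged, and a zero product
falls back to the stub whenever either factor is negative, which the code
detects by OR-ing the operands and testing the sign.  A worked check of the
arithmetic (hedged sketch, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t a = 3, b = -7;
      int32_t tagged_a = a * 2, tagged_b = b * 2;   // ia32 smi tagging
      // Untag one side only: a * (2b) == 2 * (a * b).
      assert((tagged_a / 2) * tagged_b == (a * b) * 2);
      // Zero product: sign of (left | right) decides whether -0.0 is needed.
      int32_t left = 0, right = -5;
      assert(left * right == 0 && (left | right) < 0);  // slow path: -0.0
      return 0;
    }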
-
-
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
-  DeferredInlineSmiOperation(Token::Value op,
-                             Register dst,
-                             Register src,
-                             TypeInfo type_info,
-                             Smi* value,
-                             OverwriteMode overwrite_mode)
-      : op_(op),
-        dst_(dst),
-        src_(src),
-        type_info_(type_info),
-        value_(value),
-        overwrite_mode_(overwrite_mode) {
-    if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
-    set_comment("[ DeferredInlineSmiOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Token::Value op_;
-  Register dst_;
-  Register src_;
-  TypeInfo type_info_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperation::Generate() {
-  // For mod we don't generate all the Smi code inline.
-  GenericBinaryOpStub stub(
-      op_,
-      overwrite_mode_,
-      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB,
-      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
-  stub.GenerateCall(masm_, src_, value_);
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
-  DeferredInlineSmiOperationReversed(Token::Value op,
-                                     Register dst,
-                                     Smi* value,
-                                     Register src,
-                                     TypeInfo type_info,
-                                     OverwriteMode overwrite_mode)
-      : op_(op),
-        dst_(dst),
-        type_info_(type_info),
-        value_(value),
-        src_(src),
-        overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiOperationReversed");
-  }
-
-  virtual void Generate();
-
- private:
-  Token::Value op_;
-  Register dst_;
-  TypeInfo type_info_;
-  Smi* value_;
-  Register src_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperationReversed::Generate() {
-  GenericBinaryOpStub stub(
-      op_,
-      overwrite_mode_,
-      NO_SMI_CODE_IN_STUB,
-      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
-  stub.GenerateCall(masm_, value_, src_);
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of src + value is in dst.  It either overflowed or was not
-// smi tagged.  Undo the speculative addition and call the appropriate
-// specialized stub for add.  The result is left in dst.
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
-  DeferredInlineSmiAdd(Register dst,
-                       TypeInfo type_info,
-                       Smi* value,
-                       OverwriteMode overwrite_mode)
-      : dst_(dst),
-        type_info_(type_info),
-        value_(value),
-        overwrite_mode_(overwrite_mode) {
-    if (type_info_.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
-    set_comment("[ DeferredInlineSmiAdd");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  TypeInfo type_info_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAdd::Generate() {
-  // Undo the optimistic add operation and call the shared stub.
-  __ sub(Operand(dst_), Immediate(value_));
-  GenericBinaryOpStub igostub(
-      Token::ADD,
-      overwrite_mode_,
-      NO_SMI_CODE_IN_STUB,
-      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
-  igostub.GenerateCall(masm_, dst_, value_);
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of value + src is in dst.  It either overflowed or was not
-// smi tagged.  Undo the speculative addition and call the appropriate
-// specialized stub for add.  The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
-  DeferredInlineSmiAddReversed(Register dst,
-                               TypeInfo type_info,
-                               Smi* value,
-                               OverwriteMode overwrite_mode)
-      : dst_(dst),
-        type_info_(type_info),
-        value_(value),
-        overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiAddReversed");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  TypeInfo type_info_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAddReversed::Generate() {
-  // Undo the optimistic add operation and call the shared stub.
-  __ sub(Operand(dst_), Immediate(value_));
-  GenericBinaryOpStub igostub(
-      Token::ADD,
-      overwrite_mode_,
-      NO_SMI_CODE_IN_STUB,
-      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
-  igostub.GenerateCall(masm_, value_, dst_);
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The result of src - value is in dst.  It either overflowed or was not
-// smi tagged.  Undo the speculative subtraction and call the
-// appropriate specialized stub for subtract.  The result is left in
-// dst.
-class DeferredInlineSmiSub: public DeferredCode {
- public:
-  DeferredInlineSmiSub(Register dst,
-                       TypeInfo type_info,
-                       Smi* value,
-                       OverwriteMode overwrite_mode)
-      : dst_(dst),
-        type_info_(type_info),
-        value_(value),
-        overwrite_mode_(overwrite_mode) {
-    if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
-    set_comment("[ DeferredInlineSmiSub");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  TypeInfo type_info_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiSub::Generate() {
-  // Undo the optimistic sub operation and call the shared stub.
-  __ add(Operand(dst_), Immediate(value_));
-  GenericBinaryOpStub igostub(
-      Token::SUB,
-      overwrite_mode_,
-      NO_SMI_CODE_IN_STUB,
-      TypeInfo::Combine(TypeInfo::Smi(), type_info_));
-  igostub.GenerateCall(masm_, dst_, value_);
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
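
DeferredInlineSmiAdd/Sub implement a speculative scheme: the fast path
applies the smi operation in place first and checks the flags afterwards, so
the deferred path has to reverse the operation before calling the stub,
which expects the original operand.  A minimal C++ model of the add case
(the explicit overflow test stands in for the CPU's overflow flag;
illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t dst = 0x3fffffff * 2;   // Smi::kMaxValue, tagged
      int32_t value = 1 * 2;          // tagged 1
      // Optimistic in-place add, wrapping exactly like the hardware `add`.
      uint32_t sum = static_cast<uint32_t>(dst) + static_cast<uint32_t>(value);
      // Overflow flag: the operands agree in sign but the sum does not.
      bool overflow =
          ((dst ^ value) >= 0) && ((dst ^ static_cast<int32_t>(sum)) < 0);
      assert(overflow);  // smi max + 1 does not fit
      // Deferred path: undo the speculative add (the `sub` in Generate
      // above) so the stub sees the original left operand.
      int32_t restored =
          static_cast<int32_t>(sum - static_cast<uint32_t>(value));
      assert(restored == dst);
      return 0;
    }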
-
-
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
-                                                 Result* operand,
-                                                 Handle<Object> value,
-                                                 bool reversed,
-                                                 OverwriteMode overwrite_mode) {
-  // Generate inline code for a binary operation when one of the
-  // operands is a constant smi.  Consumes the argument "operand".
-  if (IsUnsafeSmi(value)) {
-    Result unsafe_operand(value);
-    if (reversed) {
-      return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
-                                      overwrite_mode);
-    } else {
-      return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
-                                      overwrite_mode);
-    }
-  }
-
-  // Get the literal value.
-  Smi* smi_value = Smi::cast(*value);
-  int int_value = smi_value->value();
-
-  Token::Value op = expr->op();
-  Result answer;
-  switch (op) {
-    case Token::ADD: {
-      operand->ToRegister();
-      frame_->Spill(operand->reg());
-
-      // Optimistically add.  Call the specialized add stub if the
-      // result is not a smi or overflows.
-      DeferredCode* deferred = NULL;
-      if (reversed) {
-        deferred = new DeferredInlineSmiAddReversed(operand->reg(),
-                                                    operand->type_info(),
-                                                    smi_value,
-                                                    overwrite_mode);
-      } else {
-        deferred = new DeferredInlineSmiAdd(operand->reg(),
-                                            operand->type_info(),
-                                            smi_value,
-                                            overwrite_mode);
-      }
-      __ add(Operand(operand->reg()), Immediate(value));
-      deferred->Branch(overflow);
-      if (!operand->type_info().IsSmi()) {
-        __ test(operand->reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
-      } else if (FLAG_debug_code) {
-        __ AbortIfNotSmi(operand->reg());
-      }
-      deferred->BindExit();
-      answer = *operand;
-      break;
-    }
-
-    case Token::SUB: {
-      DeferredCode* deferred = NULL;
-      if (reversed) {
-        // The reversed case is only hit when the right operand is not a
-        // constant.
-        ASSERT(operand->is_register());
-        answer = allocator()->Allocate();
-        ASSERT(answer.is_valid());
-        __ Set(answer.reg(), Immediate(value));
-        deferred =
-            new DeferredInlineSmiOperationReversed(op,
-                                                   answer.reg(),
-                                                   smi_value,
-                                                   operand->reg(),
-                                                   operand->type_info(),
-                                                   overwrite_mode);
-        __ sub(answer.reg(), Operand(operand->reg()));
-      } else {
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        answer = *operand;
-        deferred = new DeferredInlineSmiSub(operand->reg(),
-                                            operand->type_info(),
-                                            smi_value,
-                                            overwrite_mode);
-        __ sub(Operand(operand->reg()), Immediate(value));
-      }
-      deferred->Branch(overflow);
-      if (!operand->type_info().IsSmi()) {
-        __ test(answer.reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
-      } else if (FLAG_debug_code) {
-        __ AbortIfNotSmi(operand->reg());
-      }
-      deferred->BindExit();
-      operand->Unuse();
-      break;
-    }
-
-    case Token::SAR:
-      if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        if (!operand->type_info().IsSmi()) {
-          DeferredInlineSmiOperation* deferred =
-              new DeferredInlineSmiOperation(op,
-                                             operand->reg(),
-                                             operand->reg(),
-                                             operand->type_info(),
-                                             smi_value,
-                                             overwrite_mode);
-          __ test(operand->reg(), Immediate(kSmiTagMask));
-          deferred->Branch(not_zero);
-          if (shift_value > 0) {
-            __ sar(operand->reg(), shift_value);
-            __ and_(operand->reg(), ~kSmiTagMask);
-          }
-          deferred->BindExit();
-        } else {
-          if (FLAG_debug_code) {
-            __ AbortIfNotSmi(operand->reg());
-          }
-          if (shift_value > 0) {
-            __ sar(operand->reg(), shift_value);
-            __ and_(operand->reg(), ~kSmiTagMask);
-          }
-        }
-        answer = *operand;
-      }
-      break;
-
-    case Token::SHR:
-      if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        answer = allocator()->Allocate();
-        ASSERT(answer.is_valid());
-        DeferredInlineSmiOperation* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           answer.reg(),
-                                           operand->reg(),
-                                           operand->type_info(),
-                                           smi_value,
-                                           overwrite_mode);
-        if (!operand->type_info().IsSmi()) {
-          __ test(operand->reg(), Immediate(kSmiTagMask));
-          deferred->Branch(not_zero);
-        } else if (FLAG_debug_code) {
-          __ AbortIfNotSmi(operand->reg());
-        }
-        __ mov(answer.reg(), operand->reg());
-        __ SmiUntag(answer.reg());
-        __ shr(answer.reg(), shift_value);
-        // A negative Smi shifted right by two or more is in the
-        // positive Smi range.
-        if (shift_value < 2) {
-          __ test(answer.reg(), Immediate(0xc0000000));
-          deferred->Branch(not_zero);
-        }
-        operand->Unuse();
-        __ SmiTag(answer.reg());
-        deferred->BindExit();
-      }
-      break;
-
-    case Token::SHL:
-      if (reversed) {
-        // Move operand into ecx and also into a second register.
-        // If operand is already in a register, take advantage of that.
-        // This lets us modify ecx, but still bail out to deferred code.
-        Result right;
-        Result right_copy_in_ecx;
-        TypeInfo right_type_info = operand->type_info();
-        operand->ToRegister();
-        if (operand->reg().is(ecx)) {
-          right = allocator()->Allocate();
-          __ mov(right.reg(), ecx);
-          frame_->Spill(ecx);
-          right_copy_in_ecx = *operand;
-        } else {
-          right_copy_in_ecx = allocator()->Allocate(ecx);
-          __ mov(ecx, operand->reg());
-          right = *operand;
-        }
-        operand->Unuse();
-
-        answer = allocator()->Allocate();
-        DeferredInlineSmiOperationReversed* deferred =
-            new DeferredInlineSmiOperationReversed(op,
-                                                   answer.reg(),
-                                                   smi_value,
-                                                   right.reg(),
-                                                   right_type_info,
-                                                   overwrite_mode);
-        __ mov(answer.reg(), Immediate(int_value));
-        __ sar(ecx, kSmiTagSize);
-        if (!right_type_info.IsSmi()) {
-          deferred->Branch(carry);
-        } else if (FLAG_debug_code) {
-          __ AbortIfNotSmi(right.reg());
-        }
-        __ shl_cl(answer.reg());
-        __ cmp(answer.reg(), 0xc0000000);
-        deferred->Branch(sign);
-        __ SmiTag(answer.reg());
-
-        deferred->BindExit();
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        if (shift_value == 0) {
-          // Spill operand so it can be overwritten in the slow case.
-          frame_->Spill(operand->reg());
-          DeferredInlineSmiOperation* deferred =
-              new DeferredInlineSmiOperation(op,
-                                             operand->reg(),
-                                             operand->reg(),
-                                             operand->type_info(),
-                                             smi_value,
-                                             overwrite_mode);
-          __ test(operand->reg(), Immediate(kSmiTagMask));
-          deferred->Branch(not_zero);
-          deferred->BindExit();
-          answer = *operand;
-        } else {
-          // Use a fresh temporary for nonzero shift values.
-          answer = allocator()->Allocate();
-          ASSERT(answer.is_valid());
-          DeferredInlineSmiOperation* deferred =
-              new DeferredInlineSmiOperation(op,
-                                             answer.reg(),
-                                             operand->reg(),
-                                             operand->type_info(),
-                                             smi_value,
-                                             overwrite_mode);
-          if (!operand->type_info().IsSmi()) {
-            __ test(operand->reg(), Immediate(kSmiTagMask));
-            deferred->Branch(not_zero);
-          } else if (FLAG_debug_code) {
-            __ AbortIfNotSmi(operand->reg());
-          }
-          __ mov(answer.reg(), operand->reg());
-          STATIC_ASSERT(kSmiTag == 0);  // adjust code if not the case
-          // If shift_value is 1 we do no shift, only the Smi conversion.
-          if (shift_value > 1) {
-            __ shl(answer.reg(), shift_value - 1);
-          }
-          // Convert int result to Smi, checking that it is in int range.
-          STATIC_ASSERT(kSmiTagSize == 1);  // adjust code if not the case
-          __ add(answer.reg(), Operand(answer.reg()));
-          deferred->Branch(overflow);
-          deferred->BindExit();
-          operand->Unuse();
-        }
-      }
-      break;
-
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND: {
-      operand->ToRegister();
-      // DeferredInlineBinaryOperation requires all the registers that it is
-      // told about to be spilled.
-      frame_->Spill(operand->reg());
-      DeferredInlineBinaryOperation* deferred = NULL;
-      if (!operand->type_info().IsSmi()) {
-        Result left = allocator()->Allocate();
-        ASSERT(left.is_valid());
-        Result right = allocator()->Allocate();
-        ASSERT(right.is_valid());
-        deferred = new DeferredInlineBinaryOperation(
-            op,
-            operand->reg(),
-            left.reg(),
-            right.reg(),
-            operand->type_info(),
-            TypeInfo::Smi(),
-            overwrite_mode == NO_OVERWRITE ? NO_OVERWRITE : OVERWRITE_LEFT);
-        __ test(operand->reg(), Immediate(kSmiTagMask));
-        deferred->JumpToConstantRhs(not_zero, smi_value);
-      } else if (FLAG_debug_code) {
-        __ AbortIfNotSmi(operand->reg());
-      }
-      if (op == Token::BIT_AND) {
-        __ and_(Operand(operand->reg()), Immediate(value));
-      } else if (op == Token::BIT_XOR) {
-        if (int_value != 0) {
-          __ xor_(Operand(operand->reg()), Immediate(value));
-        }
-      } else {
-        ASSERT(op == Token::BIT_OR);
-        if (int_value != 0) {
-          __ or_(Operand(operand->reg()), Immediate(value));
-        }
-      }
-      if (deferred != NULL) deferred->BindExit();
-      answer = *operand;
-      break;
-    }
-
-    case Token::DIV:
-      if (!reversed && int_value == 2) {
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-
-        DeferredInlineSmiOperation* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           operand->reg(),
-                                           operand->reg(),
-                                           operand->type_info(),
-                                           smi_value,
-                                           overwrite_mode);
-        // Check that the lowest log2(value) bits of the operand are zero,
-        // and test the smi tag at the same time.
-        STATIC_ASSERT(kSmiTag == 0);
-        STATIC_ASSERT(kSmiTagSize == 1);
-        __ test(operand->reg(), Immediate(3));
-        deferred->Branch(not_zero);  // Branch if non-smi or odd smi.
-        __ sar(operand->reg(), 1);
-        deferred->BindExit();
-        answer = *operand;
-      } else {
-        // We cannot fall through from here to the default case via MOD,
-        // so we duplicate the default case here.
-        Result constant_operand(value);
-        if (reversed) {
-          answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                            overwrite_mode);
-        } else {
-          answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
-                                            overwrite_mode);
-        }
-      }
-      break;
-
-    // Generate inline code for mod of powers of 2 and negative powers of 2.
-    case Token::MOD:
-      if (!reversed &&
-          int_value != 0 &&
-          (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        DeferredCode* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           operand->reg(),
-                                           operand->reg(),
-                                           operand->type_info(),
-                                           smi_value,
-                                           overwrite_mode);
-        // Check for negative or non-Smi left hand side.
-        __ test(operand->reg(), Immediate(kSmiTagMask | kSmiSignMask));
-        deferred->Branch(not_zero);
-        if (int_value < 0) int_value = -int_value;
-        if (int_value == 1) {
-          __ mov(operand->reg(), Immediate(Smi::FromInt(0)));
-        } else {
-          __ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
-        }
-        deferred->BindExit();
-        answer = *operand;
-        break;
-      }
-      // Fall through if we did not find a power of 2 on the right hand side!
-      // The next case must be the default.
-
-    default: {
-      Result constant_operand(value);
-      if (reversed) {
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
-                                          overwrite_mode);
-      }
-      break;
-    }
-  }
-  ASSERT(answer.is_valid());
-  return answer;
-}
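
The DIV-by-2 and MOD-by-power-of-2 fast paths above operate directly on
tagged words: `test reg, 3` rejects non-smis (tag bit) and odd smis (lowest
payload bit) in one instruction, `sar reg, 1` turns the tagged 2n into n,
which is already the tagged quotient n/2 when n is even, and masking a
non-negative tagged smi with (m << 1) - 1 yields the tagged remainder mod m.
A standalone check of the arithmetic (ia32 tagging assumed):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t n = 22;
      int32_t tagged = n * 2;
      // test reg, 3: bit 0 is the smi tag, bit 1 the lowest payload bit.
      assert((tagged & 3) == 0);                // even smi: fast path ok
      // sar reg, 1: 2n >> 1 == n == 2 * (n / 2) for even n, so the result
      // is already the tagged quotient.
      assert((tagged >> 1) == (n / 2) * 2);
      // mod 2^k via masking the tagged value (non-negative operands only).
      int32_t m = 8;
      assert((tagged & ((m << 1) - 1)) == (n % m) * 2);
      return 0;
    }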
-
-
-static bool CouldBeNaN(const Result& result) {
-  if (result.type_info().IsSmi()) return false;
-  if (result.type_info().IsInteger32()) return false;
-  if (!result.is_constant()) return true;
-  if (!result.handle()->IsHeapNumber()) return false;
-  return isnan(HeapNumber::cast(*result.handle())->value());
-}
-
-
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
-static Condition DoubleCondition(Condition cc) {
-  switch (cc) {
-    case less:          return below;
-    case equal:         return equal;
-    case less_equal:    return below_equal;
-    case greater:       return above;
-    case greater_equal: return above_equal;
-    default:            UNREACHABLE();
-  }
-  UNREACHABLE();
-  return equal;
-}
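
DoubleCondition is needed because the x86 floating-point compares
(fucomi/ucomisd) report through CF and ZF the way an unsigned integer
compare would, and clear SF and OF, so the signed condition codes would test
dead flags.  A simplified model of how ucomisd drives the flags (hedged, for
illustration only):

    #include <cassert>
    #include <cmath>

    struct Flags { bool cf; bool zf; };  // "below" and "equal"

    // ucomisd semantics: unordered (NaN) sets both CF and ZF.
    static Flags Ucomisd(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return {true, true};
      return {a < b, a == b};
    }

    int main() {
      Flags f = Ucomisd(-1.0, 2.0);
      assert(f.cf && !f.zf);           // "below", despite the negative sign
      assert(!Ucomisd(2.0, -1.0).cf);  // "above or equal"
      return 0;
    }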
-
-
-static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
-                                        bool inline_number_compare) {
-  CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
-  if (nan_info == kCantBothBeNaN) {
-    flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
-  }
-  if (inline_number_compare) {
-    flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
-  }
-  return flags;
-}
-
-
-void CodeGenerator::Comparison(AstNode* node,
-                               Condition cc,
-                               bool strict,
-                               ControlDestination* dest) {
-  // Strict only makes sense for equality comparisons.
-  ASSERT(!strict || cc == equal);
-
-  Result left_side;
-  Result right_side;
-  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
-  if (cc == greater || cc == less_equal) {
-    cc = ReverseCondition(cc);
-    left_side = frame_->Pop();
-    right_side = frame_->Pop();
-  } else {
-    right_side = frame_->Pop();
-    left_side = frame_->Pop();
-  }
-  ASSERT(cc == less || cc == equal || cc == greater_equal);
-
-  // If either side is a constant smi, optimize the comparison.
-  bool left_side_constant_smi = false;
-  bool left_side_constant_null = false;
-  bool left_side_constant_1_char_string = false;
-  if (left_side.is_constant()) {
-    left_side_constant_smi = left_side.handle()->IsSmi();
-    left_side_constant_null = left_side.handle()->IsNull();
-    left_side_constant_1_char_string =
-        (left_side.handle()->IsString() &&
-         String::cast(*left_side.handle())->length() == 1 &&
-         String::cast(*left_side.handle())->IsAsciiRepresentation());
-  }
-  bool right_side_constant_smi = false;
-  bool right_side_constant_null = false;
-  bool right_side_constant_1_char_string = false;
-  if (right_side.is_constant()) {
-    right_side_constant_smi = right_side.handle()->IsSmi();
-    right_side_constant_null = right_side.handle()->IsNull();
-    right_side_constant_1_char_string =
-        (right_side.handle()->IsString() &&
-         String::cast(*right_side.handle())->length() == 1 &&
-         String::cast(*right_side.handle())->IsAsciiRepresentation());
-  }
-
-  if (left_side_constant_smi || right_side_constant_smi) {
-    bool is_loop_condition = (node->AsExpression() != NULL) &&
-        node->AsExpression()->is_loop_condition();
-    ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
-                          left_side_constant_smi, right_side_constant_smi,
-                          is_loop_condition);
-  } else if (left_side_constant_1_char_string ||
-             right_side_constant_1_char_string) {
-    if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
-      // Trivial case, comparing two constants.
-      int left_value = String::cast(*left_side.handle())->Get(0);
-      int right_value = String::cast(*right_side.handle())->Get(0);
-      switch (cc) {
-        case less:
-          dest->Goto(left_value < right_value);
-          break;
-        case equal:
-          dest->Goto(left_value == right_value);
-          break;
-        case greater_equal:
-          dest->Goto(left_value >= right_value);
-          break;
-        default:
-          UNREACHABLE();
-      }
-    } else {
-      // Only one side is a constant 1-character string.
-      // If left side is a constant 1-character string, reverse the operands.
-      // Since one side is a constant string, conversion order does not matter.
-      if (left_side_constant_1_char_string) {
-        Result temp = left_side;
-        left_side = right_side;
-        right_side = temp;
-        cc = ReverseCondition(cc);
-        // This may reintroduce greater or less_equal as the value of cc.
-        // CompareStub and the inline code both support all values of cc.
-      }
-      // Implement comparison against a constant string, inlining the case
-      // where both sides are strings.
-      left_side.ToRegister();
-
-      // Here we split control flow to the stub call and inlined cases
-      // before finally splitting it to the control destination.  We use
-      // a jump target and branching to duplicate the virtual frame at
-      // the first split.  We manually handle the off-frame references
-      // by reconstituting them on the non-fall-through path.
-      JumpTarget is_not_string, is_string;
-      Register left_reg = left_side.reg();
-      Handle<Object> right_val = right_side.handle();
-      ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
-      __ test(left_side.reg(), Immediate(kSmiTagMask));
-      is_not_string.Branch(zero, &left_side);
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ mov(temp.reg(),
-             FieldOperand(left_side.reg(), HeapObject::kMapOffset));
-      __ movzx_b(temp.reg(),
-                 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
-      // If we are testing for equality then make use of the symbol shortcut.
-      // Check whether the left hand side has the same type as the right
-      // hand side (which is always a symbol).
-      if (cc == equal) {
-        Label not_a_symbol;
-        STATIC_ASSERT(kSymbolTag != 0);
-        // Ensure that no non-strings have the symbol bit set.
-        STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
-        __ test(temp.reg(), Immediate(kIsSymbolMask));  // Test the symbol bit.
-        __ j(zero, &not_a_symbol);
-        // They are symbols, so do identity compare.
-        __ cmp(left_side.reg(), right_side.handle());
-        dest->true_target()->Branch(equal);
-        dest->false_target()->Branch(not_equal);
-        __ bind(&not_a_symbol);
-      }
-      // Call the compare stub if the left side is not a flat ascii string.
-      __ and_(temp.reg(),
-          kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
-      __ cmp(temp.reg(), kStringTag | kSeqStringTag | kAsciiStringTag);
-      temp.Unuse();
-      is_string.Branch(equal, &left_side);
-
-      // Set up and call the compare stub.
-      is_not_string.Bind(&left_side);
-      CompareFlags flags =
-          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB);
-      CompareStub stub(cc, strict, flags);
-      Result result = frame_->CallStub(&stub, &left_side, &right_side);
-      result.ToRegister();
-      __ cmp(result.reg(), 0);
-      result.Unuse();
-      dest->true_target()->Branch(cc);
-      dest->false_target()->Jump();
-
-      is_string.Bind(&left_side);
-      // left_side is a sequential ASCII string.
-      left_side = Result(left_reg);
-      right_side = Result(right_val);
-      // Test string equality and comparison.
-      Label comparison_done;
-      if (cc == equal) {
-        __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
-               Immediate(Smi::FromInt(1)));
-        __ j(not_equal, &comparison_done);
-        uint8_t char_value =
-            static_cast<uint8_t>(String::cast(*right_val)->Get(0));
-        __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
-                char_value);
-      } else {
-        __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
-               Immediate(Smi::FromInt(1)));
-        // If the length is 0 then the jump is taken and the flags
-        // correctly represent being less than the one-character string.
-        __ j(below, &comparison_done);
-        // Compare the first character of the string with the
-        // constant 1-character string.
-        uint8_t char_value =
-            static_cast<uint8_t>(String::cast(*right_val)->Get(0));
-        __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
-                char_value);
-        __ j(not_equal, &comparison_done);
-        // If the first character is the same then the long string sorts after
-        // the short one.
-        __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
-               Immediate(Smi::FromInt(1)));
-      }
-      __ bind(&comparison_done);
-      left_side.Unuse();
-      right_side.Unuse();
-      dest->Split(cc);
-    }
-  } else {
-    // Neither side is a constant Smi, constant 1-char string or constant null.
-    // If either side is a non-smi constant, or known to be a heap number,
-    // skip the smi check.
-    bool known_non_smi =
-        (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
-        (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
-        left_side.type_info().IsDouble() ||
-        right_side.type_info().IsDouble();
-
-    NaNInformation nan_info =
-        (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
-        kBothCouldBeNaN :
-        kCantBothBeNaN;
-
-    // Inline the number comparison, handling any combination of smis and
-    // heap numbers, if:
-    //   the code is in a loop,
-    //   the compare operation is different from equal, and
-    //   the compare is not a loop condition.
-    // Equal is excluded because it will most likely be done on smis (not
-    // heap numbers), and the code for comparing smis is inlined separately.
-    // The same reasoning applies to loop conditions, which are also most
-    // likely smi comparisons.
-    bool is_loop_condition = (node->AsExpression() != NULL)
-        && node->AsExpression()->is_loop_condition();
-    bool inline_number_compare =
-        loop_nesting() > 0 && cc != equal && !is_loop_condition;
-
-    // Left and right needed in registers for the following code.
-    left_side.ToRegister();
-    right_side.ToRegister();
-
-    if (known_non_smi) {
-      // Inlined equality check:
-      // If at least one of the objects is not NaN, then if the objects
-      // are identical, they are equal.
-      if (nan_info == kCantBothBeNaN && cc == equal) {
-        __ cmp(left_side.reg(), Operand(right_side.reg()));
-        dest->true_target()->Branch(equal);
-      }
-
-      // Inlined number comparison:
-      if (inline_number_compare) {
-        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
-      }
-
-      // End of in-line compare, call out to the compare stub. Don't include
-      // number comparison in the stub if it was inlined.
-      CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
-      CompareStub stub(cc, strict, flags);
-      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-      __ test(answer.reg(), Operand(answer.reg()));
-      answer.Unuse();
-      dest->Split(cc);
-    } else {
-      // Here we split control flow to the stub call and inlined cases
-      // before finally splitting it to the control destination.  We use
-      // a jump target and branching to duplicate the virtual frame at
-      // the first split.  We manually handle the off-frame references
-      // by reconstituting them on the non-fall-through path.
-      JumpTarget is_smi;
-      Register left_reg = left_side.reg();
-      Register right_reg = right_side.reg();
-
-      // In-line check for comparing two smis.
-      JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
-
-      if (has_valid_frame()) {
-        // Inline the equality check if both operands can't be a NaN. If both
-        // objects are the same they are equal.
-        if (nan_info == kCantBothBeNaN && cc == equal) {
-          __ cmp(left_side.reg(), Operand(right_side.reg()));
-          dest->true_target()->Branch(equal);
-        }
-
-        // Inlined number comparison:
-        if (inline_number_compare) {
-          GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
-        }
-
-        // End of in-line compare, call out to the compare stub. Don't include
-        // number comparison in the stub if it was inlined.
-        CompareFlags flags =
-            ComputeCompareFlags(nan_info, inline_number_compare);
-        CompareStub stub(cc, strict, flags);
-        Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-        __ test(answer.reg(), Operand(answer.reg()));
-        answer.Unuse();
-        if (is_smi.is_linked()) {
-          dest->true_target()->Branch(cc);
-          dest->false_target()->Jump();
-        } else {
-          dest->Split(cc);
-        }
-      }
-
-      if (is_smi.is_linked()) {
-        is_smi.Bind();
-        left_side = Result(left_reg);
-        right_side = Result(right_reg);
-        __ cmp(left_side.reg(), Operand(right_side.reg()));
-        right_side.Unuse();
-        left_side.Unuse();
-        dest->Split(cc);
-      }
-    }
-  }
-}
-
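Editor's note: the symbol shortcut inside Comparison above leans on interning: a symbol is the canonical object for its character content, so symbol equality is pointer identity and needs no character inspection. A sketch of that invariant using a hypothetical interning table standing in for V8's symbol table:

#include <cassert>
#include <string>
#include <unordered_map>

// Hypothetical interning table: one canonical object per content.
static const std::string* Intern(const std::string& s) {
  static std::unordered_map<std::string, std::string> table;
  auto it = table.emplace(s, s).first;
  return &it->second;
}

int main() {
  const std::string* a = Intern("apply");
  const std::string* b = Intern("apply");
  assert(a == b);  // content equality collapses to pointer identity
  return 0;
}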
-
-void CodeGenerator::ConstantSmiComparison(Condition cc,
-                                          bool strict,
-                                          ControlDestination* dest,
-                                          Result* left_side,
-                                          Result* right_side,
-                                          bool left_side_constant_smi,
-                                          bool right_side_constant_smi,
-                                          bool is_loop_condition) {
-  if (left_side_constant_smi && right_side_constant_smi) {
-    // Trivial case, comparing two constants.
-    int left_value = Smi::cast(*left_side->handle())->value();
-    int right_value = Smi::cast(*right_side->handle())->value();
-    switch (cc) {
-      case less:
-        dest->Goto(left_value < right_value);
-        break;
-      case equal:
-        dest->Goto(left_value == right_value);
-        break;
-      case greater_equal:
-        dest->Goto(left_value >= right_value);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  } else {
-    // Only one side is a constant Smi.
-    // If left side is a constant Smi, reverse the operands.
-    // Since one side is a constant Smi, conversion order does not matter.
-    if (left_side_constant_smi) {
-      Result* temp = left_side;
-      left_side = right_side;
-      right_side = temp;
-      cc = ReverseCondition(cc);
-      // This may re-introduce greater or less_equal as the value of cc.
-      // CompareStub and the inline code both support all values of cc.
-    }
-    // Implement comparison against a constant Smi, inlining the case
-    // where both sides are Smis.
-    left_side->ToRegister();
-    Register left_reg = left_side->reg();
-    Handle<Object> right_val = right_side->handle();
-
-    if (left_side->is_smi()) {
-      if (FLAG_debug_code) {
-        __ AbortIfNotSmi(left_reg);
-      }
-      // Test smi equality and ordering by signed integer comparison.
-      if (IsUnsafeSmi(right_side->handle())) {
-        right_side->ToRegister();
-        __ cmp(left_reg, Operand(right_side->reg()));
-      } else {
-        __ cmp(Operand(left_reg), Immediate(right_side->handle()));
-      }
-      left_side->Unuse();
-      right_side->Unuse();
-      dest->Split(cc);
-    } else {
-      // Only the case where the left side may be a non-smi remains.
-      JumpTarget is_smi;
-      if (cc == equal) {
-        // We can do the equality comparison before the smi check.
-        __ cmp(Operand(left_reg), Immediate(right_side->handle()));
-        dest->true_target()->Branch(equal);
-        __ test(left_reg, Immediate(kSmiTagMask));
-        dest->false_target()->Branch(zero);
-      } else {
-        // Do the smi check, then the comparison.
-        __ test(left_reg, Immediate(kSmiTagMask));
-        is_smi.Branch(zero, left_side, right_side);
-      }
-
-      // Jump or fall through to here if we are comparing a non-smi to a
-      // constant smi.  If the non-smi is a heap number and this is not
-      // a loop condition, inline the floating point code.
-      if (!is_loop_condition &&
-          masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
-        // Right side is a constant smi and left side has been checked
-        // not to be a smi.
-        CpuFeatures::Scope use_sse2(SSE2);
-        JumpTarget not_number;
-        __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
-               Immediate(FACTORY->heap_number_map()));
-        not_number.Branch(not_equal, left_side);
-        __ movdbl(xmm1,
-                  FieldOperand(left_reg, HeapNumber::kValueOffset));
-        int value = Smi::cast(*right_val)->value();
-        if (value == 0) {
-          __ xorpd(xmm0, xmm0);
-        } else {
-          Result temp = allocator()->Allocate();
-          __ mov(temp.reg(), Immediate(value));
-          __ cvtsi2sd(xmm0, Operand(temp.reg()));
-          temp.Unuse();
-        }
-        __ ucomisd(xmm1, xmm0);
-        // Jump to builtin for NaN.
-        not_number.Branch(parity_even, left_side);
-        left_side->Unuse();
-        dest->true_target()->Branch(DoubleCondition(cc));
-        dest->false_target()->Jump();
-        not_number.Bind(left_side);
-      }
-
-      // Set up and call the compare stub.
-      CompareFlags flags =
-          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
-      CompareStub stub(cc, strict, flags);
-      Result result = frame_->CallStub(&stub, left_side, right_side);
-      result.ToRegister();
-      __ test(result.reg(), Operand(result.reg()));
-      result.Unuse();
-      if (cc == equal) {
-        dest->Split(cc);
-      } else {
-        dest->true_target()->Branch(cc);
-        dest->false_target()->Jump();
-
-        // It is important for performance for this case to be at the end.
-        is_smi.Bind(left_side, right_side);
-        if (IsUnsafeSmi(right_side->handle())) {
-          right_side->ToRegister();
-          __ cmp(left_reg, Operand(right_side->reg()));
-        } else {
-          __ cmp(Operand(left_reg), Immediate(right_side->handle()));
-        }
-        left_side->Unuse();
-        right_side->Unuse();
-        dest->Split(cc);
-      }
-    }
-  }
-}
-
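Editor's note: ConstantSmiComparison compares tagged smis directly with a signed integer cmp. That is sound under the 32-bit smi encoding assumed here (payload shifted left by one, tag bit zero), because the shift preserves signed order for payloads that fit in 31 bits. A sketch:

#include <cassert>
#include <cstdint>

// Assumed encoding: payload in the upper 31 bits, tag bit 0.  The cast
// through uint32_t sidesteps undefined behaviour on negative shifts.
static int32_t TagSmi(int32_t n) {
  return static_cast<int32_t>(static_cast<uint32_t>(n) << 1);
}

int main() {
  assert((TagSmi(-7) < TagSmi(3)) == (-7 < 3));  // order preserved
  assert(TagSmi(42) == TagSmi(42));              // equality preserved
  return 0;
}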
-
-// Check that the comparison operand is a number. Jump to the not_numbers
-// jump target, passing the left and right results, if the operand is not a
-// number.
-static void CheckComparisonOperand(MacroAssembler* masm_,
-                                   Result* operand,
-                                   Result* left_side,
-                                   Result* right_side,
-                                   JumpTarget* not_numbers) {
-  // Perform the check only if the operand is not known to be a number.
-  if (!operand->type_info().IsNumber()) {
-    Label done;
-    __ test(operand->reg(), Immediate(kSmiTagMask));
-    __ j(zero, &done);
-    __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
-           Immediate(FACTORY->heap_number_map()));
-    not_numbers->Branch(not_equal, left_side, right_side, not_taken);
-    __ bind(&done);
-  }
-}
-
-
-// Load a comparison operand onto the FPU stack. This assumes that the
-// operand has already been checked and is a number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
-                                  Result* operand) {
-  Label done;
-  if (operand->type_info().IsDouble()) {
-    // Operand is known to be a heap number, just load it.
-    __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-  } else if (operand->type_info().IsSmi()) {
-    // Operand is known to be a smi. Convert it to double and keep the original
-    // smi.
-    __ SmiUntag(operand->reg());
-    __ push(operand->reg());
-    __ fild_s(Operand(esp, 0));
-    __ pop(operand->reg());
-    __ SmiTag(operand->reg());
-  } else {
-    // Operand type not known; check for a smi, otherwise assume a heap number.
-    Label smi;
-    __ test(operand->reg(), Immediate(kSmiTagMask));
-    __ j(zero, &smi);
-    __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-    __ jmp(&done);
-    __ bind(&smi);
-    __ SmiUntag(operand->reg());
-    __ push(operand->reg());
-    __ fild_s(Operand(esp, 0));
-    __ pop(operand->reg());
-    __ SmiTag(operand->reg());
-    __ jmp(&done);
-  }
-  __ bind(&done);
-}
-
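Editor's note: the smi path above untags the value, pushes it, loads it into the FPU with fild_s, then pops and retags, so the register still holds the original smi afterwards. Semantically the sequence computes the following (a sketch; the arithmetic right shift stands in for SmiUntag and assumes sign-preserving shifts):

#include <cassert>
#include <cstdint>

// Convert the 31-bit payload to double, leaving the tagged value intact.
static double SmiToDouble(int32_t tagged) {
  return static_cast<double>(tagged >> 1);  // arithmetic shift == SmiUntag
}

int main() {
  int32_t tagged = -10;                 // Smi(-5) under the 32-bit encoding
  assert(SmiToDouble(tagged) == -5.0);
  assert(tagged == -10);                // the register is unchanged
  return 0;
}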
-
-// Load a comparison operand into an XMM register. Jump to the not_numbers
-// jump target, passing the left and right results, if the operand is not a
-// number.
-static void LoadComparisonOperandSSE2(MacroAssembler* masm_,
-                                      Result* operand,
-                                      XMMRegister xmm_reg,
-                                      Result* left_side,
-                                      Result* right_side,
-                                      JumpTarget* not_numbers) {
-  Label done;
-  if (operand->type_info().IsDouble()) {
-    // Operand is known to be a heap number, just load it.
-    __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-  } else if (operand->type_info().IsSmi()) {
-    // Operand is known to be a smi. Convert it to double and keep the original
-    // smi.
-    __ SmiUntag(operand->reg());
-    __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
-    __ SmiTag(operand->reg());
-  } else {
-    // Operand type not known; check for a smi or a heap number.
-    Label smi;
-    __ test(operand->reg(), Immediate(kSmiTagMask));
-    __ j(zero, &smi);
-    if (!operand->type_info().IsNumber()) {
-      __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset),
-             Immediate(FACTORY->heap_number_map()));
-      not_numbers->Branch(not_equal, left_side, right_side, taken);
-    }
-    __ movdbl(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-    __ jmp(&done);
-
-    __ bind(&smi);
-    // Convert the smi to a double and keep the original smi.
-    __ SmiUntag(operand->reg());
-    __ cvtsi2sd(xmm_reg, Operand(operand->reg()));
-    __ SmiTag(operand->reg());
-    __ jmp(&done);
-  }
-  __ bind(&done);
-}
-
-
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
-                                                   Result* right_side,
-                                                   Condition cc,
-                                                   ControlDestination* dest) {
-  ASSERT(left_side->is_register());
-  ASSERT(right_side->is_register());
-
-  JumpTarget not_numbers;
-  if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-
-    // Load left and right operand into registers xmm0 and xmm1 and compare.
-    LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side,
-                              &not_numbers);
-    LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
-                              &not_numbers);
-    __ ucomisd(xmm0, xmm1);
-  } else {
-    Label check_right, compare;
-
-    // Make sure that both comparison operands are numbers.
-    CheckComparisonOperand(masm_, left_side, left_side, right_side,
-                           &not_numbers);
-    CheckComparisonOperand(masm_, right_side, left_side, right_side,
-                           &not_numbers);
-
-    // Load right and left operand to FPU stack and compare.
-    LoadComparisonOperand(masm_, right_side);
-    LoadComparisonOperand(masm_, left_side);
-    __ FCmp();
-  }
-
-  // Bail out if a NaN is involved.
-  not_numbers.Branch(parity_even, left_side, right_side, not_taken);
-
-  // Split to destination targets based on comparison.
-  left_side->Unuse();
-  right_side->Unuse();
-  dest->true_target()->Branch(DoubleCondition(cc));
-  dest->false_target()->Jump();
-
-  not_numbers.Bind(left_side, right_side);
-}
-
-
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
-                                      CallFunctionFlags flags,
-                                      int position) {
-  // Push the arguments ("left-to-right") on the stack.
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-    frame_->SpillTop();
-  }
-
-  // Record the position for debugging purposes.
-  CodeForSourcePosition(position);
-
-  // Use the shared code stub to call the function.
-  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub call_function(arg_count, in_loop, flags);
-  Result answer = frame_->CallStub(&call_function, arg_count + 1);
-  // Restore context and replace function on the stack with the
-  // result of the stub invocation.
-  frame_->RestoreContextRegister();
-  frame_->SetElementAt(0, &answer);
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
-                                  Expression* receiver,
-                                  VariableProxy* arguments,
-                                  int position) {
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).
-  // If the arguments object of the scope has not been allocated,
-  // and x.apply is Function.prototype.apply, this optimization
-  // just copies y and the arguments of the current function on the
-  // stack, as receiver and arguments, and calls x.
-  // In the implementation comments, we call x the applicand
-  // and y the receiver.
-  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
-  ASSERT(arguments->IsArguments());
-
-  // Load applicand.apply onto the stack. This will usually
-  // give us a megamorphic load site. Not super, but it works.
-  Load(applicand);
-  frame()->Dup();
-  Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
-  frame()->Push(name);
-  Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
-  __ nop();
-  frame()->Push(&answer);
-
-  // Load the receiver and the existing arguments object onto the
-  // expression stack. Avoid allocating the arguments object here.
-  Load(receiver);
-  LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
-  // Emit the source position information after having loaded the
-  // receiver and the arguments.
-  CodeForSourcePosition(position);
-  // Contents of frame at this point:
-  // Frame[0]: arguments object of the current function or the hole.
-  // Frame[1]: receiver
-  // Frame[2]: applicand.apply
-  // Frame[3]: applicand.
-
-  // Check if the arguments object has been lazily allocated
-  // already. If so, just use that instead of copying the arguments
-  // from the stack. This also deals with cases where a local variable
-  // named 'arguments' has been introduced.
-  frame_->Dup();
-  Result probe = frame_->Pop();
-  { VirtualFrame::SpilledScope spilled_scope;
-    Label slow, done;
-    bool try_lazy = true;
-    if (probe.is_constant()) {
-      try_lazy = probe.handle()->IsArgumentsMarker();
-    } else {
-      __ cmp(Operand(probe.reg()), Immediate(FACTORY->arguments_marker()));
-      probe.Unuse();
-      __ j(not_equal, &slow);
-    }
-
-    if (try_lazy) {
-      Label build_args;
-      // Get rid of the arguments object probe.
-      frame_->Drop();  // Can be called on a spilled frame.
-      // Stack now has 3 elements on it.
-      // Contents of stack at this point:
-      // esp[0]: receiver
-      // esp[1]: applicand.apply
-      // esp[2]: applicand.
-
-      // Check that the receiver really is a JavaScript object.
-      __ mov(eax, Operand(esp, 0));
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(zero, &build_args);
-      // We allow all JSObjects including JSFunctions.  As long as
-      // JS_FUNCTION_TYPE is the last instance type and it is right
-      // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
-      // bound.
-      STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-      STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-      __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
-      __ j(below, &build_args);
-
-      // Check that applicand.apply is Function.prototype.apply.
-      __ mov(eax, Operand(esp, kPointerSize));
-      __ test(eax, Immediate(kSmiTagMask));
-      __ j(zero, &build_args);
-      __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
-      __ j(not_equal, &build_args);
-      __ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset));
-      __ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag));
-      Handle<Code> apply_code(masm()->isolate()->builtins()->builtin(
-          Builtins::kFunctionApply));
-      __ cmp(Operand(ecx), Immediate(apply_code));
-      __ j(not_equal, &build_args);
-
-      // Check that applicand is a function.
-      __ mov(edi, Operand(esp, 2 * kPointerSize));
-      __ test(edi, Immediate(kSmiTagMask));
-      __ j(zero, &build_args);
-      __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-      __ j(not_equal, &build_args);
-
-      // Copy the arguments to this function possibly from the
-      // adaptor frame below it.
-      Label invoke, adapted;
-      __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-      __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-      __ cmp(Operand(ecx),
-             Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-      __ j(equal, &adapted);
-
-      // No arguments adaptor frame. Copy fixed number of arguments.
-      __ mov(eax, Immediate(scope()->num_parameters()));
-      for (int i = 0; i < scope()->num_parameters(); i++) {
-        __ push(frame_->ParameterAt(i));
-      }
-      __ jmp(&invoke);
-
-      // Arguments adaptor frame present. Copy arguments from there, but cap
-      // the number copied to guard against stack overflow.
-      __ bind(&adapted);
-      static const uint32_t kArgumentsLimit = 1 * KB;
-      __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-      __ SmiUntag(eax);
-      __ mov(ecx, Operand(eax));
-      __ cmp(eax, kArgumentsLimit);
-      __ j(above, &build_args);
-
-      // Loop through the arguments pushing them onto the execution
-      // stack. We don't inform the virtual frame of the push, so we don't
-      // have to worry about getting rid of the elements from the virtual
-      // frame.
-      Label loop;
-      // ecx is a small non-negative integer, due to the test above.
-      __ test(ecx, Operand(ecx));
-      __ j(zero, &invoke);
-      __ bind(&loop);
-      __ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
-      __ dec(ecx);
-      __ j(not_zero, &loop);
-
-      // Invoke the function.
-      __ bind(&invoke);
-      ParameterCount actual(eax);
-      __ InvokeFunction(edi, actual, CALL_FUNCTION);
-      // Drop applicand.apply and applicand from the stack, and push
-      // the result of the function call, but leave the spilled frame
-      // unchanged, with 3 elements, so it is correct when we compile the
-      // slow-case code.
-      __ add(Operand(esp), Immediate(2 * kPointerSize));
-      __ push(eax);
-      // Stack now has 1 element:
-      //   esp[0]: result
-      __ jmp(&done);
-
-      // Slow case: allocate the arguments object, since we know it isn't
-      // there, and fall through to the generic code below that calls
-      // applicand.apply.
-      __ bind(&build_args);
-      // Stack again has 3 elements, because every jump to build_args is
-      // taken at a point where:
-      // esp[0]: receiver
-      // esp[1]: applicand.apply
-      // esp[2]: applicand.
-
-      // StoreArgumentsObject requires a correct frame, and may modify it.
-      Result arguments_object = StoreArgumentsObject(false);
-      frame_->SpillAll();
-      arguments_object.ToRegister();
-      frame_->EmitPush(arguments_object.reg());
-      arguments_object.Unuse();
-      // Stack and frame now have 4 elements.
-      __ bind(&slow);
-    }
-
-    // Generic computation of x.apply(y, args) with no special optimization.
-    // Flip applicand.apply and applicand on the stack, so
-    // applicand looks like the receiver of the applicand.apply call.
-    // Then process it as a normal function call.
-    __ mov(eax, Operand(esp, 3 * kPointerSize));
-    __ mov(ebx, Operand(esp, 2 * kPointerSize));
-    __ mov(Operand(esp, 2 * kPointerSize), eax);
-    __ mov(Operand(esp, 3 * kPointerSize), ebx);
-
-    CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
-    Result res = frame_->CallStub(&call_function, 3);
-    // The function and its two arguments have been dropped.
-    frame_->Drop(1);  // Drop the receiver as well.
-    res.ToRegister();
-    frame_->EmitPush(res.reg());
-    // Stack now has 1 element:
-    //   esp[0]: result
-    if (try_lazy) __ bind(&done);
-  }  // End of spilled scope.
-  // Restore the context register after a call.
-  frame_->RestoreContextRegister();
-}
-
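Editor's note: the adaptor-frame loop in CallApplyLazy pushes edx[ecx * kPointerSize + kPointerSize] while ecx counts down from the argument count to 1, reproducing the caller's argument layout without informing the virtual frame. A host-side model with hypothetical names (a sketch, not the actual frame layout):

#include <cassert>
#include <cstdint>
#include <vector>

// 'frame[i]' stands in for the slot at edx + i * kPointerSize + kPointerSize.
static void CopyArgs(const std::vector<intptr_t>& frame, int count,
                     std::vector<intptr_t>* stack) {
  for (int i = count; i >= 1; --i) {  // ecx counts down from length to 1
    stack->push_back(frame[i]);       // __ push(Operand(edx, ecx, ...))
  }
}

int main() {
  std::vector<intptr_t> frame = {0 /* unused slot 0 */, 11, 22, 33};
  std::vector<intptr_t> stack;
  CopyArgs(frame, 3, &stack);
  assert((stack == std::vector<intptr_t>{33, 22, 11}));  // last slot pushed first
  return 0;
}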
-
-class DeferredStackCheck: public DeferredCode {
- public:
-  DeferredStackCheck() {
-    set_comment("[ DeferredStackCheck");
-  }
-
-  virtual void Generate();
-};
-
-
-void DeferredStackCheck::Generate() {
-  StackCheckStub stub;
-  __ CallStub(&stub);
-}
-
-
-void CodeGenerator::CheckStack() {
-  DeferredStackCheck* deferred = new DeferredStackCheck;
-  ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(masm()->isolate());
-  __ cmp(esp, Operand::StaticVariable(stack_limit));
-  deferred->Branch(below);
-  deferred->BindExit();
-}
-
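Editor's note: CheckStack keeps the fast path to a single compare of esp against the isolate's stack limit; the StackCheckStub call lives in deferred, out-of-line code and is reached only when the limit is crossed. The shape in C++ terms (a sketch with a stand-in for the stub call):

#include <cstdint>

static void CallStackGuard() { /* out of line: StackCheckStub */ }

// One compare on the fast path; the runtime call is deferred code.
static void CheckStackSketch(uintptr_t esp, uintptr_t stack_limit) {
  if (esp < stack_limit) CallStackGuard();  // deferred->Branch(below)
}

int main() { CheckStackSketch(0x1000, 0x800); return 0; }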
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  Visit(statement);
-  if (frame_ != NULL) {
-    frame_->SpillAll();
-  }
-  set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  VisitStatements(statements);
-  if (frame_ != NULL) {
-    frame_->SpillAll();
-  }
-  set_in_spilled_code(true);
-
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(!in_spilled_code());
-  for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
-    Visit(statements->at(i));
-  }
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ Block");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  VisitStatements(node->statements());
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  // Call the runtime to declare the globals.  The inevitable call
-  // will sync frame elements to memory anyway, so we do it eagerly to
-  // allow us to push the arguments directly into place.
-  frame_->SyncRange(0, frame_->element_count() - 1);
-
-  frame_->EmitPush(esi);  // The context is the first argument.
-  frame_->EmitPush(Immediate(pairs));
-  frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
-  frame_->EmitPush(Immediate(Smi::FromInt(strict_mode_flag())));
-  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
-  // Return value is ignored.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
-  Comment cmnt(masm_, "[ Declaration");
-  Variable* var = node->proxy()->var();
-  ASSERT(var != NULL);  // must have been resolved
-  Slot* slot = var->AsSlot();
-
-  // If it was not possible to allocate the variable at compile time,
-  // we need to "declare" it at runtime to make sure it actually
-  // exists in the local context.
-  if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    // Variables with a "LOOKUP" slot were introduced as non-locals
-    // during variable resolution and must have mode DYNAMIC.
-    ASSERT(var->is_dynamic());
-    // For now, just do a runtime call.  Sync the virtual frame eagerly
-    // so we can simply push the arguments into place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(esi);
-    frame_->EmitPush(Immediate(var->name()));
-    // Declaration nodes are always introduced in one of two modes.
-    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
-    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
-    frame_->EmitPush(Immediate(Smi::FromInt(attr)));
-    // Push initial value, if any.
-    // Note: For variables we must not push an initial value (such as
-    // 'undefined') because we may have a (legal) redeclaration and we
-    // must not destroy the current value.
-    if (node->mode() == Variable::CONST) {
-      frame_->EmitPush(Immediate(FACTORY->the_hole_value()));
-    } else if (node->fun() != NULL) {
-      Load(node->fun());
-    } else {
-      frame_->EmitPush(Immediate(Smi::FromInt(0)));  // no initial value!
-    }
-    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
-    // Ignore the return value (declarations are statements).
-    return;
-  }
-
-  ASSERT(!var->is_global());
-
-  // If we have a function or a constant, we need to initialize the variable.
-  Expression* val = NULL;
-  if (node->mode() == Variable::CONST) {
-    val = new Literal(FACTORY->the_hole_value());
-  } else {
-    val = node->fun();  // NULL if we don't have a function
-  }
-
-  if (val != NULL) {
-    {
-      // Set the initial value.
-      Reference target(this, node->proxy());
-      Load(val);
-      target.SetValue(NOT_CONST_INIT);
-      // The reference is removed from the stack (preserving TOS) when
-      // it goes out of scope.
-    }
-    // Get rid of the assigned value (declarations are statements).
-    frame_->Drop();
-  }
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  CodeForStatementPosition(node);
-  Expression* expression = node->expression();
-  expression->MarkAsStatement();
-  Load(expression);
-  // Remove the lingering expression result from the top of stack.
-  frame_->Drop();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "// EmptyStatement");
-  CodeForStatementPosition(node);
-  // nothing to do
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ IfStatement");
-  // Generate different code depending on which parts of the if statement
-  // are present.
-  bool has_then_stm = node->HasThenStatement();
-  bool has_else_stm = node->HasElseStatement();
-
-  CodeForStatementPosition(node);
-  JumpTarget exit;
-  if (has_then_stm && has_else_stm) {
-    JumpTarget then;
-    JumpTarget else_;
-    ControlDestination dest(&then, &else_, true);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.false_was_fall_through()) {
-      // The else target was bound, so we compile the else part first.
-      Visit(node->else_statement());
-
-      // We may have dangling jumps to the then part.
-      if (then.is_linked()) {
-        if (has_valid_frame()) exit.Jump();
-        then.Bind();
-        Visit(node->then_statement());
-      }
-    } else {
-      // The then target was bound, so we compile the then part first.
-      Visit(node->then_statement());
-
-      if (else_.is_linked()) {
-        if (has_valid_frame()) exit.Jump();
-        else_.Bind();
-        Visit(node->else_statement());
-      }
-    }
-
-  } else if (has_then_stm) {
-    ASSERT(!has_else_stm);
-    JumpTarget then;
-    ControlDestination dest(&then, &exit, true);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.false_was_fall_through()) {
-      // The exit label was bound.  We may have dangling jumps to the
-      // then part.
-      if (then.is_linked()) {
-        exit.Unuse();
-        exit.Jump();
-        then.Bind();
-        Visit(node->then_statement());
-      }
-    } else {
-      // The then label was bound.
-      Visit(node->then_statement());
-    }
-
-  } else if (has_else_stm) {
-    ASSERT(!has_then_stm);
-    JumpTarget else_;
-    ControlDestination dest(&exit, &else_, false);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.true_was_fall_through()) {
-      // The exit label was bound.  We may have dangling jumps to the
-      // else part.
-      if (else_.is_linked()) {
-        exit.Unuse();
-        exit.Jump();
-        else_.Bind();
-        Visit(node->else_statement());
-      }
-    } else {
-      // The else label was bound.
-      Visit(node->else_statement());
-    }
-
-  } else {
-    ASSERT(!has_then_stm && !has_else_stm);
-    // We only care about the condition's side effects (not its value
-    // or control flow effect).  LoadCondition is called without
-    // forcing control flow.
-    ControlDestination dest(&exit, &exit, true);
-    LoadCondition(node->condition(), &dest, false);
-    if (!dest.is_used()) {
-      // We got a value on the frame rather than (or in addition to)
-      // control flow.
-      frame_->Drop();
-    }
-  }
-
-  if (exit.is_linked()) {
-    exit.Bind();
-  }
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ContinueStatement");
-  CodeForStatementPosition(node);
-  node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ BreakStatement");
-  CodeForStatementPosition(node);
-  node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ReturnStatement");
-
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  Result return_value = frame_->Pop();
-  masm()->positions_recorder()->WriteRecordedPositions();
-  if (function_return_is_shadowed_) {
-    function_return_.Jump(&return_value);
-  } else {
-    frame_->PrepareForReturn();
-    if (function_return_.is_bound()) {
-      // If the function return label is already bound we reuse the
-      // code by jumping to the return site.
-      function_return_.Jump(&return_value);
-    } else {
-      function_return_.Bind(&return_value);
-      GenerateReturnSequence(&return_value);
-    }
-  }
-}
-
-
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
-  // The return value is a live (but not currently reference counted)
-  // reference to eax.  This is safe because the current frame does not
-  // contain a reference to eax (it is prepared for the return by spilling
-  // all registers).
-  if (FLAG_trace) {
-    frame_->Push(return_value);
-    *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
-  }
-  return_value->ToRegister(eax);
-
-  // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
-  Label check_exit_codesize;
-  masm_->bind(&check_exit_codesize);
-#endif
-
-  // Leave the frame and return popping the arguments and the
-  // receiver.
-  frame_->Exit();
-  int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
-  __ Ret(arguments_bytes, ecx);
-  DeleteFrame();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Check that the size of the code used for returning is large enough
-  // for the debugger's requirements.
-  ASSERT(Assembler::kJSReturnSequenceLength <=
-         masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
-}
-
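Editor's note: the ret in GenerateReturnSequence pops the formal parameters plus the implicit receiver, i.e. (num_parameters + 1) * kPointerSize bytes, with kPointerSize == 4 on ia32. A worked instance of that arithmetic:

#include <cassert>

static int ArgumentsBytes(int num_parameters, int pointer_size) {
  return (num_parameters + 1) * pointer_size;  // +1 for the receiver
}

int main() {
  assert(ArgumentsBytes(2, 4) == 12);  // ia32: "ret 12" after the frame exit
  return 0;
}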
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WithEnterStatement");
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  Result context;
-  if (node->is_catch_block()) {
-    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
-  } else {
-    context = frame_->CallRuntime(Runtime::kPushContext, 1);
-  }
-
-  // Update context local.
-  frame_->SaveContextRegister();
-
-  // Verify that the runtime call result and esi agree.
-  if (FLAG_debug_code) {
-    __ cmp(context.reg(), Operand(esi));
-    __ Assert(equal, "Runtime::NewContext should end up in esi");
-  }
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WithExitStatement");
-  CodeForStatementPosition(node);
-  // Pop context.
-  __ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
-  // Update context local.
-  frame_->SaveContextRegister();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ SwitchStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
-  // Compile the switch value.
-  Load(node->tag());
-
-  ZoneList<CaseClause*>* cases = node->cases();
-  int length = cases->length();
-  CaseClause* default_clause = NULL;
-
-  JumpTarget next_test;
-  // Compile the case label expressions and comparisons.  Exit early
-  // if a comparison is unconditionally true.  The target next_test is
-  // bound before the loop in order to indicate control flow to the
-  // first comparison.
-  next_test.Bind();
-  for (int i = 0; i < length && !next_test.is_unused(); i++) {
-    CaseClause* clause = cases->at(i);
-    // The default is not a test, but remember it for later.
-    if (clause->is_default()) {
-      default_clause = clause;
-      continue;
-    }
-
-    Comment cmnt(masm_, "[ Case comparison");
-    // We recycle the same target next_test for each test.  Bind it if
-    // the previous test has not done so and then unuse it for the
-    // loop.
-    if (next_test.is_linked()) {
-      next_test.Bind();
-    }
-    next_test.Unuse();
-
-    // Duplicate the switch value.
-    frame_->Dup();
-
-    // Compile the label expression.
-    Load(clause->label());
-
-    // Compare and branch to the body if true or the next test if
-    // false.  Prefer the next test as a fall through.
-    ControlDestination dest(clause->body_target(), &next_test, false);
-    Comparison(node, equal, true, &dest);
-
-    // If the comparison fell through to the true target, jump to the
-    // actual body.
-    if (dest.true_was_fall_through()) {
-      clause->body_target()->Unuse();
-      clause->body_target()->Jump();
-    }
-  }
-
-  // If there was control flow to a next test from the last one
-  // compiled, compile a jump to the default or break target.
-  if (!next_test.is_unused()) {
-    if (next_test.is_linked()) {
-      next_test.Bind();
-    }
-    // Drop the switch value.
-    frame_->Drop();
-    if (default_clause != NULL) {
-      default_clause->body_target()->Jump();
-    } else {
-      node->break_target()->Jump();
-    }
-  }
-
-  // The last instruction emitted was a jump, either to the default
-  // clause or the break target, or else to a case body from the loop
-  // that compiles the tests.
-  ASSERT(!has_valid_frame());
-  // Compile case bodies as needed.
-  for (int i = 0; i < length; i++) {
-    CaseClause* clause = cases->at(i);
-
-    // There are two ways to reach the body: from the corresponding
-    // test or as the fall through of the previous body.
-    if (clause->body_target()->is_linked() || has_valid_frame()) {
-      if (clause->body_target()->is_linked()) {
-        if (has_valid_frame()) {
-          // If we have both a jump from the test and a fall through,
-          // insert a jump on the fall-through path so that it skips the
-          // drop of the switch value that the test path still needs.
-          // The exception is the default case, whose switch value has
-          // already been dropped.
-          if (clause->is_default()) {
-            clause->body_target()->Bind();
-          } else {
-            JumpTarget body;
-            body.Jump();
-            clause->body_target()->Bind();
-            frame_->Drop();
-            body.Bind();
-          }
-        } else {
-          // No fall through to worry about.
-          clause->body_target()->Bind();
-          if (!clause->is_default()) {
-            frame_->Drop();
-          }
-        }
-      } else {
-        // Otherwise, we have only fall through.
-        ASSERT(has_valid_frame());
-      }
-
-      // We are now prepared to compile the body.
-      Comment cmnt(masm_, "[ Case body");
-      VisitStatements(clause->statements());
-    }
-    clause->body_target()->Unuse();
-  }
-
-  // We may not have a valid frame here so bind the break target only
-  // if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-}
-
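Editor's note: VisitSwitchStatement keeps the switch value on the frame, duplicates it once per compiled test, and makes every path into a body drop it exactly once (the default path drops it before jumping). A small model of that stack discipline (a sketch; the vector plays the role of the virtual frame):

#include <cassert>
#include <vector>

// Returns the index of the matching case, or -1 for the default path.
static int Dispatch(std::vector<int>* stack, const std::vector<int>& labels) {
  const int tag = stack->back();
  for (size_t i = 0; i < labels.size(); ++i) {
    stack->push_back(tag);                 // frame_->Dup()
    const bool hit = stack->back() == labels[i];
    stack->pop_back();                     // the comparison consumes the dup
    if (hit) { stack->pop_back(); return static_cast<int>(i); }  // drop tag
  }
  stack->pop_back();                       // default/break also drops the tag
  return -1;
}

int main() {
  std::vector<int> frame = {7};
  assert(Dispatch(&frame, {5, 7, 9}) == 1);
  assert(frame.empty());                   // tag dropped exactly once
  return 0;
}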
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ DoWhileStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  JumpTarget body(JumpTarget::BIDIRECTIONAL);
-  IncrementLoopNesting();
-
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  // Label the top of the loop for the backward jump if necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // Use the continue target.
-      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-      node->continue_target()->Bind();
-      break;
-    case ALWAYS_FALSE:
-      // No need to label it.
-      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      break;
-    case DONT_KNOW:
-      // Continue is the test, so use the backward body target.
-      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      body.Bind();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  Visit(node->body());
-
-  // Compile the test.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // If control flow can fall off the end of the body, jump back
-      // to the top and bind the break target at the exit.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-    case ALWAYS_FALSE:
-      // We may have had continues or breaks in the body.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-    case DONT_KNOW:
-      // We have to compile the test expression if it can be reached by
-      // control flow falling out of the body or via continue.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (has_valid_frame()) {
-        Comment cmnt(masm_, "[ DoWhileCondition");
-        CodeForDoWhileConditionPosition(node);
-        ControlDestination dest(&body, node->break_target(), false);
-        LoadCondition(node->cond(), &dest, true);
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-  }
-
-  DecrementLoopNesting();
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WhileStatement");
-  CodeForStatementPosition(node);
-
-  // If the condition is always false and has no side effects, we do not
-  // need to compile anything.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  // Do not duplicate conditions that may have function literal
-  // subexpressions.  This can cause us to compile the function literal
-  // twice.
-  bool test_at_bottom = !node->may_have_function_literal();
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  IncrementLoopNesting();
-  JumpTarget body;
-  if (test_at_bottom) {
-    body.set_direction(JumpTarget::BIDIRECTIONAL);
-  }
-
-  // Based on the condition analysis, compile the test as necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // We will not compile the test expression.  Label the top of the
-      // loop with the continue target.
-      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-      node->continue_target()->Bind();
-      break;
-    case DONT_KNOW: {
-      if (test_at_bottom) {
-        // Continue is the test at the bottom, no need to label the test
-        // at the top.  The body is a backward target.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else {
-        // Label the test at the top as the continue target.  The body
-        // is a forward-only target.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      }
-      // Compile the test with the body as the true target and preferred
-      // fall-through and with the break target as the false target.
-      ControlDestination dest(&body, node->break_target(), true);
-      LoadCondition(node->cond(), &dest, true);
-
-      if (dest.false_was_fall_through()) {
-        // If we got the break target as fall-through, the test may have
-        // been unconditionally false (if there are no jumps to the
-        // body).
-        if (!body.is_linked()) {
-          DecrementLoopNesting();
-          return;
-        }
-
-        // Otherwise, jump around the body on the fall through and then
-        // bind the body target.
-        node->break_target()->Unuse();
-        node->break_target()->Jump();
-        body.Bind();
-      }
-      break;
-    }
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  Visit(node->body());
-
-  // Based on the condition analysis, compile the backward jump as
-  // necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // The loop body has been labeled with the continue target.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-      break;
-    case DONT_KNOW:
-      if (test_at_bottom) {
-        // If we have chosen to recompile the test at the bottom,
-        // then it is the continue target.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (has_valid_frame()) {
-          // The break target is the fall-through (body is a backward
-          // jump from here and thus an invalid fall-through).
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), &dest, true);
-        }
-      } else {
-        // If we have chosen not to recompile the test at the bottom,
-        // jump back to the one at the top.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
-      }
-      break;
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  // The break target may be already bound (by the condition), or there
-  // may not be a valid frame.  Bind it only if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-}
-
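Editor's note: the while compilation chooses between two loop shapes. When the condition cannot contain a function literal (test_at_bottom), it is compiled both at the top and at the bottom, so the back edge is a conditional branch; otherwise the single copy at the top is reused via an unconditional back jump. In C++ control-flow terms (a sketch):

#include <cstdio>

static int n = 3;
static bool Cond() { return n-- > 0; }
static void Body() { std::printf("iteration\n"); }

// Shape A (test_at_bottom): the condition is emitted twice; the back
// edge branches on it directly.
static void WhileTestAtBottom() {
  if (!Cond()) return;
 body:
  Body();
  if (Cond()) goto body;
}

// Shape B: one copy of the condition; the back edge jumps to the test.
static void WhileTestAtTop() {
 top:
  if (!Cond()) return;
  Body();
  goto top;
}

int main() { WhileTestAtBottom(); n = 3; WhileTestAtTop(); return 0; }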
-
-void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
-  ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
-  if (slot->type() == Slot::LOCAL) {
-    frame_->SetTypeForLocalAt(slot->index(), info);
-  } else {
-    frame_->SetTypeForParamAt(slot->index(), info);
-  }
-  if (FLAG_debug_code && info.IsSmi()) {
-    if (slot->type() == Slot::LOCAL) {
-      frame_->PushLocalAt(slot->index());
-    } else {
-      frame_->PushParameterAt(slot->index());
-    }
-    Result var = frame_->Pop();
-    var.ToRegister();
-    __ AbortIfNotSmi(var.reg());
-  }
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ForStatement");
-  CodeForStatementPosition(node);
-
-  // Compile the init expression if present.
-  if (node->init() != NULL) {
-    Visit(node->init());
-  }
-
-  // If the condition is always false and has no side effects, we do not
-  // need to compile anything else.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  // Do not duplicate conditions that may have function literal
-  // subexpressions.  This can cause us to compile the function literal
-  // twice.
-  bool test_at_bottom = !node->may_have_function_literal();
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  IncrementLoopNesting();
-
-  // Target for backward edge if no test at the bottom, otherwise
-  // unused.
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
-  // Target for backward edge if there is a test at the bottom,
-  // otherwise used as target for test at the top.
-  JumpTarget body;
-  if (test_at_bottom) {
-    body.set_direction(JumpTarget::BIDIRECTIONAL);
-  }
-
-  // Based on the condition analysis, compile the test as necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // We will not compile the test expression.  Label the top of the
-      // loop.
-      if (node->next() == NULL) {
-        // Use the continue target if there is no update expression.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        // Otherwise use the backward loop target.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        loop.Bind();
-      }
-      break;
-    case DONT_KNOW: {
-      if (test_at_bottom) {
-        // Continue is either the update expression or the test at the
-        // bottom, no need to label the test at the top.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else if (node->next() == NULL) {
-        // We are not recompiling the test at the bottom and there is no
-        // update expression.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        // We are not recompiling the test at the bottom and there is an
-        // update expression.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        loop.Bind();
-      }
-
-      // Compile the test with the body as the true target and preferred
-      // fall-through and with the break target as the false target.
-      ControlDestination dest(&body, node->break_target(), true);
-      LoadCondition(node->cond(), &dest, true);
-
-      if (dest.false_was_fall_through()) {
-        // If we got the break target as fall-through, the test may have
-        // been unconditionally false (if there are no jumps to the
-        // body).
-        if (!body.is_linked()) {
-          DecrementLoopNesting();
-          return;
-        }
-
-        // Otherwise, jump around the body on the fall through and then
-        // bind the body target.
-        node->break_target()->Unuse();
-        node->break_target()->Jump();
-        body.Bind();
-      }
-      break;
-    }
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-
-  // We know that the loop index is a smi if it is not modified in the
-  // loop body and it is checked against a constant limit in the loop
-  // condition.  In this case, we reset the static type information of the
-  // loop index to smi before compiling the body, the update expression, and
-  // the bottom check of the loop condition.
-  if (node->is_fast_smi_loop()) {
-    // Set number type of the loop variable to smi.
-    SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
-  }
-
-  Visit(node->body());
-
-  // If there is an update expression, compile it if necessary.
-  if (node->next() != NULL) {
-    if (node->continue_target()->is_linked()) {
-      node->continue_target()->Bind();
-    }
-
-    // Control can reach the update by falling out of the body or by a
-    // continue.
-    if (has_valid_frame()) {
-      // Record the source position of the statement; this code, which
-      // comes after the code for the body, actually belongs to the loop
-      // statement and not to the body.
-      CodeForStatementPosition(node);
-      Visit(node->next());
-    }
-  }
-
-  // Set the type of the loop variable to smi before compiling the test
-  // expression if we are in a fast smi loop condition.
-  if (node->is_fast_smi_loop() && has_valid_frame()) {
-    // Set number type of the loop variable to smi.
-    SetTypeForStackSlot(node->loop_variable()->AsSlot(), TypeInfo::Smi());
-  }
-
-  // Based on the condition analysis, compile the backward jump as
-  // necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      if (has_valid_frame()) {
-        if (node->next() == NULL) {
-          node->continue_target()->Jump();
-        } else {
-          loop.Jump();
-        }
-      }
-      break;
-    case DONT_KNOW:
-      if (test_at_bottom) {
-        if (node->continue_target()->is_linked()) {
-          // We can have dangling jumps to the continue target if there
-          // was no update expression.
-          node->continue_target()->Bind();
-        }
-        // Control can reach the test at the bottom by falling out of
-        // the body, by a continue in the body, or from the update
-        // expression.
-        if (has_valid_frame()) {
-          // The break target is the fall-through (body is a backward
-          // jump from here).
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), &dest, true);
-        }
-      } else {
-        // Otherwise, jump back to the test at the top.
-        if (has_valid_frame()) {
-          if (node->next() == NULL) {
-            node->continue_target()->Jump();
-          } else {
-            loop.Jump();
-          }
-        }
-      }
-      break;
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  // The break target may be already bound (by the condition), or there
-  // may not be a valid frame.  Bind it only if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ ForInStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget primitive;
-  JumpTarget jsobject;
-  JumpTarget fixed_array;
-  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
-  JumpTarget end_del_check;
-  JumpTarget exit;
-
-  // Get the object to enumerate over (converted to JSObject).
-  LoadAndSpill(node->enumerable());
-
-  // Both SpiderMonkey and KJS ignore null and undefined in contrast
-  // to the specification; section 12.6.4 mandates a call to ToObject.
-  frame_->EmitPop(eax);
-
-  // eax: value to be iterated over
-  __ cmp(eax, FACTORY->undefined_value());
-  exit.Branch(equal);
-  __ cmp(eax, FACTORY->null_value());
-  exit.Branch(equal);
-
-  // Stack layout in body:
-  // [iteration counter (smi)] <- slot 0
-  // [length of array]         <- slot 1
-  // [FixedArray]              <- slot 2
-  // [Map or 0]                <- slot 3
-  // [Object]                  <- slot 4
-
-  // Check if enumerable is already a JSObject
-  // eax: value to be iterated over
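-  // A zero smi tag means the value is a smi and therefore a primitive
-  // that must be boxed via ToObject below.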
-  __ test(eax, Immediate(kSmiTagMask));
-  primitive.Branch(zero);
-  __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
-  jsobject.Branch(above_equal);
-
-  primitive.Bind();
-  frame_->EmitPush(eax);
-  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
-  // The function call returns the value in eax, which is where we want it below.
-
-  jsobject.Bind();
-  // Get the set of properties (as a FixedArray or Map).
-  // eax: value to be iterated over
-  frame_->EmitPush(eax);  // Push the object being iterated over.
-
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  JumpTarget call_runtime;
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-  JumpTarget check_prototype;
-  JumpTarget use_cache;
-  __ mov(ecx, eax);
-  loop.Bind();
-  // Check that there are no elements.
-  __ mov(edx, FieldOperand(ecx, JSObject::kElementsOffset));
-  __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
-  call_runtime.Branch(not_equal);
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in ebx for the subsequent
-  // prototype load.
-  __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
-  __ cmp(Operand(edx), Immediate(FACTORY->empty_descriptor_array()));
-  call_runtime.Branch(equal);
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors.  This is the case if the next enumeration index
-  // field does not contain a smi.
-  __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
-  __ test(edx, Immediate(kSmiTagMask));
-  call_runtime.Branch(zero);
-  // For all objects but the receiver, check that the cache is empty.
-  __ cmp(ecx, Operand(eax));
-  check_prototype.Branch(equal);
-  __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  __ cmp(Operand(edx), Immediate(FACTORY->empty_fixed_array()));
-  call_runtime.Branch(not_equal);
-  check_prototype.Bind();
-  // Load the prototype from the map and loop if non-null.
-  __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
-  __ cmp(Operand(ecx), Immediate(FACTORY->null_value()));
-  loop.Branch(not_equal);
-  // The enum cache is valid.  Load the map of the object being
-  // iterated over and use the cache for the iteration.
-  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
-  use_cache.Jump();
-
-  call_runtime.Bind();
-  // Call the runtime to get the property names for the object.
-  frame_->EmitPush(eax);  // push the Object (slot 4) for the runtime call
-  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
-  // If we got a map from the runtime call, we can do a fast
-  // modification check. Otherwise, we got a fixed array, and we have
-  // to do a slow check.
-  // eax: map or fixed array (result from call to
-  // Runtime::kGetPropertyNamesFast)
-  __ mov(edx, Operand(eax));
-  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ cmp(ecx, FACTORY->meta_map());
-  fixed_array.Branch(not_equal);
-
-  use_cache.Bind();
-  // Get enum cache
-  // eax: map (either the result from a call to
-  // Runtime::kGetPropertyNamesFast or has been fetched directly from
-  // the object)
-  __ mov(ecx, Operand(eax));
-
-  __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
-  // Get the bridge array held in the enumeration index field.
-  __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
-  // Get the cache from the bridge array.
-  __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
-  frame_->EmitPush(eax);  // <- slot 3
-  frame_->EmitPush(edx);  // <- slot 2
-  __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
-  frame_->EmitPush(eax);  // <- slot 1
-  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
-  entry.Jump();
-
-  fixed_array.Bind();
-  // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
-  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 3
-  frame_->EmitPush(eax);  // <- slot 2
-
-  // Push the length of the array and the initial index onto the stack.
-  __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
-  frame_->EmitPush(eax);  // <- slot 1
-  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
-
-  // Condition.
-  entry.Bind();
-  // Grab the current frame's height for the break and continue
-  // targets only after all the state is pushed on the frame.
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
-  __ mov(eax, frame_->ElementAt(0));  // load the current count
-  __ cmp(eax, frame_->ElementAt(1));  // compare to the array length
-  node->break_target()->Branch(above_equal);
-
-  // Get the i'th entry of the array.
-  __ mov(edx, frame_->ElementAt(2));
-  __ mov(ebx, FixedArrayElementOperand(edx, eax));
-
-  // Get the expected map from the stack or a zero map in the
-  // permanent slow case.
-  // eax: current iteration count
-  // ebx: i'th entry of the enum cache
-  __ mov(edx, frame_->ElementAt(3));
-  // Check if the expected map still matches that of the enumerable.
-  // If not, we have to filter the key.
-  // eax: current iteration count
-  // ebx: i'th entry of the enum cache
-  // edx: expected map value
-  __ mov(ecx, frame_->ElementAt(4));
-  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ cmp(ecx, Operand(edx));
-  end_del_check.Branch(equal);
-
-  // Convert the entry to a string (or null if it isn't a property anymore).
-  frame_->EmitPush(frame_->ElementAt(4));  // push enumerable
-  frame_->EmitPush(ebx);  // push entry
-  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
-  __ mov(ebx, Operand(eax));
-
-  // If the property has been removed while iterating, we just skip it.
-  __ test(ebx, Operand(ebx));
-  node->continue_target()->Branch(equal);
-
-  end_del_check.Bind();
-  // Store the entry in the 'each' expression and take another spin in
-  // the loop.  ebx: i'th entry of the enum cache (or the string thereof).
-  frame_->EmitPush(ebx);
-  { Reference each(this, node->each());
-    if (!each.is_illegal()) {
-      if (each.size() > 0) {
-        // Loading a reference may leave the frame in an unspilled state.
-        frame_->SpillAll();
-        // Get the value (under the reference on the stack) from memory.
-        frame_->EmitPush(frame_->ElementAt(each.size()));
-        each.SetValue(NOT_CONST_INIT);
-        frame_->Drop(2);
-      } else {
-        // If the reference was to a slot we rely on the convenient property
-        // that it doesn't matter whether a value (e.g. ebx pushed above) is
-        // right on top of or right underneath a zero-sized reference.
-        each.SetValue(NOT_CONST_INIT);
-        frame_->Drop();
-      }
-    }
-  }
-  // Unloading a reference may leave the frame in an unspilled state.
-  frame_->SpillAll();
-
-  // Body.
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  VisitAndSpill(node->body());
-
-  // Next.  Reestablish a spilled frame in case we are coming here via
-  // a continue in the body.
-  node->continue_target()->Bind();
-  frame_->SpillAll();
-  frame_->EmitPop(eax);
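-  // ia32 smis are the integer value shifted left by one bit, so adding
-  // the smi constant 1 increments the counter without untagging.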
-  __ add(Operand(eax), Immediate(Smi::FromInt(1)));
-  frame_->EmitPush(eax);
-  entry.Jump();
-
-  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
-  // any frame.
-  node->break_target()->Bind();
-  frame_->Drop(5);
-
-  // Exit.
-  exit.Bind();
-
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryCatchStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget try_block;
-  JumpTarget exit;
-
-  try_block.Call();
-  // --- Catch block ---
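-  // This point is reached only when an exception is thrown in the try
-  // block; the thrown value arrives in eax.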
-  frame_->EmitPush(eax);
-
-  // Store the caught exception in the catch variable.
-  Variable* catch_var = node->catch_var()->var();
-  ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
-  StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
-  // Remove the exception from the stack.
-  frame_->Drop();
-
-  VisitStatementsAndSpill(node->catch_block()->statements());
-  if (has_valid_frame()) {
-    exit.Jump();
-  }
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_CATCH_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the jump targets for all escapes from the try block, including
-  // returns.  During shadowing, the original target is hidden as the
-  // ShadowTarget and operations on the original actually affect the
-  // shadowing target.
-  //
-  // We should probably try to unify the escaping targets and the return
-  // target.
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  VisitStatementsAndSpill(node->try_block()->statements());
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original targets are unshadowed and the
-  // ShadowTargets represent the formerly shadowing targets.
-  bool has_unlinks = false;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    has_unlinks = has_unlinks || shadows[i]->is_linked();
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address,
-                                    masm()->isolate());
-
-  // Make sure that there's nothing left on the stack above the
-  // handler structure.
-  if (FLAG_debug_code) {
-    __ mov(eax, Operand::StaticVariable(handler_address));
-    __ cmp(esp, Operand(eax));
-    __ Assert(equal, "stack pointer should point to top handler");
-  }
-
-  // If we can fall off the end of the try block, unlink from try chain.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.  Unlink from
-    // the handler list and drop the rest of this handler from the
-    // frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    frame_->EmitPop(Operand::StaticVariable(handler_address));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-    if (has_unlinks) {
-      exit.Jump();
-    }
-  }
-
-  // Generate unlink code for the (formerly) shadowing targets that
-  // have been jumped to.  Deallocate each shadow target.
-  Result return_value;
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // Unlink from try chain; be careful not to destroy the TOS if
-      // there is one.
-      if (i == kReturnShadowIndex) {
-        shadows[i]->Bind(&return_value);
-        return_value.ToRegister(eax);
-      } else {
-        shadows[i]->Bind();
-      }
-      // Because we can be jumping here (to spilled code) from
-      // unspilled code, we need to reestablish a spilled frame at
-      // this block.
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that we
-      // break from (e.g. for...in) may have left stuff on the stack.
-      __ mov(esp, Operand::StaticVariable(handler_address));
-      frame_->Forget(frame_->height() - handler_height);
-
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      frame_->EmitPop(Operand::StaticVariable(handler_address));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (i == kReturnShadowIndex) {
-        if (!function_return_is_shadowed_) frame_->PrepareForReturn();
-        shadows[i]->other_target()->Jump(&return_value);
-      } else {
-        shadows[i]->other_target()->Jump();
-      }
-    }
-  }
-
-  exit.Bind();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryFinallyStatement");
-  CodeForStatementPosition(node);
-
-  // State: Used to keep track of reason for entering the finally
-  // block. Should probably be extended to hold information for
-  // break/continue from within the try block.
-  enum { FALLING, THROWING, JUMPING };
-
-  JumpTarget try_block;
-  JumpTarget finally_block;
-
-  try_block.Call();
-
-  frame_->EmitPush(eax);
-  // In case of thrown exceptions, this is where we continue.
-  __ Set(ecx, Immediate(Smi::FromInt(THROWING)));
-  finally_block.Jump();
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the jump targets for all escapes from the try block, including
-  // returns.  During shadowing, the original target is hidden as the
-  // ShadowTarget and operations on the original actually affect the
-  // shadowing target.
-  //
-  // We should probably try to unify the escaping targets and the return
-  // target.
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  VisitStatementsAndSpill(node->try_block()->statements());
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original targets are unshadowed and the
-  // ShadowTargets represent the formerly shadowing targets.
-  int nof_unlinks = 0;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    if (shadows[i]->is_linked()) nof_unlinks++;
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address,
-                                    masm()->isolate());
-
-  // If we can fall off the end of the try block, unlink from the try
-  // chain and set the state on the frame to FALLING.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    frame_->EmitPop(Operand::StaticVariable(handler_address));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-    // Fake a top of stack value (unneeded when FALLING) and set the
-    // state in ecx, then jump around the unlink blocks if any.
-    frame_->EmitPush(Immediate(FACTORY->undefined_value()));
-    __ Set(ecx, Immediate(Smi::FromInt(FALLING)));
-    if (nof_unlinks > 0) {
-      finally_block.Jump();
-    }
-  }
-
-  // Generate code to unlink and set the state for the (formerly)
-  // shadowing targets that have been jumped to.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // If we have come from the shadowed return, the return value is
-      // on the virtual frame.  We must preserve it until it is
-      // pushed.
-      if (i == kReturnShadowIndex) {
-        Result return_value;
-        shadows[i]->Bind(&return_value);
-        return_value.ToRegister(eax);
-      } else {
-        shadows[i]->Bind();
-      }
-      // Because we can be jumping here (to spilled code) from
-      // unspilled code, we need to reestablish a spilled frame at
-      // this block.
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that
-      // we break from (e.g. for...in) may have left stuff on the
-      // stack.
-      __ mov(esp, Operand::StaticVariable(handler_address));
-      frame_->Forget(frame_->height() - handler_height);
-
-      // Unlink this handler and drop it from the frame.
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      frame_->EmitPop(Operand::StaticVariable(handler_address));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (i == kReturnShadowIndex) {
-        // If this target shadowed the function return, materialize
-        // the return value on the stack.
-        frame_->EmitPush(eax);
-      } else {
-        // Fake TOS for targets that shadowed breaks and continues.
-        frame_->EmitPush(Immediate(FACTORY->undefined_value()));
-      }
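-      // Encode the taken shadow target as state JUMPING + i; the
-      // finally block dispatches on this value below.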
-      __ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
-      if (--nof_unlinks > 0) {
-        // If this is not the last unlink block, jump around the next.
-        finally_block.Jump();
-      }
-    }
-  }
-
-  // --- Finally block ---
-  finally_block.Bind();
-
-  // Push the state on the stack.
-  frame_->EmitPush(ecx);
-
-  // We keep two elements on the stack - the (possibly faked) result
-  // and the state - while evaluating the finally block.
-  //
-  // Generate code for the statements in the finally block.
-  VisitStatementsAndSpill(node->finally_block()->statements());
-
-  if (has_valid_frame()) {
-    // Restore state and return value or faked TOS.
-    frame_->EmitPop(ecx);
-    frame_->EmitPop(eax);
-  }
-
-  // Generate code to jump to the right destination for all used
-  // formerly shadowing targets.  Deallocate each shadow target.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (has_valid_frame() && shadows[i]->is_bound()) {
-      BreakTarget* original = shadows[i]->other_target();
-      __ cmp(Operand(ecx), Immediate(Smi::FromInt(JUMPING + i)));
-      if (i == kReturnShadowIndex) {
-        // The return value is (already) in eax.
-        Result return_value = allocator_->Allocate(eax);
-        ASSERT(return_value.is_valid());
-        if (function_return_is_shadowed_) {
-          original->Branch(equal, &return_value);
-        } else {
-          // Branch around the preparation for return which may emit
-          // code.
-          JumpTarget skip;
-          skip.Branch(not_equal);
-          frame_->PrepareForReturn();
-          original->Jump(&return_value);
-          skip.Bind();
-        }
-      } else {
-        original->Branch(equal);
-      }
-    }
-  }
-
-  if (has_valid_frame()) {
-    // Check if we need to rethrow the exception.
-    JumpTarget exit;
-    __ cmp(Operand(ecx), Immediate(Smi::FromInt(THROWING)));
-    exit.Branch(not_equal);
-
-    // Rethrow exception.
-    frame_->EmitPush(eax);  // undo pop from above
-    frame_->CallRuntime(Runtime::kReThrow, 1);
-
-    // Done.
-    exit.Bind();
-  }
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ DebuggerStatement");
-  CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Spill everything, even constants, to the frame.
-  frame_->SpillAll();
-
-  frame_->DebugBreak();
-  // Ignore the return value.
-#endif
-}
-
-
-Result CodeGenerator::InstantiateFunction(
-    Handle<SharedFunctionInfo> function_info,
-    bool pretenure) {
-  // The inevitable call will sync frame elements to memory anyway, so
-  // we do it eagerly to allow us to push the arguments directly into
-  // place.
-  frame()->SyncRange(0, frame()->element_count() - 1);
-
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning.
-  if (!pretenure &&
-      scope()->is_function_scope() &&
-      function_info->num_literals() == 0) {
-    FastNewClosureStub stub(
-        function_info->strict_mode() ? kStrictMode : kNonStrictMode);
-    frame()->EmitPush(Immediate(function_info));
-    return frame()->CallStub(&stub, 1);
-  } else {
-    // Call the runtime to instantiate the function based on the
-    // shared function info.
-    frame()->EmitPush(esi);
-    frame()->EmitPush(Immediate(function_info));
-    frame()->EmitPush(Immediate(pretenure
-                                ? FACTORY->true_value()
-                                : FACTORY->false_value()));
-    return frame()->CallRuntime(Runtime::kNewClosure, 3);
-  }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
-  Comment cmnt(masm_, "[ FunctionLiteral");
-  ASSERT(!in_safe_int32_mode());
-  // Build the function info and instantiate it.
-  Handle<SharedFunctionInfo> function_info =
-      Compiler::BuildFunctionInfo(node, script());
-  // Check for stack-overflow exception.
-  if (function_info.is_null()) {
-    SetStackOverflow();
-    return;
-  }
-  Result result = InstantiateFunction(function_info, node->pretenure());
-  frame()->Push(&result);
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
-  Result result = InstantiateFunction(node->shared_function_info(), false);
-  frame()->Push(&result);
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
-  Comment cmnt(masm_, "[ Conditional");
-  ASSERT(!in_safe_int32_mode());
-  JumpTarget then;
-  JumpTarget else_;
-  JumpTarget exit;
-  ControlDestination dest(&then, &else_, true);
-  LoadCondition(node->condition(), &dest, true);
-
-  if (dest.false_was_fall_through()) {
-    // The else target was bound, so we compile the else part first.
-    Load(node->else_expression());
-
-    if (then.is_linked()) {
-      exit.Jump();
-      then.Bind();
-      Load(node->then_expression());
-    }
-  } else {
-    // The then target was bound, so we compile the then part first.
-    Load(node->then_expression());
-
-    if (else_.is_linked()) {
-      exit.Jump();
-      else_.Bind();
-      Load(node->else_expression());
-    }
-  }
-
-  exit.Bind();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-    JumpTarget slow;
-    JumpTarget done;
-    Result value;
-
-    // Generate fast case for loading from slots that correspond to
-    // local/global variables or arguments unless they are shadowed by
-    // eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(slot,
-                                    typeof_state,
-                                    &value,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    // A runtime call is inevitable.  We eagerly sync frame elements
-    // to memory so that we can push the arguments directly into place
-    // on top of the frame.
-    frame()->SyncRange(0, frame()->element_count() - 1);
-    frame()->EmitPush(esi);
-    frame()->EmitPush(Immediate(slot->var()->name()));
-    if (typeof_state == INSIDE_TYPEOF) {
-      value =
-          frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
-    } else {
-      value = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
-    }
-
-    done.Bind(&value);
-    frame_->Push(&value);
-
-  } else if (slot->var()->mode() == Variable::CONST) {
-    // Const slots may contain 'the hole' value (the constant hasn't been
-    // initialized yet) which needs to be converted into the 'undefined'
-    // value.
-    //
-    // We currently spill the virtual frame because constants use the
-    // potentially unsafe direct-frame access of SlotOperand.
-    VirtualFrame::SpilledScope spilled_scope;
-    Comment cmnt(masm_, "[ Load const");
-    Label exit;
-    __ mov(ecx, SlotOperand(slot, ecx));
-    __ cmp(ecx, FACTORY->the_hole_value());
-    __ j(not_equal, &exit);
-    __ mov(ecx, FACTORY->undefined_value());
-    __ bind(&exit);
-    frame()->EmitPush(ecx);
-
-  } else if (slot->type() == Slot::PARAMETER) {
-    frame()->PushParameterAt(slot->index());
-
-  } else if (slot->type() == Slot::LOCAL) {
-    frame()->PushLocalAt(slot->index());
-
-  } else {
-    // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
-    // here.
-    //
-    // The use of SlotOperand below is safe for an unspilled frame
-    // because it will always be a context slot.
-    ASSERT(slot->type() == Slot::CONTEXT);
-    Result temp = allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
-    frame()->Push(&temp);
-  }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
-                                                  TypeofState state) {
-  LoadFromSlot(slot, state);
-
-  // Bail out quickly if we're not using lazy arguments allocation.
-  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
-  // ... or if the slot isn't a non-parameter arguments slot.
-  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
-  // If the loaded value is a constant, we know statically whether the
-  // arguments object has been allocated yet.
-  Result result = frame()->Pop();
-  if (result.is_constant()) {
-    if (result.handle()->IsArgumentsMarker()) {
-      result = StoreArgumentsObject(false);
-    }
-    frame()->Push(&result);
-    return;
-  }
-  ASSERT(result.is_register());
-  // The loaded value is in a register. If it is the sentinel that
-  // indicates that we haven't loaded the arguments object yet, we
-  // need to do it now.
-  JumpTarget exit;
-  __ cmp(Operand(result.reg()), Immediate(FACTORY->arguments_marker()));
-  frame()->Push(&result);
-  exit.Branch(not_equal);
-
-  result = StoreArgumentsObject(false);
-  frame()->SetElementAt(0, &result);
-  result.Unuse();
-  exit.Bind();
-  return;
-}
-
-
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
-    Slot* slot,
-    TypeofState typeof_state,
-    JumpTarget* slow) {
-  ASSERT(!in_safe_int32_mode());
-  // Check that no extension objects have been created by calls to
-  // eval from the current scope to the global scope.
-  Register context = esi;
-  Result tmp = allocator_->Allocate();
-  ASSERT(tmp.is_valid());  // All non-reserved registers were available.
-
-  Scope* s = scope();
-  while (s != NULL) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
-               Immediate(0));
-        slow->Branch(not_equal, not_taken);
-      }
-      // Load next context in chain.
-      __ mov(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
-      __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-      context = tmp.reg();
-    }
-    // If no outer scope calls eval, we do not need to check more
-    // context extensions.  If we have reached an eval scope, we check
-    // all extensions from this point.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
-    s = s->outer_scope();
-  }
-
-  if (s != NULL && s->is_eval_scope()) {
-    // Loop up the context chain.  There is no frame effect so it is
-    // safe to use raw labels here.
-    Label next, fast;
-    if (!context.is(tmp.reg())) {
-      __ mov(tmp.reg(), context);
-    }
-    __ bind(&next);
-    // Terminate at global context.
-    __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
-           Immediate(FACTORY->global_context_map()));
-    __ j(equal, &fast);
-    // Check that extension is NULL.
-    __ cmp(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
-    slow->Branch(not_equal, not_taken);
-    // Load next context in chain.
-    __ mov(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
-    __ mov(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-    __ jmp(&next);
-    __ bind(&fast);
-  }
-  tmp.Unuse();
-
-  // All extension objects were empty and it is safe to use a global
-  // load IC call.
-  // The register allocator prefers eax if it is free, so the code generator
-  // will load the global object directly into eax, which is where the LoadIC
-  // expects it.
-  frame_->Spill(eax);
-  LoadGlobal();
-  frame_->Push(slot->var()->name());
-  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
-                         ? RelocInfo::CODE_TARGET
-                         : RelocInfo::CODE_TARGET_CONTEXT;
-  Result answer = frame_->CallLoadIC(mode);
-  // A test eax instruction following the call signals that the inobject
-  // property case was inlined.  Ensure that there is not a test eax
-  // instruction here.
-  __ nop();
-  return answer;
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                                    TypeofState typeof_state,
-                                                    Result* result,
-                                                    JumpTarget* slow,
-                                                    JumpTarget* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
-  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-    *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
-    done->Jump(result);
-
-  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
-    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
-    if (potential_slot != NULL) {
-      // Generate fast case for locals that rewrite to slots.
-      // Allocate a fresh register to use as a temp in
-      // ContextSlotOperandCheckExtensions and to hold the result
-      // value.
-      *result = allocator()->Allocate();
-      ASSERT(result->is_valid());
-      __ mov(result->reg(),
-             ContextSlotOperandCheckExtensions(potential_slot, *result, slow));
-      if (potential_slot->var()->mode() == Variable::CONST) {
-        __ cmp(result->reg(), FACTORY->the_hole_value());
-        done->Branch(not_equal, result);
-        __ mov(result->reg(), FACTORY->undefined_value());
-      }
-      done->Jump(result);
-    } else if (rewrite != NULL) {
-      // Generate fast case for calls of an argument function.
-      Property* property = rewrite->AsProperty();
-      if (property != NULL) {
-        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-        Literal* key_literal = property->key()->AsLiteral();
-        if (obj_proxy != NULL &&
-            key_literal != NULL &&
-            obj_proxy->IsArguments() &&
-            key_literal->handle()->IsSmi()) {
-          // Load arguments object if there are no eval-introduced
-          // variables. Then load the argument from the arguments
-          // object using keyed load.
-          Result arguments = allocator()->Allocate();
-          ASSERT(arguments.is_valid());
-          __ mov(arguments.reg(),
-                 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
-                                                   arguments,
-                                                   slow));
-          frame_->Push(&arguments);
-          frame_->Push(key_literal->handle());
-          *result = EmitKeyedLoad();
-          done->Jump(result);
-        }
-      }
-    }
-  }
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-
-    // For now, just do a runtime call.  Since the call is inevitable,
-    // we eagerly sync the virtual frame so we can directly push the
-    // arguments into place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-
-    frame_->EmitPush(esi);
-    frame_->EmitPush(Immediate(slot->var()->name()));
-
-    Result value;
-    if (init_state == CONST_INIT) {
-      // Same as the case for a normal store, but ignores attribute
-      // (e.g. READ_ONLY) of context slot so that we can initialize const
-      // properties (introduced via eval("const foo = (some expr);")). Also,
-      // uses the current function context instead of the top context.
-      //
-      // Note that we must declare foo upon entry of eval(), via a
-      // context slot declaration, but we cannot initialize it at the same
-      // time, because the const declaration may be at the end of the eval
-      // code (sigh...) and the const variable may have been used before
-      // (where its value is 'undefined'). Thus, we can only do the
-      // initialization when we actually encounter the expression and when
-      // the expression operands are defined and valid, and thus we need the
-      // split into two operations: declaration of the context slot followed
-      // by initialization.
-      value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
-    } else {
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
-    }
-    // Storing a variable must keep the (new) value on the expression
-    // stack. This is necessary for compiling chained assignment
-    // expressions.
-    frame_->Push(&value);
-
-  } else {
-    ASSERT(!slot->var()->is_dynamic());
-
-    JumpTarget exit;
-    if (init_state == CONST_INIT) {
-      ASSERT(slot->var()->mode() == Variable::CONST);
-      // Only the first const initialization must be executed (the slot
-      // still contains 'the hole' value). When the assignment is executed,
-      // the code is identical to a normal store (see below).
-      //
-      // We spill the frame in the code below because the direct-frame
-      // access of SlotOperand is potentially unsafe with an unspilled
-      // frame.
-      VirtualFrame::SpilledScope spilled_scope;
-      Comment cmnt(masm_, "[ Init const");
-      __ mov(ecx, SlotOperand(slot, ecx));
-      __ cmp(ecx, FACTORY->the_hole_value());
-      exit.Branch(not_equal);
-    }
-
-    // We must execute the store.  Storing a variable must keep the (new)
-    // value on the stack. This is necessary for compiling assignment
-    // expressions.
-    //
-    // Note: We will reach here even with slot->var()->mode() ==
-    // Variable::CONST because of const declarations which will initialize
-    // consts to 'the hole' value and by doing so, end up calling this code.
-    if (slot->type() == Slot::PARAMETER) {
-      frame_->StoreToParameterAt(slot->index());
-    } else if (slot->type() == Slot::LOCAL) {
-      frame_->StoreToLocalAt(slot->index());
-    } else {
-      // The other slot types (LOOKUP and GLOBAL) cannot reach here.
-      //
-      // The use of SlotOperand below is safe for an unspilled frame
-      // because the slot is a context slot.
-      ASSERT(slot->type() == Slot::CONTEXT);
-      frame_->Dup();
-      Result value = frame_->Pop();
-      value.ToRegister();
-      Result start = allocator_->Allocate();
-      ASSERT(start.is_valid());
-      __ mov(SlotOperand(slot, start.reg()), value.reg());
-      // RecordWrite may destroy the value registers.
-      //
-      // TODO(204): Avoid actually spilling when the value is not
-      // needed (probably the common case).
-      frame_->Spill(value.reg());
-      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
-      // The results start, value, and temp are unused by going out of
-      // scope.
-    }
-
-    exit.Bind();
-  }
-}
-
-
-void CodeGenerator::VisitSlot(Slot* slot) {
-  Comment cmnt(masm_, "[ Slot");
-  if (in_safe_int32_mode()) {
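-    // In safe int32 mode locals and parameters are pushed untagged so
-    // that int32 code can consume them without smi checks.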
-    if (slot->type() == Slot::LOCAL && !slot->is_arguments()) {
-      frame()->UntaggedPushLocalAt(slot->index());
-    } else if (slot->type() == Slot::PARAMETER) {
-      frame()->UntaggedPushParameterAt(slot->index());
-    } else {
-      UNREACHABLE();
-    }
-  } else {
-    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-  }
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-  Comment cmnt(masm_, "[ VariableProxy");
-  Variable* var = node->var();
-  Expression* expr = var->rewrite();
-  if (expr != NULL) {
-    Visit(expr);
-  } else {
-    ASSERT(var->is_global());
-    ASSERT(!in_safe_int32_mode());
-    Reference ref(this, node);
-    ref.GetValue();
-  }
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-  Comment cmnt(masm_, "[ Literal");
-  if (frame_->ConstantPoolOverflowed()) {
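-    // The virtual frame can only track a limited number of constants;
-    // once that limit is reached, materialize the literal through a
-    // register instead.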
-    Result temp = allocator_->Allocate();
-    ASSERT(temp.is_valid());
-    if (in_safe_int32_mode()) {
-      temp.set_untagged_int32(true);
-    }
-    __ Set(temp.reg(), Immediate(node->handle()));
-    frame_->Push(&temp);
-  } else {
-    if (in_safe_int32_mode()) {
-      frame_->PushUntaggedElement(node->handle());
-    } else {
-      frame_->Push(node->handle());
-    }
-  }
-}
-
-
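-// The 'unsafe smi' helpers below never embed a large smi directly in
-// the instruction stream.  The smi's bits are XOR-ed with the
-// per-compilation JIT cookie when emitted and XOR-ed again at run time
-// to recover the value, so attacker-controlled constants cannot be
-// planted in generated code (a defense against JIT spraying).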
-void CodeGenerator::PushUnsafeSmi(Handle<Object> value) {
-  ASSERT(value->IsSmi());
-  int bits = reinterpret_cast<int>(*value);
-  __ push(Immediate(bits ^ jit_cookie_));
-  __ xor_(Operand(esp, 0), Immediate(jit_cookie_));
-}
-
-
-void CodeGenerator::StoreUnsafeSmiToLocal(int offset, Handle<Object> value) {
-  ASSERT(value->IsSmi());
-  int bits = reinterpret_cast<int>(*value);
-  __ mov(Operand(ebp, offset), Immediate(bits ^ jit_cookie_));
-  __ xor_(Operand(ebp, offset), Immediate(jit_cookie_));
-}
-
-
-void CodeGenerator::MoveUnsafeSmi(Register target, Handle<Object> value) {
-  ASSERT(target.is_valid());
-  ASSERT(value->IsSmi());
-  int bits = reinterpret_cast<int>(*value);
-  __ Set(target, Immediate(bits ^ jit_cookie_));
-  __ xor_(target, jit_cookie_);
-}
-
-
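-// A smi is 'unsafe' to inline verbatim if its payload does not fit in
-// kMaxSmiInlinedBits bits.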
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
-  if (!value->IsSmi()) return false;
-  int int_value = Smi::cast(*value)->value();
-  return !is_intn(int_value, kMaxSmiInlinedBits);
-}
-
-
-// Materialize the regexp literal 'node' in the literals array
-// 'literals' of the function.  Leave the regexp boilerplate in
-// 'boilerplate'.
-class DeferredRegExpLiteral: public DeferredCode {
- public:
-  DeferredRegExpLiteral(Register boilerplate,
-                        Register literals,
-                        RegExpLiteral* node)
-      : boilerplate_(boilerplate), literals_(literals), node_(node) {
-    set_comment("[ DeferredRegExpLiteral");
-  }
-
-  void Generate();
-
- private:
-  Register boilerplate_;
-  Register literals_;
-  RegExpLiteral* node_;
-};
-
-
-void DeferredRegExpLiteral::Generate() {
-  // Since the entry is undefined we call the runtime system to
-  // compute the literal.
-  // Literal array (0).
-  __ push(literals_);
-  // Literal index (1).
-  __ push(Immediate(Smi::FromInt(node_->literal_index())));
-  // RegExp pattern (2).
-  __ push(Immediate(node_->pattern()));
-  // RegExp flags (3).
-  __ push(Immediate(node_->flags()));
-  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
-  if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
-}
-
-
-class DeferredAllocateInNewSpace: public DeferredCode {
- public:
-  DeferredAllocateInNewSpace(int size,
-                             Register target,
-                             int registers_to_save = 0)
-    : size_(size), target_(target), registers_to_save_(registers_to_save) {
-    ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
-    ASSERT_EQ(0, registers_to_save & target.bit());
-    set_comment("[ DeferredAllocateInNewSpace");
-  }
-  void Generate();
-
- private:
-  int size_;
-  Register target_;
-  int registers_to_save_;
-};
-
-
-void DeferredAllocateInNewSpace::Generate() {
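-  // Preserve the requested live registers across the runtime call; the
-  // allocation result arrives in eax and is moved to the target below.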
-  for (int i = 0; i < kNumRegs; i++) {
-    if (registers_to_save_ & (1 << i)) {
-      Register save_register = { i };
-      __ push(save_register);
-    }
-  }
-  __ push(Immediate(Smi::FromInt(size_)));
-  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
-  if (!target_.is(eax)) {
-    __ mov(target_, eax);
-  }
-  for (int i = kNumRegs - 1; i >= 0; i--) {
-    if (registers_to_save_ & (1 << i)) {
-      Register save_register = { i };
-      __ pop(save_register);
-    }
-  }
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ RegExp Literal");
-
-  // Retrieve the literals array and check the allocated entry.  Begin
-  // with a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ mov(literals.reg(),
-         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
-  // Load the literal at the ast saved index.
-  Result boilerplate = allocator_->Allocate();
-  ASSERT(boilerplate.is_valid());
-  int literal_offset =
-      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
-  __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
-  // Check whether we need to materialize the RegExp object.  If so,
-  // jump to the deferred code passing the literals array.
-  DeferredRegExpLiteral* deferred =
-      new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
-  __ cmp(boilerplate.reg(), FACTORY->undefined_value());
-  deferred->Branch(equal);
-  deferred->BindExit();
-
-  // The boilerplate register now holds the RegExp object.
-
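-  // Allocate a fresh copy of the boilerplate: each evaluation of the
-  // literal must yield a distinct JSRegExp object.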
-  Result tmp = allocator()->Allocate();
-  ASSERT(tmp.is_valid());
-
-  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-
-  DeferredAllocateInNewSpace* allocate_fallback =
-      new DeferredAllocateInNewSpace(size, literals.reg());
-  frame_->Push(&boilerplate);
-  frame_->SpillTop();
-  __ AllocateInNewSpace(size,
-                        literals.reg(),
-                        tmp.reg(),
-                        no_reg,
-                        allocate_fallback->entry_label(),
-                        TAG_OBJECT);
-  allocate_fallback->BindExit();
-  boilerplate = frame_->Pop();
-
-  // Copy from boilerplate to clone and return the clone.
-  for (int i = 0; i < size; i += kPointerSize) {
-    __ mov(tmp.reg(), FieldOperand(boilerplate.reg(), i));
-    __ mov(FieldOperand(literals.reg(), i), tmp.reg());
-  }
-  frame_->Push(&literals);
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ ObjectLiteral");
-
-  // Load a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ mov(literals.reg(),
-         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-  // Literal array.
-  frame_->Push(&literals);
-  // Literal index.
-  frame_->Push(Smi::FromInt(node->literal_index()));
-  // Constant properties.
-  frame_->Push(node->constant_properties());
-  // Should the object literal have fast elements?
-  frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
-  Result clone;
-  if (node->depth() > 1) {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
-  }
-  frame_->Push(&clone);
-
-  // Mark all computed expressions that are bound to a key that
-  // is shadowed by a later occurrence of the same key. For the
-  // marked expressions, no store code is emitted.
-  node->CalculateEmitStore();
-
-  for (int i = 0; i < node->properties()->length(); i++) {
-    ObjectLiteral::Property* property = node->properties()->at(i);
-    switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-        break;
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
-        // else fall through.
-      case ObjectLiteral::Property::COMPUTED: {
-        Handle<Object> key(property->key()->handle());
-        if (key->IsSymbol()) {
-          // Duplicate the object as the IC receiver.
-          frame_->Dup();
-          Load(property->value());
-          if (property->emit_store()) {
-            Result ignored =
-                frame_->CallStoreIC(Handle<String>::cast(key), false,
-                                    strict_mode_flag());
-            // A test eax instruction following the store IC call would
-            // indicate the presence of an inlined version of the
-            // store. Add a nop to indicate that there is no such
-            // inlined version.
-            __ nop();
-          } else {
-            frame_->Drop(2);
-          }
-          break;
-        }
-        // Fall through
-      }
-      case ObjectLiteral::Property::PROTOTYPE: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
-        Load(property->value());
-        if (property->emit_store()) {
-          frame_->Push(Smi::FromInt(NONE));   // PropertyAttributes
-          // Ignore the result.
-          Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
-        } else {
-          frame_->Drop(3);
-        }
-        break;
-      }
-      case ObjectLiteral::Property::SETTER: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
-        frame_->Push(Smi::FromInt(1));
-        Load(property->value());
-        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        // Ignore the result.
-        break;
-      }
-      case ObjectLiteral::Property::GETTER: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
-        frame_->Push(Smi::FromInt(0));
-        Load(property->value());
-        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        // Ignore the result.
-        break;
-      }
-      default: UNREACHABLE();
-    }
-  }
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ ArrayLiteral");
-
-  // Load a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ mov(literals.reg(),
-         FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
-  frame_->Push(&literals);
-  frame_->Push(Smi::FromInt(node->literal_index()));
-  frame_->Push(node->constant_elements());
-  int length = node->values()->length();
-  Result clone;
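-  // Choose a cloning strategy: the copy-on-write stub when the constant
-  // elements are a COW array, the runtime for deep or overly long
-  // literals, and the shallow-clone stub otherwise.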
-  if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
-    clone = frame_->CallStub(&stub, 3);
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->cow_arrays_created_stub(), 1);
-  } else if (node->depth() > 1) {
-    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
-    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
-  } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
-    clone = frame_->CallStub(&stub, 3);
-  }
-  frame_->Push(&clone);
-
-  // Generate code to set the elements in the array that are not
-  // literals.
-  for (int i = 0; i < length; i++) {
-    Expression* value = node->values()->at(i);
-
-    if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
-      continue;
-    }
-
-    // The property must be set by generated code.
-    Load(value);
-
-    // Get the property value off the stack.
-    Result prop_value = frame_->Pop();
-    prop_value.ToRegister();
-
-    // Fetch the array literal while leaving a copy on the stack and
-    // use it to get the elements array.
-    frame_->Dup();
-    Result elements = frame_->Pop();
-    elements.ToRegister();
-    frame_->Spill(elements.reg());
-    // Get the elements array.
-    __ mov(elements.reg(),
-           FieldOperand(elements.reg(), JSObject::kElementsOffset));
-
-    // Write to the indexed properties array.
-    int offset = i * kPointerSize + FixedArray::kHeaderSize;
-    __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
-
-    // Update the write barrier for the array address.
-    frame_->Spill(prop_value.reg());  // Overwritten by the write barrier.
-    Result scratch = allocator_->Allocate();
-    ASSERT(scratch.is_valid());
-    __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
-  }
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  ASSERT(!in_safe_int32_mode());
-  ASSERT(!in_spilled_code());
-  // Call runtime routine to allocate the catch extension object and
-  // assign the exception value to the catch variable.
-  Comment cmnt(masm_, "[ CatchExtensionObject");
-  Load(node->key());
-  Load(node->value());
-  Result result =
-      frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm(), "[ Variable Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  ASSERT(var != NULL);
-  Slot* slot = var->AsSlot();
-  ASSERT(slot != NULL);
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-    Load(node->value());
-
-    // Perform the binary operation.
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    // Construct the implicit binary operation.
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Perform the assignment.
-  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
-    CodeForSourcePosition(node->position());
-    StoreToSlot(slot,
-                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
-  }
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm(), "[ Named Property Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-  ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
-  // Initialize name and evaluate the receiver sub-expression if necessary. If
-  // the receiver is trivial it is not placed on the stack at this point, but
-  // loaded whenever actually needed.
-  Handle<String> name;
-  bool is_trivial_receiver = false;
-  if (var != NULL) {
-    name = var->name();
-  } else {
-    Literal* lit = prop->key()->AsLiteral();
-    ASSERT_NOT_NULL(lit);
-    name = Handle<String>::cast(lit->handle());
-    // Do not materialize the receiver on the frame if it is trivial.
-    is_trivial_receiver = prop->obj()->IsTrivial();
-    if (!is_trivial_receiver) Load(prop->obj());
-  }
-
-  // Change to slow case at the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
-  if (node->starts_initialization_block()) {
-    // An initialization block consists of assignments of the form
-    // expr.x = ..., so this is never an assignment to a variable and
-    // there must be a receiver object.
-    ASSERT_EQ(NULL, var);
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else {
-      frame()->Dup();
-    }
-    Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare
-  // for that, add an extra copy of the receiver to the frame so that it can
-  // be converted back to fast case after the assignment.
-  if (node->ends_initialization_block() && !is_trivial_receiver) {
-    frame()->Dup();
-  }
-
-  // Stack layout:
-  // [tos]   : receiver (only materialized if non-trivial)
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else if (var != NULL) {
-      // The LoadIC stub expects the object in eax.
-      // Freeing eax causes the code generator to load the global into it.
-      frame_->Spill(eax);
-      LoadGlobal();
-    } else {
-      frame()->Dup();
-    }
-    Result value = EmitNamedLoad(name, var != NULL);
-    frame()->Push(&value);
-    Load(node->value());
-
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    // Construct the implicit binary operation.
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : receiver (only materialized if non-trivial)
-  // [tos+2] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(var == NULL || var->mode() != Variable::CONST);
-  ASSERT_NE(Token::INIT_CONST, node->op());
-  if (is_trivial_receiver) {
-    Result value = frame()->Pop();
-    frame()->Push(prop->obj());
-    frame()->Push(&value);
-  }
-  CodeForSourcePosition(node->position());
-  bool is_contextual = (var != NULL);
-  Result answer = EmitNamedStore(name, is_contextual);
-  frame()->Push(&answer);
-
-  // Stack layout:
-  // [tos]   : result
-  // [tos+1] : receiver if at the end of an initialization block
-
-  if (node->ends_initialization_block()) {
-    ASSERT_EQ(NULL, var);
-    // The argument to the runtime call is the receiver.
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else {
-      // A copy of the receiver is below the value of the assignment.  Swap
-      // the receiver and the value of the assignment expression.
-      Result result = frame()->Pop();
-      Result receiver = frame()->Pop();
-      frame()->Push(&result);
-      frame()->Push(&receiver);
-    }
-    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT_EQ(frame()->height(), original_height + 1);
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm_, "[ Keyed Property Assignment");
-  Property* prop = node->target()->AsProperty();
-  ASSERT_NOT_NULL(prop);
-
-  // Evaluate the receiver subexpression.
-  Load(prop->obj());
-
-  // Change to slow case at the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
-  if (node->starts_initialization_block()) {
-    frame_->Dup();
-    Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare
-  // for that, add an extra copy of the receiver to the frame so that it can
-  // be converted back to fast case after the assignment.
-  if (node->ends_initialization_block()) {
-    frame_->Dup();
-  }
-
-  // Evaluate the key subexpression.
-  Load(prop->key());
-
-  // Stack layout:
-  // [tos]   : key
-  // [tos+1] : receiver
-  // [tos+2] : receiver if at the end of an initialization block
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    // Duplicate receiver and key for loading the current property value.
-    frame()->PushElementAt(1);
-    frame()->PushElementAt(1);
-    Result value = EmitKeyedLoad();
-    frame()->Push(&value);
-    Load(node->value());
-
-    // Perform the binary operation.
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : key
-  // [tos+2] : receiver
-  // [tos+3] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(node->op() != Token::INIT_CONST);
-  CodeForSourcePosition(node->position());
-  Result answer = EmitKeyedStore(prop->key()->type());
-  frame()->Push(&answer);
-
-  // Stack layout:
-  // [tos]   : result
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Change to fast case at the end of an initialization block.
-  if (node->ends_initialization_block()) {
-    // The argument to the runtime call is the extra copy of the receiver,
-    // which is below the value of the assignment.  Swap the receiver and
-    // the value of the assignment expression.
-    Result result = frame()->Pop();
-    Result receiver = frame()->Pop();
-    frame()->Push(&result);
-    frame()->Push(&receiver);
-    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-  ASSERT(!in_safe_int32_mode());
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-
-  if (var != NULL && !var->is_global()) {
-    EmitSlotAssignment(node);
-
-  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
-             (var != NULL && var->is_global())) {
-    // Properties whose keys are property names and global variables are
-    // treated as named property references.  We do not need to consider
-    // global 'this' because it is not a valid left-hand side.
-    EmitNamedPropertyAssignment(node);
-
-  } else if (prop != NULL) {
-    // Other properties (including rewritten parameters for a function that
-    // uses arguments) are keyed property assignments.
-    EmitKeyedPropertyAssignment(node);
-
-  } else {
-    // Invalid left-hand side.
-    Load(node->target());
-    Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
-    // The runtime call doesn't actually return, but the code generator
-    // will still generate code and expects a certain frame height.
-    frame()->Push(&result);
-  }
-
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ Throw");
-  Load(node->exception());
-  Result result = frame_->CallRuntime(Runtime::kThrow, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ Property");
-  Reference property(this, node);
-  property.GetValue();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ Call");
-
-  Expression* function = node->expression();
-  ZoneList<Expression*>* args = node->arguments();
-
-  // Check if the function is a variable or a property.
-  Variable* var = function->AsVariableProxy()->AsVariable();
-  Property* property = function->AsProperty();
-
-  // ------------------------------------------------------------------------
-  // Fast-case: Use inline caching.
-  // ---
-  // According to ECMA-262, section 11.2.3, page 44, the function to call
-  // must be resolved after the arguments have been evaluated. The IC code
-  // automatically handles this by loading the arguments before the function
-  // is resolved in cache misses (this also holds for megamorphic calls).
-  // ------------------------------------------------------------------------
-
-  if (var != NULL && var->is_possibly_eval()) {
-    // ----------------------------------
-    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
-    // ----------------------------------
-
-    // In a call to eval, we first call %ResolvePossiblyDirectEval to
-    // resolve the function we need to call and the receiver of the
-    // call.  Then we call the resolved function using the given
-    // arguments.
-
-    // Prepare the stack for the call to the resolved function.
-    Load(function);
-
-    // Allocate a frame slot for the receiver.
-    frame_->Push(FACTORY->undefined_value());
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-      frame_->SpillTop();
-    }
-
-    // Result to hold the result of the function resolution and the
-    // final result of the eval call.
-    Result result;
-
-    // If we know that eval can only be shadowed by eval-introduced
-    // variables we attempt to load the global eval function directly
-    // in generated code. If we succeed, there is no need to perform a
-    // context lookup in the runtime system.
-    JumpTarget done;
-    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
-      ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
-      JumpTarget slow;
-      // Prepare the stack for the call to
-      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
-      // function, the first argument to the eval call and the
-      // receiver.
-      Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
-                                                     NOT_INSIDE_TYPEOF,
-                                                     &slow);
-      frame_->Push(&fun);
-      if (arg_count > 0) {
-        frame_->PushElementAt(arg_count);
-      } else {
-        frame_->Push(FACTORY->undefined_value());
-      }
-      frame_->PushParameterAt(-1);
-
-      // Push the strict mode flag.
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-
-      // Resolve the call.
-      result =
-          frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
-      done.Jump(&result);
-      slow.Bind();
-    }
-
-    // Prepare the stack for the call to ResolvePossiblyDirectEval by
-    // pushing the loaded function, the first argument to the eval
-    // call and the receiver.
-    frame_->PushElementAt(arg_count + 1);
-    if (arg_count > 0) {
-      frame_->PushElementAt(arg_count);
-    } else {
-      frame_->Push(FACTORY->undefined_value());
-    }
-    frame_->PushParameterAt(-1);
-
-    // Push the strict mode flag.
-    frame_->Push(Smi::FromInt(strict_mode_flag()));
-
-    // Resolve the call.
-    result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
-    // If we generated fast-case code bind the jump-target where fast
-    // and slow case merge.
-    if (done.is_linked()) done.Bind(&result);
-
-    // The runtime call returns a pair of values in eax (function) and
-    // edx (receiver). Touch up the stack with the right values.
-    Result receiver = allocator_->Allocate(edx);
-    frame_->SetElementAt(arg_count + 1, &result);
-    frame_->SetElementAt(arg_count, &receiver);
-    receiver.Unuse();
-
-    // Call the function.
-    CodeForSourcePosition(node->position());
-    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
-    result = frame_->CallStub(&call_function, arg_count + 1);
-
-    // Restore the context and overwrite the function on the stack with
-    // the result.
-    frame_->RestoreContextRegister();
-    frame_->SetElementAt(0, &result);
-
-  } else if (var != NULL && !var->is_this() && var->is_global()) {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
-    // ----------------------------------
-
-    // Pass the global object as the receiver and let the IC stub
-    // patch the stack to use the global proxy as 'this' in the
-    // invoked function.
-    LoadGlobal();
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-      frame_->SpillTop();
-    }
-
-    // Push the name of the function onto the frame.
-    frame_->Push(var->name());
-
-    // Call the IC initialization code.
-    CodeForSourcePosition(node->position());
-    Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
-                                       arg_count,
-                                       loop_nesting());
-    frame_->RestoreContextRegister();
-    frame_->Push(&result);
-
-  } else if (var != NULL && var->AsSlot() != NULL &&
-             var->AsSlot()->type() == Slot::LOOKUP) {
-    // ----------------------------------
-    // JavaScript examples:
-    //
-    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
-    //
-    //  function f() {};
-    //  function g() {
-    //    eval(...);
-    //    f();  // f could be in extension object.
-    //  }
-    // ----------------------------------
-
-    JumpTarget slow, done;
-    Result function;
-
-    // Generate fast case for loading functions from slots that
-    // correspond to local/global variables or arguments unless they
-    // are shadowed by eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
-                                    NOT_INSIDE_TYPEOF,
-                                    &function,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    // Enter the runtime system to load the function from the context.
-    // Sync the frame so we can push the arguments directly into
-    // place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(esi);
-    frame_->EmitPush(Immediate(var->name()));
-    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
-    // The runtime call returns a pair of values in eax and edx.  The
-    // looked-up function is in eax and the receiver is in edx.  These
-    // register references are not ref counted here.  We spill them
-    // eagerly since they are arguments to an inevitable call (and are
-    // not sharable by the arguments).
-    ASSERT(!allocator()->is_used(eax));
-    frame_->EmitPush(eax);
-
-    // Load the receiver.
-    ASSERT(!allocator()->is_used(edx));
-    frame_->EmitPush(edx);
-
-    // If fast case code has been generated, emit code to push the
-    // function and receiver and have the slow path jump around this
-    // code.
-    if (done.is_linked()) {
-      JumpTarget call;
-      call.Jump();
-      done.Bind(&function);
-      frame_->Push(&function);
-      LoadGlobalReceiver();
-      call.Bind();
-    }
-
-    // Call the function.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-
-  } else if (property != NULL) {
-    // Check if the key is a literal string.
-    Literal* literal = property->key()->AsLiteral();
-
-    if (literal != NULL && literal->handle()->IsSymbol()) {
-      // ------------------------------------------------------------------
-      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
-      // ------------------------------------------------------------------
-
-      Handle<String> name = Handle<String>::cast(literal->handle());
-
-      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
-          name->IsEqualTo(CStrVector("apply")) &&
-          args->length() == 2 &&
-          args->at(1)->AsVariableProxy() != NULL &&
-          args->at(1)->AsVariableProxy()->IsArguments()) {
-        // Use the optimized Function.prototype.apply that avoids
-        // allocating lazily allocated arguments objects.
-        CallApplyLazy(property->obj(),
-                      args->at(0),
-                      args->at(1)->AsVariableProxy(),
-                      node->position());
-
-      } else {
-        // Push the receiver onto the frame.
-        Load(property->obj());
-
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-          frame_->SpillTop();
-        }
-
-        // Push the name of the function onto the frame.
-        frame_->Push(name);
-
-        // Call the IC initialization code.
-        CodeForSourcePosition(node->position());
-        Result result =
-            frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
-                               loop_nesting());
-        frame_->RestoreContextRegister();
-        frame_->Push(&result);
-      }
-
-    } else {
-      // -------------------------------------------
-      // JavaScript example: 'array[index](1, 2, 3)'
-      // -------------------------------------------
-
-      // Load the function to call from the property through a reference.
-
-      // Pass receiver to called function.
-      if (property->is_synthetic()) {
-        Reference ref(this, property);
-        ref.GetValue();
-        // Use global object as receiver.
-        LoadGlobalReceiver();
-        // Call the function.
-        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
-      } else {
-        // Push the receiver onto the frame.
-        Load(property->obj());
-
-        // Load the name of the function.
-        Load(property->key());
-
-        // Swap the name of the function and the receiver on the stack to follow
-        // the calling convention for call ICs.
-        Result key = frame_->Pop();
-        Result receiver = frame_->Pop();
-        frame_->Push(&key);
-        frame_->Push(&receiver);
-        key.Unuse();
-        receiver.Unuse();
-
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-          frame_->SpillTop();
-        }
-
-        // Place the key on top of stack and call the IC initialization code.
-        frame_->PushElementAt(arg_count + 1);
-        CodeForSourcePosition(node->position());
-        Result result =
-            frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
-                                    arg_count,
-                                    loop_nesting());
-        frame_->Drop();  // Drop the key still on the stack.
-        frame_->RestoreContextRegister();
-        frame_->Push(&result);
-      }
-    }
-
-  } else {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
-    // ----------------------------------
-
-    // Load the function.
-    Load(function);
-
-    // Pass the global proxy as the receiver.
-    LoadGlobalReceiver();
-
-    // Call the function.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-  }
-}
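
// Summary of the call shapes dispatched by VisitCall above:
//   possibly-eval calls    -> %ResolvePossiblyDirectEval, then CallFunctionStub
//   global variables       -> named CallIC with the global object as receiver
//   LOOKUP slots           -> %LoadContextSlot (with a fast dynamic-global path)
//   named properties       -> named CallIC, or the lazy-arguments apply shortcut
//   keyed properties       -> keyed CallIC; synthetic ones use the global receiver
//   any other expression   -> CallFunctionStub with the global proxy as receiver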
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ CallNew");
-
-  // According to ECMA-262, section 11.2.2, page 44, the function
-  // expression in new calls must be evaluated before the
-  // arguments. This is different from ordinary calls, where the
-  // actual function to call is resolved after the arguments have been
-  // evaluated.
-
-  // Push constructor on the stack.  If it's not a function it's used as
-  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
-  // ignored.
-  Load(node->expression());
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = node->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  CodeForSourcePosition(node->position());
-  Result result = frame_->CallConstructor(arg_count);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  __ test(value.reg(), Immediate(kSmiTagMask));
-  value.Unuse();
-  destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  // Conditionally generate a log call.
-  // Args:
-  //   0 (literal string): The type of logging (corresponds to the flags).
-  //     This is used to determine whether or not to generate the log call.
-  //   1 (string): Format string.  Access the string at argument index 2
-  //     with '%2s' (see Logger::LogRuntime for all the formats).
-  //   2 (array): Arguments to the format string.
-  ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (ShouldGenerateLog(args->at(0))) {
-    Load(args->at(1));
-    Load(args->at(2));
-    frame_->CallRuntime(Runtime::kLog, 2);
-  }
-#endif
-  // Finally, we're expected to leave a value on the top of the stack.
-  frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  __ test(value.reg(), Immediate(kSmiTagMask | kSmiSignMask));
-  value.Unuse();
-  destination()->Split(zero);
-}
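
// Both checks above rely on V8's 32-bit smi encoding (kSmiTag == 0,
// kSmiTagSize == 1): a smi stores its integer shifted left by one, so bit
// 0 is clear and bit 31 carries the sign; the 'shr reg, 1' sequences later
// in this file decode such values. A standalone C++ sketch with the ia32
// mask values:

#include <cstdint>

static const uint32_t kTagMask = 1u;            // kSmiTagMask on ia32.
static const uint32_t kSignMask = 0x80000000u;  // kSmiSignMask on ia32.

inline bool IsSmi(uint32_t value) {
  return (value & kTagMask) == 0;  // test value, kSmiTagMask; branch on zero.
}

inline bool IsNonNegativeSmi(uint32_t value) {
  return (value & (kTagMask | kSignMask)) == 0;
}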
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
-  DeferredStringCharCodeAt(Register object,
-                           Register index,
-                           Register scratch,
-                           Register result)
-      : result_(result),
-        char_code_at_generator_(object,
-                                index,
-                                scratch,
-                                result,
-                                &need_conversion_,
-                                &need_conversion_,
-                                &index_out_of_range_,
-                                STRING_INDEX_IS_NUMBER) {}
-
-  StringCharCodeAtGenerator* fast_case_generator() {
-    return &char_code_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move the undefined value into the result register, which will
-    // trigger conversion.
-    __ Set(result_, Immediate(FACTORY->undefined_value()));
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // NaN.
-    __ Set(result_, Immediate(FACTORY->nan_value()));
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns undefined in order to trigger conversion.
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharCodeAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Result index = frame_->Pop();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  index.ToRegister();
-  // We might mutate the object register.
-  frame_->Spill(object.reg());
-
-  // We need two extra registers.
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-  Result scratch = allocator()->Allocate();
-  ASSERT(scratch.is_valid());
-
-  DeferredStringCharCodeAt* deferred =
-      new DeferredStringCharCodeAt(object.reg(),
-                                   index.reg(),
-                                   scratch.reg(),
-                                   result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
-  DeferredStringCharFromCode(Register code,
-                             Register result)
-      : char_from_code_generator_(code, result) {}
-
-  StringCharFromCodeGenerator* fast_case_generator() {
-    return &char_from_code_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_from_code_generator_.GenerateSlow(masm(), call_helper);
-  }
-
- private:
-  StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharFromCode");
-  ASSERT(args->length() == 1);
-
-  Load(args->at(0));
-
-  Result code = frame_->Pop();
-  code.ToRegister();
-  ASSERT(code.is_valid());
-
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-
-  DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
-      code.reg(), result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
-  DeferredStringCharAt(Register object,
-                       Register index,
-                       Register scratch1,
-                       Register scratch2,
-                       Register result)
-      : result_(result),
-        char_at_generator_(object,
-                           index,
-                           scratch1,
-                           scratch2,
-                           result,
-                           &need_conversion_,
-                           &need_conversion_,
-                           &index_out_of_range_,
-                           STRING_INDEX_IS_NUMBER) {}
-
-  StringCharAtGenerator* fast_case_generator() {
-    return &char_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move smi zero into the result register, which will trigger
-    // conversion.
-    __ Set(result_, Immediate(Smi::FromInt(0)));
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // the empty string.
-    __ Set(result_, Immediate(FACTORY->empty_string()));
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Result index = frame_->Pop();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  index.ToRegister();
-  // We might mutate the object register.
-  frame_->Spill(object.reg());
-
-  // We need three extra registers.
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-  Result scratch1 = allocator()->Allocate();
-  ASSERT(scratch1.is_valid());
-  Result scratch2 = allocator()->Allocate();
-  ASSERT(scratch2.is_valid());
-
-  DeferredStringCharAt* deferred =
-      new DeferredStringCharAt(object.reg(),
-                               index.reg(),
-                               scratch1.reg(),
-                               scratch2.reg(),
-                               result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  __ test(value.reg(), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(equal);
-  // It is a heap object - get map.
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  // Check if the object is a JS array or not.
-  __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, temp.reg());
-  value.Unuse();
-  temp.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  Label bailout, done, one_char_separator, long_separator,
-      non_trivial_array, not_size_one_array, loop, loop_condition,
-      loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-
-  ASSERT(args->length() == 2);
-  // We will leave the separator on the stack until the end of the function.
-  Load(args->at(1));
-  // Load the array into eax.
-  Load(args->at(0));
-  Result array_result = frame_->Pop();
-  array_result.ToRegister(eax);
-  frame_->SpillAll();
-
-  // All aliases of the same register have disjoint lifetimes.
-  Register array = eax;
-  Register elements = no_reg;  // Will be eax.
-  Register index = edx;
-  Register string_length = ecx;
-  Register string = esi;
-  Register scratch = ebx;
-  Register array_length = edi;
-  Register result_pos = no_reg;  // Will be edi.
-
-  // Separator operand is already pushed.
-  Operand separator_operand = Operand(esp, 2 * kPointerSize);
-  Operand result_operand = Operand(esp, 1 * kPointerSize);
-  Operand array_length_operand = Operand(esp, 0);
-  __ sub(Operand(esp), Immediate(2 * kPointerSize));
-  __ cld();
-  // Check that the array is a JSArray.
-  __ test(array, Immediate(kSmiTagMask));
-  __ j(zero, &bailout);
-  __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
-  __ j(not_equal, &bailout);
-
-  // Check that the array has fast elements.
-  __ test_b(FieldOperand(scratch, Map::kBitField2Offset),
-            1 << Map::kHasFastElements);
-  __ j(zero, &bailout);
-
-  // If the array has length zero, return the empty string.
-  __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
-  __ sar(array_length, 1);
-  __ j(not_zero, &non_trivial_array);
-  __ mov(result_operand, FACTORY->empty_string());
-  __ jmp(&done);
-
-  // Save the array length.
-  __ bind(&non_trivial_array);
-  __ mov(array_length_operand, array_length);
-
-  // Save the FixedArray containing array's elements.
-  // End of array's live range.
-  elements = array;
-  __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
-  array = no_reg;
-
-  // Check that all array elements are sequential ASCII strings and
-  // accumulate the sum of their lengths as a smi-encoded value.
-  __ Set(index, Immediate(0));
-  __ Set(string_length, Immediate(0));
-  // Loop condition: while (index < length).
-  // Live loop registers: index, array_length, string,
-  //                      scratch, string_length, elements.
-  __ jmp(&loop_condition);
-  __ bind(&loop);
-  __ cmp(index, Operand(array_length));
-  __ j(greater_equal, &done);
-
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ test(string, Immediate(kSmiTagMask));
-  __ j(zero, &bailout);
-  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-  __ add(string_length,
-         FieldOperand(string, SeqAsciiString::kLengthOffset));
-  __ j(overflow, &bailout);
-  __ add(Operand(index), Immediate(1));
-  __ bind(&loop_condition);
-  __ cmp(index, Operand(array_length));
-  __ j(less, &loop);
-
-  // If array_length is 1, return elements[0], a string.
-  __ cmp(array_length, 1);
-  __ j(not_equal, &not_size_one_array);
-  __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
-  __ mov(result_operand, scratch);
-  __ jmp(&done);
-
-  __ bind(&not_size_one_array);
-
-  // End of array_length live range.
-  result_pos = array_length;
-  array_length = no_reg;
-
-  // Live registers:
-  // string_length: Sum of string lengths, as a smi.
-  // elements: FixedArray of strings.
-
-  // Check that the separator is a flat ASCII string.
-  __ mov(string, separator_operand);
-  __ test(string, Immediate(kSmiTagMask));
-  __ j(zero, &bailout);
-  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, Immediate(
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
-  __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
-  __ j(not_equal, &bailout);
-
-  // Add (separator length times array_length) - separator length
-  // to string_length.
-  __ mov(scratch, separator_operand);
-  __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
-  __ sub(string_length, Operand(scratch));  // May be negative, temporarily.
-  __ imul(scratch, array_length_operand);
-  __ j(overflow, &bailout);
-  __ add(string_length, Operand(scratch));
-  __ j(overflow, &bailout);
-
-  __ shr(string_length, 1);
-  // Live registers and stack values:
-  //   string_length
-  //   elements
-  __ AllocateAsciiString(result_pos, string_length, scratch,
-                         index, string, &bailout);
-  __ mov(result_operand, result_pos);
-  __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
-
-  __ mov(string, separator_operand);
-  __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
-         Immediate(Smi::FromInt(1)));
-  __ j(equal, &one_char_separator);
-  __ j(greater, &long_separator);
-
-  // Empty separator case
-  __ mov(index, Immediate(0));
-  __ jmp(&loop_1_condition);
-  // Loop condition: while (index < length).
-  __ bind(&loop_1);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-  //   elements: the FixedArray of strings we are joining.
-
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqAsciiString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
-  __ bind(&loop_1_condition);
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_1);  // End while (index < length).
-  __ jmp(&done);
-
-  // One-character separator case
-  __ bind(&one_char_separator);
-  // Replace separator with its ascii character value.
-  __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
-  __ mov_b(separator_operand, scratch);
-
-  __ Set(index, Immediate(0));
-  // Jump into the loop after the code that copies the separator, so the
-  // first element is not preceded by a separator.
-  __ jmp(&loop_2_entry);
-  // Loop condition: while (index < length).
-  __ bind(&loop_2);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-
-  // Copy the separator character to the result.
-  __ mov_b(scratch, separator_operand);
-  __ mov_b(Operand(result_pos, 0), scratch);
-  __ inc(result_pos);
-
-  __ bind(&loop_2_entry);
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqAsciiString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
-
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_2);  // End while (index < length).
-  __ jmp(&done);
-
-  // Long separator case (separator is more than one character).
-  __ bind(&long_separator);
-
-  __ Set(index, Immediate(0));
-  // Jump into the loop after the code that copies the separator, so the
-  // first element is not preceded by a separator.
-  __ jmp(&loop_3_entry);
-  // Loop condition: while (index < length).
-  __ bind(&loop_3);
-  // Each iteration of the loop concatenates one string to the result.
-  // Live values in registers:
-  //   index: which element of the elements array we are adding to the result.
-  //   result_pos: the position to which we are currently copying characters.
-
-  // Copy the separator to the result.
-  __ mov(string, separator_operand);
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqAsciiString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-
-  __ bind(&loop_3_entry);
-  // Get string = array[index].
-  __ mov(string, FieldOperand(elements, index,
-                              times_pointer_size,
-                              FixedArray::kHeaderSize));
-  __ mov(string_length,
-         FieldOperand(string, String::kLengthOffset));
-  __ shr(string_length, 1);
-  __ lea(string,
-         FieldOperand(string, SeqAsciiString::kHeaderSize));
-  __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
-
-  __ cmp(index, array_length_operand);
-  __ j(less, &loop_3);  // End while (index < length).
-  __ jmp(&done);
-
-  __ bind(&bailout);
-  __ mov(result_operand, FACTORY->undefined_value());
-  __ bind(&done);
-  __ mov(eax, result_operand);
-  // Drop temp values from the stack, and restore context register.
-  __ add(Operand(esp), Immediate(2 * kPointerSize));
-
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  frame_->Drop(1);
-  frame_->Push(&array_result);
-}
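
// The generator above follows the classic preallocating join strategy:
// validate every element, sum the lengths, allocate the result once, then
// copy in a single forward pass, with copy loops specialized for empty,
// one-character, and longer separators. A compact C++ sketch of the same
// strategy, without the specialization (illustrative, not V8 code):

#include <string>
#include <vector>

std::string FastJoin(const std::vector<std::string>& parts,
                     const std::string& separator) {
  if (parts.empty()) return std::string();
  size_t total = separator.size() * (parts.size() - 1);
  for (const std::string& part : parts) total += part.size();
  std::string result;
  result.reserve(total);  // One allocation, like AllocateAsciiString above.
  result += parts[0];
  for (size_t i = 1; i < parts.size(); ++i) {
    result += separator;
    result += parts[i];
  }
  return result;
}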
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  __ test(value.reg(), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(equal);
-  // It is a heap object - get map.
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  // Check if the object is a regexp.
-  __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, temp.reg());
-  value.Unuse();
-  temp.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) === 'RegExp')
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-
-  __ test(obj.reg(), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(zero);
-  __ cmp(obj.reg(), FACTORY->null_value());
-  destination()->true_target()->Branch(equal);
-
-  Result map = allocator()->Allocate();
-  ASSERT(map.is_valid());
-  __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  // Undetectable objects behave like undefined when tested with typeof.
-  __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
-            1 << Map::kIsUndetectable);
-  destination()->false_target()->Branch(not_zero);
-  // Do a range test for JSObject type.  We can't use
-  // MacroAssembler::IsInstanceJSObjectType, because we are using a
-  // ControlDestination, so we copy its implementation here.
-  __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
-  __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
-  __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
-  obj.Unuse();
-  map.Unuse();
-  destination()->Split(below_equal);
-}
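
// The sub/cmp pair above is the standard unsigned range test: after
// subtracting FIRST_JS_OBJECT_TYPE, every instance type within
// [FIRST_JS_OBJECT_TYPE, LAST_JS_OBJECT_TYPE] lands in [0, LAST - FIRST],
// while anything below FIRST wraps around to a large unsigned value, so a
// single below_equal branch covers both bounds. In standalone C++:

#include <cstdint>

inline bool InRange(uint8_t x, uint8_t lo, uint8_t hi) {
  return static_cast<uint8_t>(x - lo) <= static_cast<uint8_t>(hi - lo);
}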
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) === 'RegExp' ||
-  //  typeof(arg) === 'function').
-  // It includes undetectable objects (as opposed to IsObject).
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  __ test(value.reg(), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(equal);
-
-  // Check that this is an object.
-  frame_->Spill(value.reg());
-  __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, value.reg());
-  value.Unuse();
-  destination()->Split(above_equal);
-}
-
-
-// Deferred code to check whether a String wrapper object is safe to use
-// with the default valueOf. This code is called after the bit caching this
-// information in the object's map has been checked; that map is expected in
-// the map_result_ register. On return, map_result_ contains 1 for true and
-// 0 for false.
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
-  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
-                                               Register map_result,
-                                               Register scratch1,
-                                               Register scratch2)
-      : object_(object),
-        map_result_(map_result),
-        scratch1_(scratch1),
-        scratch2_(scratch2) { }
-
-  virtual void Generate() {
-    Label false_result;
-
-    // Check that map is loaded as expected.
-    if (FLAG_debug_code) {
-      __ cmp(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-      __ Assert(equal, "Map not in expected register");
-    }
-
-    // Check for fast case object. Generate false result for slow case object.
-    __ mov(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
-    __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
-    __ cmp(scratch1_, FACTORY->hash_table_map());
-    __ j(equal, &false_result);
-
-    // Look for the valueOf symbol in the descriptor array and indicate
-    // false if it is found. The type is not checked, so a transition on
-    // valueOf produces a false negative.
-    __ mov(map_result_,
-           FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
-    __ mov(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
-    // map_result_: descriptor array
-    // scratch1_: length of descriptor array
-    // Calculate the end of the descriptor array.
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    STATIC_ASSERT(kPointerSize == 4);
-    __ lea(scratch1_,
-           Operand(map_result_, scratch1_, times_2, FixedArray::kHeaderSize));
-    // Calculate location of the first key name.
-    __ add(Operand(map_result_),
-           Immediate(FixedArray::kHeaderSize +
-                     DescriptorArray::kFirstIndex * kPointerSize));
-    // Loop through all the keys in the descriptor array. If one of these
-    // is the symbol valueOf, the result is false.
-    Label entry, loop;
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ mov(scratch2_, FieldOperand(map_result_, 0));
-    __ cmp(scratch2_, FACTORY->value_of_symbol());
-    __ j(equal, &false_result);
-    __ add(Operand(map_result_), Immediate(kPointerSize));
-    __ bind(&entry);
-    __ cmp(map_result_, Operand(scratch1_));
-    __ j(not_equal, &loop);
-
-    // Reload map as register map_result_ was used as temporary above.
-    __ mov(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-
-    // If no valueOf property is found on the object, check that its
-    // prototype is the unmodified String prototype. If not, the result is
-    // false.
-    __ mov(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
-    __ test(scratch1_, Immediate(kSmiTagMask));
-    __ j(zero, &false_result);
-    __ mov(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
-    __ mov(scratch2_, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ mov(scratch2_,
-           FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
-    __ cmp(scratch1_,
-           ContextOperand(scratch2_,
-                          Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
-    __ j(not_equal, &false_result);
-    // Set the bit in the map to indicate that it has been checked as safe
-    // for the default valueOf, and set the result to true.
-    __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
-           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
-    __ Set(map_result_, Immediate(1));
-    __ jmp(exit_label());
-    __ bind(&false_result);
-    // Set false result.
-    __ Set(map_result_, Immediate(0));
-  }
-
- private:
-  Register object_;
-  Register map_result_;
-  Register scratch1_;
-  Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();  // Pop the string wrapper.
-  obj.ToRegister();
-  ASSERT(obj.is_valid());
-  if (FLAG_debug_code) {
-    __ AbortIfSmi(obj.reg());
-  }
-
-  // Check whether this map has already been checked to be safe for default
-  // valueOf.
-  Result map_result = allocator()->Allocate();
-  ASSERT(map_result.is_valid());
-  __ mov(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  __ test_b(FieldOperand(map_result.reg(), Map::kBitField2Offset),
-            1 << Map::kStringWrapperSafeForDefaultValueOf);
-  destination()->true_target()->Branch(not_zero);
-
-  // We need an additional two scratch registers for the deferred code.
-  Result temp1 = allocator()->Allocate();
-  ASSERT(temp1.is_valid());
-  Result temp2 = allocator()->Allocate();
-  ASSERT(temp2.is_valid());
-
-  DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
-      new DeferredIsStringWrapperSafeForDefaultValueOf(
-          obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
-  deferred->Branch(zero);
-  deferred->BindExit();
-  __ test(map_result.reg(), Operand(map_result.reg()));
-  obj.Unuse();
-  map_result.Unuse();
-  temp1.Unuse();
-  temp2.Unuse();
-  destination()->Split(not_equal);
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (%_ClassOf(arg) === 'Function')
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  __ test(obj.reg(), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(zero);
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, temp.reg());
-  obj.Unuse();
-  temp.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  __ test(obj.reg(), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(zero);
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  __ mov(temp.reg(),
-         FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
-            1 << Map::kIsUndetectable);
-  obj.Unuse();
-  temp.Unuse();
-  destination()->Split(not_zero);
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
-  // Get the frame pointer for the calling frame.
-  Result fp = allocator()->Allocate();
-  __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-
-  // Skip the arguments adaptor frame if it exists.
-  Label check_frame_marker;
-  __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &check_frame_marker);
-  __ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
-
-  // Check the marker in the calling frame.
-  __ bind(&check_frame_marker);
-  __ cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
-         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-  fp.Unuse();
-  destination()->Split(equal);
-}
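
// The construct-call test above walks one frame up the stack and skips an
// arguments adaptor frame if one is present before comparing the frame
// marker. An illustrative C++ analogue (this struct layout is assumed for
// exposition; it is not V8's StandardFrameConstants layout):

#include <cstdint>

struct Frame {
  Frame* caller_fp;  // Corresponds to kCallerFPOffset.
  intptr_t context;  // kContextOffset; holds the adaptor sentinel, if any.
  intptr_t marker;   // kMarkerOffset.
};

inline bool IsConstructCall(const Frame* fp, intptr_t adaptor_sentinel,
                            intptr_t construct_marker) {
  const Frame* caller = fp->caller_fp;
  if (caller->context == adaptor_sentinel) caller = caller->caller_fp;
  return caller->marker == construct_marker;
}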
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
-  Result fp = allocator_->Allocate();
-  Result result = allocator_->Allocate();
-  ASSERT(fp.is_valid() && result.is_valid());
-
-  Label exit;
-
-  // Get the number of formal parameters.
-  __ Set(result.reg(), Immediate(Smi::FromInt(scope()->num_parameters())));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-  __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &exit);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ mov(result.reg(),
-         Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ bind(&exit);
-  result.set_type_info(TypeInfo::Smi());
-  if (FLAG_debug_code) __ AbortIfNotSmi(result.reg());
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  JumpTarget leave, null, function, non_function_constructor;
-  Load(args->at(0));  // Load the object.
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  frame_->Spill(obj.reg());
-
-  // If the object is a smi, we return null.
-  __ test(obj.reg(), Immediate(kSmiTagMask));
-  null.Branch(zero);
-
-  // Check that the object is a JS object but take special care of JS
-  // functions to make sure they have 'Function' as their class.
-  __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
-  null.Branch(below);
-
-  // As long as JS_FUNCTION_TYPE is the last instance type and it is
-  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
-  // LAST_JS_OBJECT_TYPE.
-  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
-  function.Branch(equal);
-
-  // Check if the constructor in the map is a function.
-  { Result tmp = allocator()->Allocate();
-    __ mov(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
-    __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg());
-    non_function_constructor.Branch(not_equal);
-  }
-
-  // The map register now contains the constructor function. Grab the
-  // instance class name from there.
-  __ mov(obj.reg(),
-         FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
-  __ mov(obj.reg(),
-         FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset));
-  frame_->Push(&obj);
-  leave.Jump();
-
-  // Functions have class 'Function'.
-  function.Bind();
-  frame_->Push(FACTORY->function_class_symbol());
-  leave.Jump();
-
-  // Objects with a non-function constructor have class 'Object'.
-  non_function_constructor.Bind();
-  frame_->Push(FACTORY->Object_symbol());
-  leave.Jump();
-
-  // Non-JS objects have class null.
-  null.Bind();
-  frame_->Push(FACTORY->null_value());
-
-  // All done.
-  leave.Bind();
-}
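
// Classification performed by GenerateClassOf above:
//   smi or non-JS object               -> null
//   JSFunction                         -> 'Function'
//   map's constructor is a function    -> that function's instance class name
//   any other constructor              -> 'Object'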
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  JumpTarget leave;
-  Load(args->at(0));  // Load the object.
-  frame_->Dup();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  ASSERT(object.is_valid());
-  // if (object->IsSmi()) return object.
-  __ test(object.reg(), Immediate(kSmiTagMask));
-  leave.Branch(zero, taken);
-  // It is a heap object - get map.
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  // if (!object->IsJSValue()) return object.
-  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
-  leave.Branch(not_equal, not_taken);
-  __ mov(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
-  object.Unuse();
-  frame_->SetElementAt(0, &temp);
-  leave.Bind();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  JumpTarget leave;
-  Load(args->at(0));  // Load the object.
-  Load(args->at(1));  // Load the value.
-  Result value = frame_->Pop();
-  Result object = frame_->Pop();
-  value.ToRegister();
-  object.ToRegister();
-
-  // if (object->IsSmi()) return value.
-  __ test(object.reg(), Immediate(kSmiTagMask));
-  leave.Branch(zero, &value, taken);
-
-  // It is a heap object - get its map.
-  Result scratch = allocator_->Allocate();
-  ASSERT(scratch.is_valid());
-  // if (!object->IsJSValue()) return value.
-  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
-  leave.Branch(not_equal, &value, not_taken);
-
-  // Store the value.
-  __ mov(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
-  // Update the write barrier.  Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  Result duplicate_value = allocator_->Allocate();
-  ASSERT(duplicate_value.is_valid());
-  __ mov(duplicate_value.reg(), value.reg());
-  // The object register is also overwritten by the write barrier and
-  // possibly aliased in the frame.
-  frame_->Spill(object.reg());
-  __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
-                 scratch.reg());
-  object.Unuse();
-  scratch.Unuse();
-  duplicate_value.Unuse();
-
-  // Leave.
-  leave.Bind(&value);
-  frame_->Push(&value);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in edx and the formal
-  // parameter count in eax.
-  Load(args->at(0));
-  Result key = frame_->Pop();
-  // Explicitly create a constant result.
-  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
-  // Call the shared stub to get to arguments[key].
-  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
-  Result result = frame_->CallStub(&stub, &key, &count);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  Load(args->at(0));
-  Load(args->at(1));
-  Result right = frame_->Pop();
-  Result left = frame_->Pop();
-  right.ToRegister();
-  left.ToRegister();
-  __ cmp(right.reg(), Operand(left.reg()));
-  right.Unuse();
-  left.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-  STATIC_ASSERT(kSmiTag == 0);  // EBP value is aligned, so it looks like a Smi.
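-  // With kSmiTag == 0 a smi has its low bit clear; ebp is word-aligned, so
-  // its low bits are clear too and the GC treats the pushed value as an
-  // integer rather than as a heap pointer.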
-  Result ebp_as_smi = allocator_->Allocate();
-  ASSERT(ebp_as_smi.is_valid());
-  __ mov(ebp_as_smi.reg(), Operand(ebp));
-  frame_->Push(&ebp_as_smi);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
-    ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-  frame_->SpillAll();
-
-  Label slow_allocate_heapnumber;
-  Label heapnumber_allocated;
-
-  __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
-  __ jmp(&heapnumber_allocated);
-
-  __ bind(&slow_allocate_heapnumber);
-  // Allocate a heap number.
-  __ CallRuntime(Runtime::kNumberAlloc, 0);
-  __ mov(edi, eax);
-
-  __ bind(&heapnumber_allocated);
-
-  __ PrepareCallCFunction(0, ebx);
-  __ CallCFunction(ExternalReference::random_uint32_function(masm()->isolate()),
-                   0);
-
-  // Convert 32 random bits in eax to 0.(32 random bits) in a double
-  // by computing:
-  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
-  // This is implemented on both SSE2 and FPU.
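-  // The low 32 bits of the 52-bit mantissa hold the random bits, so the
-  // difference is r * 2^-32, a value uniformly distributed in [0, 1).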
-  if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
-    CpuFeatures::Scope fscope(SSE2);
-    __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
-    __ movd(xmm1, Operand(ebx));
-    __ movd(xmm0, Operand(eax));
-    __ cvtss2sd(xmm1, xmm1);
-    __ pxor(xmm0, xmm1);
-    __ subsd(xmm0, xmm1);
-    __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
-  } else {
-    // 0x4130000000000000 is 1.0 x 2^20 as a double.
-    __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
-           Immediate(0x41300000));
-    __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
-    __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
-    __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
-    __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
-    __ fsubp(1);
-    __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
-  }
-  __ mov(eax, edi);
-
-  Result result = allocator_->Allocate(eax);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringAddStub stub(NO_STRING_ADD_FLAGS);
-  Result answer = frame_->CallStub(&stub, 2);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  SubStringStub stub;
-  Result answer = frame_->CallStub(&stub, 3);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringCompareStub stub;
-  Result answer = frame_->CallStub(&stub, 2);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
-  ASSERT_EQ(4, args->length());
-
-  // Load the arguments on the stack and call the stub.
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-  Load(args->at(3));
-
-  RegExpExecStub stub;
-  Result result = frame_->CallStub(&stub, 4);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));  // Size of array, smi.
-  Load(args->at(1));  // "index" property value.
-  Load(args->at(2));  // "input" property value.
-
-  RegExpConstructResultStub stub;
-  Result result = frame_->CallStub(&stub, 3);
-  frame_->Push(&result);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
-  DeferredSearchCache(Register dst, Register cache, Register key)
-      : dst_(dst), cache_(cache), key_(key) {
-    set_comment("[ DeferredSearchCache");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;    // On invocation holds the smi index of the finger;
-                    // on exit holds the value being looked up.
-  Register cache_;  // instance of JSFunctionResultCache.
-  Register key_;    // key being looked up.
-};
-
-
-void DeferredSearchCache::Generate() {
-  Label first_loop, search_further, second_loop, cache_miss;
-
-  // Smi-tagging is equivalent to multiplying by 2.
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  Smi* kEntrySizeSmi = Smi::FromInt(JSFunctionResultCache::kEntrySize);
-  Smi* kEntriesIndexSmi = Smi::FromInt(JSFunctionResultCache::kEntriesIndex);
-
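-  // The cache is a FixedArray: a few bookkeeping slots (factory function,
-  // finger, cache size) followed by interleaved (key, value) pairs, so
-  // kEntrySize covers one pair.  The finger is the smi index of the most
-  // recently hit key; the search starts from there.
-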
-  // Check the cache from the finger back to the start of the cache.
-  __ bind(&first_loop);
-  __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
-  __ cmp(Operand(dst_), Immediate(kEntriesIndexSmi));
-  __ j(less, &search_further);
-
-  __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
-  __ j(not_equal, &first_loop);
-
-  __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
-  __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
-  __ jmp(exit_label());
-
-  __ bind(&search_further);
-
-  // Check the cache from the end of the cache back to the finger.
-  __ mov(dst_, FieldOperand(cache_, JSFunctionResultCache::kCacheSizeOffset));
-
-  __ bind(&second_loop);
-  __ sub(Operand(dst_), Immediate(kEntrySizeSmi));
-  // Consider prefetching into some reg.
-  __ cmp(dst_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
-  __ j(less_equal, &cache_miss);
-
-  __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
-  __ j(not_equal, &second_loop);
-
-  __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
-  __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
-  __ jmp(exit_label());
-
-  __ bind(&cache_miss);
-  __ push(cache_);  // Store a reference to the cache.
-  __ push(key_);  // Store the key.
-  __ push(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ push(key_);
-  // On ia32 the function must be in edi.
-  __ mov(edi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
-  ParameterCount expected(1);
-  __ InvokeFunction(edi, expected, CALL_FUNCTION);
-
-  // Find a place to put new cached value into.
-  Label add_new_entry, update_cache;
-  __ mov(ecx, Operand(esp, kPointerSize));  // Restore the cache.
-  // Possible optimization: the cache size is constant for a given cache,
-  // so technically we could use a constant here.  However, on a cache
-  // miss this optimization would hardly matter.
-
-  // Check if we can add a new entry to the cache.
-  __ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
-  __ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
-  __ j(greater, &add_new_entry);
-
-  // Check if we can evict the entry after the finger.
-  __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
-  __ add(Operand(edx), Immediate(kEntrySizeSmi));
-  __ cmp(ebx, Operand(edx));
-  __ j(greater, &update_cache);
-
-  // Need to wrap around to the first entry of the cache.
-  __ mov(edx, Immediate(kEntriesIndexSmi));
-  __ jmp(&update_cache);
-
-  __ bind(&add_new_entry);
-  __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
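-  // edx holds the old cache size as a smi; kEntrySize << 1 is kEntrySize
-  // smi-tagged, so ebx becomes the new cache size.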
-  __ lea(ebx, Operand(edx, JSFunctionResultCache::kEntrySize << 1));
-  __ mov(FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset), ebx);
-
-  // Update the cache itself.
-  // edx holds the index.
-  __ bind(&update_cache);
-  __ pop(ebx);  // Restore the key.
-  __ mov(FieldOperand(ecx, JSFunctionResultCache::kFingerOffset), edx);
-  // Store key.
-  __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
-  __ RecordWrite(ecx, 0, ebx, edx);
-
-  // Store value.
-  __ pop(ecx);  // Restore the cache.
-  __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
-  __ add(Operand(edx), Immediate(Smi::FromInt(1)));
-  __ mov(ebx, eax);
-  __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
-  __ RecordWrite(ecx, 0, ebx, edx);
-
-  if (!dst_.is(eax)) {
-    __ mov(dst_, eax);
-  }
-}
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  ASSERT_NE(NULL, args->at(0)->AsLiteral());
-  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
-  Handle<FixedArray> jsfunction_result_caches(
-      masm()->isolate()->global_context()->jsfunction_result_caches());
-  if (jsfunction_result_caches->length() <= cache_id) {
-    __ Abort("Attempt to use undefined cache.");
-    frame_->Push(FACTORY->undefined_value());
-    return;
-  }
-
-  Load(args->at(1));
-  Result key = frame_->Pop();
-  key.ToRegister();
-
-  Result cache = allocator()->Allocate();
-  ASSERT(cache.is_valid());
-  __ mov(cache.reg(), ContextOperand(esi, Context::GLOBAL_INDEX));
-  __ mov(cache.reg(),
-         FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
-  __ mov(cache.reg(),
-         ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
-  __ mov(cache.reg(),
-         FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
-
-  Result tmp = allocator()->Allocate();
-  ASSERT(tmp.is_valid());
-
-  DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
-                                                          cache.reg(),
-                                                          key.reg());
-
-  // tmp.reg() now holds finger offset as a smi.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ mov(tmp.reg(), FieldOperand(cache.reg(),
-                                 JSFunctionResultCache::kFingerOffset));
-  __ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg()));
-  deferred->Branch(not_equal);
-
-  __ mov(tmp.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg(), 1));
-
-  deferred->BindExit();
-  frame_->Push(&tmp);
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-
-  // Load the argument on the stack and call the stub.
-  Load(args->at(0));
-  NumberToStringStub stub;
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
-  DeferredSwapElements(Register object, Register index1, Register index2)
-      : object_(object), index1_(index1), index2_(index2) {
-    set_comment("[ DeferredSwapElements");
-  }
-
-  virtual void Generate();
-
- private:
-  Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
-  __ push(object_);
-  __ push(index1_);
-  __ push(index2_);
-  __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
-  // Note: this code assumes that the indices passed are within the
-  // elements' bounds and refer to valid (non-hole) values.
-  Comment cmnt(masm_, "[ GenerateSwapElements");
-
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  Result index2 = frame_->Pop();
-  index2.ToRegister();
-
-  Result index1 = frame_->Pop();
-  index1.ToRegister();
-
-  Result object = frame_->Pop();
-  object.ToRegister();
-
-  Result tmp1 = allocator()->Allocate();
-  tmp1.ToRegister();
-  Result tmp2 = allocator()->Allocate();
-  tmp2.ToRegister();
-
-  frame_->Spill(object.reg());
-  frame_->Spill(index1.reg());
-  frame_->Spill(index2.reg());
-
-  DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
-                                                            index1.reg(),
-                                                            index2.reg());
-
-  // Fetch the map and check if the array is in the fast case.
-  // Check that the object doesn't require security checks and
-  // has no indexed interceptor.
-  __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
-  deferred->Branch(below);
-  __ test_b(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
-            KeyedLoadIC::kSlowCaseBitFieldMask);
-  deferred->Branch(not_zero);
-
-  // Check that the object's elements are in the fast case and writable.
-  __ mov(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
-  __ cmp(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
-         Immediate(FACTORY->fixed_array_map()));
-  deferred->Branch(not_equal);
-
-  // Smi-tagging is equivalent to multiplying by 2.
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  // Check that both indices are smis.
-  __ mov(tmp2.reg(), index1.reg());
-  __ or_(tmp2.reg(), Operand(index2.reg()));
-  __ test(tmp2.reg(), Immediate(kSmiTagMask));
-  deferred->Branch(not_zero);
-
-  // Check that both indices are valid.
-  __ mov(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
-  __ cmp(tmp2.reg(), Operand(index1.reg()));
-  deferred->Branch(below_equal);
-  __ cmp(tmp2.reg(), Operand(index2.reg()));
-  deferred->Branch(below_equal);
-
-  // Bring addresses into index1 and index2.
-  __ lea(index1.reg(), FixedArrayElementOperand(tmp1.reg(), index1.reg()));
-  __ lea(index2.reg(), FixedArrayElementOperand(tmp1.reg(), index2.reg()));
-
-  // Swap elements.
-  __ mov(object.reg(), Operand(index1.reg(), 0));
-  __ mov(tmp2.reg(),   Operand(index2.reg(), 0));
-  __ mov(Operand(index2.reg(), 0), object.reg());
-  __ mov(Operand(index1.reg(), 0), tmp2.reg());
-
-  Label done;
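-  // No write barrier is needed when the elements array is in new space;
-  // InNewSpace branches straight to done in that case.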
-  __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
-  // Possible optimization: check that both values are smis
-  // (or them together and test against the smi mask).
-
-  __ mov(tmp2.reg(), tmp1.reg());
-  __ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg());
-  __ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg());
-  __ bind(&done);
-
-  deferred->BindExit();
-  frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
-  Comment cmnt(masm_, "[ GenerateCallFunction");
-
-  ASSERT(args->length() >= 2);
-
-  int n_args = args->length() - 2;  // for receiver and function.
-  Load(args->at(0));  // receiver
-  for (int i = 0; i < n_args; i++) {
-    Load(args->at(i + 1));
-  }
-  Load(args->at(n_args + 1));  // function
-  Result result = frame_->CallJSFunction(n_args);
-  frame_->Push(&result);
-}
-
-
-// Generates the Math.pow method. Only handles special cases and
-// branches to the runtime system for everything else. Please note
-// that this function assumes that the callsite has executed ToNumber
-// on both arguments.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  Load(args->at(0));
-  Load(args->at(1));
-  if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
-    Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
-    frame_->Push(&res);
-  } else {
-    CpuFeatures::Scope use_sse2(SSE2);
-    Label allocate_return;
-    // Load the two operands while leaving the values on the frame.
-    frame()->Dup();
-    Result exponent = frame()->Pop();
-    exponent.ToRegister();
-    frame()->Spill(exponent.reg());
-    frame()->PushElementAt(1);
-    Result base = frame()->Pop();
-    base.ToRegister();
-    frame()->Spill(base.reg());
-
-    Result answer = allocator()->Allocate();
-    ASSERT(answer.is_valid());
-    ASSERT(!exponent.reg().is(base.reg()));
-    JumpTarget call_runtime;
-
-    // Save 1 in xmm3 - we need this several times later on.
-    __ mov(answer.reg(), Immediate(1));
-    __ cvtsi2sd(xmm3, Operand(answer.reg()));
-
-    Label exponent_nonsmi;
-    Label base_nonsmi;
-    // If the exponent is a heap number, go to that specific case.
-    __ test(exponent.reg(), Immediate(kSmiTagMask));
-    __ j(not_zero, &exponent_nonsmi);
-    __ test(base.reg(), Immediate(kSmiTagMask));
-    __ j(not_zero, &base_nonsmi);
-
-    // Optimized version when the exponent is an integer.
-    Label powi;
-    __ SmiUntag(base.reg());
-    __ cvtsi2sd(xmm0, Operand(base.reg()));
-    __ jmp(&powi);
-    // The exponent is a smi and the base is a heap number.
-    __ bind(&base_nonsmi);
-    __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
-           FACTORY->heap_number_map());
-    call_runtime.Branch(not_equal);
-
-    __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
-    // Optimized version of pow when the exponent is an integer.
-    __ bind(&powi);
-    __ SmiUntag(exponent.reg());
-
-    // Save exponent in base as we need to check if exponent is negative later.
-    // We know that base and exponent are in different registers.
-    __ mov(base.reg(), exponent.reg());
-
-    // Get absolute value of exponent.
-    Label no_neg;
-    __ cmp(exponent.reg(), 0);
-    __ j(greater_equal, &no_neg);
-    __ neg(exponent.reg());
-    __ bind(&no_neg);
-
-    // Load xmm1 with 1.
-    __ movsd(xmm1, xmm3);
-    Label while_true;
-    Label no_multiply;
-
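-    // Classic square-and-multiply: each iteration shifts the exponent
-    // right by one bit, multiplies the accumulator xmm1 by the current
-    // power xmm0 when the shifted-out bit was set, and squares xmm0.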
-    __ bind(&while_true);
-    __ shr(exponent.reg(), 1);
-    __ j(not_carry, &no_multiply);
-    __ mulsd(xmm1, xmm0);
-    __ bind(&no_multiply);
-    __ test(exponent.reg(), Operand(exponent.reg()));
-    __ mulsd(xmm0, xmm0);
-    __ j(not_zero, &while_true);
-
-    // base.reg() holds the original exponent; if negative, return 1/result.
-    __ test(base.reg(), Operand(base.reg()));
-    __ j(positive, &allocate_return);
-    // Special case if xmm1 has reached infinity.
-    __ mov(answer.reg(), Immediate(0x7FB00000));
-    __ movd(xmm0, Operand(answer.reg()));
-    __ cvtss2sd(xmm0, xmm0);
-    __ ucomisd(xmm0, xmm1);
-    call_runtime.Branch(equal);
-    __ divsd(xmm3, xmm1);
-    __ movsd(xmm1, xmm3);
-    __ jmp(&allocate_return);
-
-    // The exponent (or both operands) is a heap number; either way we now
-    // work on doubles.
-    __ bind(&exponent_nonsmi);
-    __ cmp(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
-           FACTORY->heap_number_map());
-    call_runtime.Branch(not_equal);
-    __ movdbl(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
-    // Test if the exponent is NaN.
-    __ ucomisd(xmm1, xmm1);
-    call_runtime.Branch(parity_even);
-
-    Label base_not_smi;
-    Label handle_special_cases;
-    __ test(base.reg(), Immediate(kSmiTagMask));
-    __ j(not_zero, &base_not_smi);
-    __ SmiUntag(base.reg());
-    __ cvtsi2sd(xmm0, Operand(base.reg()));
-    __ jmp(&handle_special_cases);
-    __ bind(&base_not_smi);
-    __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
-           FACTORY->heap_number_map());
-    call_runtime.Branch(not_equal);
-    __ mov(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
-    __ and_(answer.reg(), HeapNumber::kExponentMask);
-    __ cmp(Operand(answer.reg()), Immediate(HeapNumber::kExponentMask));
-    // base is NaN or +/-Infinity
-    call_runtime.Branch(greater_equal);
-    __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
-    // base is in xmm0 and exponent is in xmm1.
-    __ bind(&handle_special_cases);
-    Label not_minus_half;
-    // Test for -0.5.
-    // Load xmm2 with -0.5.
-    __ mov(answer.reg(), Immediate(0xBF000000));
-    __ movd(xmm2, Operand(answer.reg()));
-    __ cvtss2sd(xmm2, xmm2);
-    // xmm2 now has -0.5.
-    __ ucomisd(xmm2, xmm1);
-    __ j(not_equal, &not_minus_half);
-
-    // Calculate the reciprocal of the square root.
-    // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-    __ xorpd(xmm1, xmm1);
-    __ addsd(xmm1, xmm0);
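-    // Adding +0.0 turns an input of -0.0 into +0.0 and leaves all other
-    // values unchanged, so the sqrtsd below yields +0 as required.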
-    __ sqrtsd(xmm1, xmm1);
-    __ divsd(xmm3, xmm1);
-    __ movsd(xmm1, xmm3);
-    __ jmp(&allocate_return);
-
-    // Test for 0.5.
-    __ bind(&not_minus_half);
-    // Load xmm2 with 0.5.
-    // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
-    __ addsd(xmm2, xmm3);
-    // xmm2 now has 0.5.
-    __ ucomisd(xmm2, xmm1);
-    call_runtime.Branch(not_equal);
-    // Calculate the square root.
-    // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-    __ xorpd(xmm1, xmm1);
-    __ addsd(xmm1, xmm0);
-    __ sqrtsd(xmm1, xmm1);
-
-    JumpTarget done;
-    Label failure, success;
-    __ bind(&allocate_return);
-    // Make a copy of the frame to enable us to handle allocation
-    // failure after the JumpTarget jump.
-    VirtualFrame* clone = new VirtualFrame(frame());
-    __ AllocateHeapNumber(answer.reg(), exponent.reg(),
-                          base.reg(), &failure);
-    __ movdbl(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
-    // Remove the two original values from the frame - we only need those
-    // in the case where we branch to runtime.
-    frame()->Drop(2);
-    exponent.Unuse();
-    base.Unuse();
-    done.Jump(&answer);
-    // Use the copy of the original frame as our current frame.
-    RegisterFile empty_regs;
-    SetFrame(clone, &empty_regs);
-    // If we experience an allocation failure we branch to runtime.
-    __ bind(&failure);
-    call_runtime.Bind();
-    answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
-
-    done.Bind(&answer);
-    frame()->Push(&answer);
-  }
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::SIN,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::COS,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::LOG,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-// Generates the Math.sqrt method. Please note - this function assumes that
-// the callsite has executed ToNumber on the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-
-  if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
-    Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-    frame()->Push(&result);
-  } else {
-    CpuFeatures::Scope use_sse2(SSE2);
-    // Leave original value on the frame if we need to call runtime.
-    frame()->Dup();
-    Result result = frame()->Pop();
-    result.ToRegister();
-    frame()->Spill(result.reg());
-    Label runtime;
-    Label non_smi;
-    Label load_done;
-    JumpTarget end;
-
-    __ test(result.reg(), Immediate(kSmiTagMask));
-    __ j(not_zero, &non_smi);
-    __ SmiUntag(result.reg());
-    __ cvtsi2sd(xmm0, Operand(result.reg()));
-    __ jmp(&load_done);
-    __ bind(&non_smi);
-    __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
-           FACTORY->heap_number_map());
-    __ j(not_equal, &runtime);
-    __ movdbl(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
-
-    __ bind(&load_done);
-    __ sqrtsd(xmm0, xmm0);
-    // A copy of the virtual frame to allow us to go to runtime after the
-    // JumpTarget jump.
-    Result scratch = allocator()->Allocate();
-    VirtualFrame* clone = new VirtualFrame(frame());
-    __ AllocateHeapNumber(result.reg(), scratch.reg(), no_reg, &runtime);
-
-    __ movdbl(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
-    frame()->Drop(1);
-    scratch.Unuse();
-    end.Jump(&result);
-    // We only branch to runtime if we have an allocation error.
-    // Use the copy of the original frame as our current frame.
-    RegisterFile empty_regs;
-    SetFrame(clone, &empty_regs);
-    __ bind(&runtime);
-    result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-
-    end.Bind(&result);
-    frame()->Push(&result);
-  }
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-  Load(args->at(0));
-  Load(args->at(1));
-  Result right_res = frame_->Pop();
-  Result left_res = frame_->Pop();
-  right_res.ToRegister();
-  left_res.ToRegister();
-  Result tmp_res = allocator()->Allocate();
-  ASSERT(tmp_res.is_valid());
-  Register right = right_res.reg();
-  Register left = left_res.reg();
-  Register tmp = tmp_res.reg();
-  right_res.Unuse();
-  left_res.Unuse();
-  tmp_res.Unuse();
-  __ cmp(left, Operand(right));
-  destination()->true_target()->Branch(equal);
-  // Fail if either is a non-HeapObject.
-  __ mov(tmp, left);
-  __ and_(Operand(tmp), right);
-  __ test(Operand(tmp), Immediate(kSmiTagMask));
-  destination()->false_target()->Branch(equal);
-  __ CmpObjectType(left, JS_REGEXP_TYPE, tmp);
-  destination()->false_target()->Branch(not_equal);
-  __ cmp(tmp, FieldOperand(right, HeapObject::kMapOffset));
-  destination()->false_target()->Branch(not_equal);
-  __ mov(tmp, FieldOperand(left, JSRegExp::kDataOffset));
-  __ cmp(tmp, FieldOperand(right, JSRegExp::kDataOffset));
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  if (FLAG_debug_code) {
-    __ AbortIfNotString(value.reg());
-  }
-
-  __ test(FieldOperand(value.reg(), String::kHashFieldOffset),
-          Immediate(String::kContainsCachedArrayIndexMask));
-
-  value.Unuse();
-  destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result string = frame_->Pop();
-  string.ToRegister();
-  if (FLAG_debug_code) {
-    __ AbortIfNotString(string.reg());
-  }
-
-  Result number = allocator()->Allocate();
-  ASSERT(number.is_valid());
-  __ mov(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
-  __ IndexFromHash(number.reg(), number.reg());
-  string.Unuse();
-  frame_->Push(&number);
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-  ASSERT(!in_safe_int32_mode());
-  if (CheckForInlineRuntimeCall(node)) {
-    return;
-  }
-
-  ZoneList<Expression*>* args = node->arguments();
-  Comment cmnt(masm_, "[ CallRuntime");
-  const Runtime::Function* function = node->function();
-
-  if (function == NULL) {
-    // Push the builtins object found in the current global object.
-    Result temp = allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ mov(temp.reg(), GlobalObjectOperand());
-    __ mov(temp.reg(), FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
-    frame_->Push(&temp);
-  }
-
-  // Push the arguments ("left-to-right").
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  if (function == NULL) {
-    // Call the JS runtime function.
-    frame_->Push(node->name());
-    Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
-                                       arg_count,
-                                       loop_nesting_);
-    frame_->RestoreContextRegister();
-    frame_->Push(&answer);
-  } else {
-    // Call the C runtime function.
-    Result answer = frame_->CallRuntime(function, arg_count);
-    frame_->Push(&answer);
-  }
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-  Comment cmnt(masm_, "[ UnaryOperation");
-
-  Token::Value op = node->op();
-
-  if (op == Token::NOT) {
-    // Swap the true and false targets but keep the same actual label
-    // as the fall through.
-    destination()->Invert();
-    LoadCondition(node->expression(), destination(), true);
-    // Swap the labels back.
-    destination()->Invert();
-
-  } else if (op == Token::DELETE) {
-    Property* property = node->expression()->AsProperty();
-    if (property != NULL) {
-      Load(property->obj());
-      Load(property->key());
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-      Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
-      frame_->Push(&answer);
-      return;
-    }
-
-    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
-    if (variable != NULL) {
-      // Deleting an unqualified identifier is disallowed in strict mode,
-      // but "delete this" is allowed.
-      ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
-      Slot* slot = variable->AsSlot();
-      if (variable->is_global()) {
-        LoadGlobal();
-        frame_->Push(variable->name());
-        frame_->Push(Smi::FromInt(kNonStrictMode));
-        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
-                                              CALL_FUNCTION, 3);
-        frame_->Push(&answer);
-
-      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-        // Call the runtime to delete from the context holding the named
-        // variable.  Sync the virtual frame eagerly so we can push the
-        // arguments directly into place.
-        frame_->SyncRange(0, frame_->element_count() - 1);
-        frame_->EmitPush(esi);
-        frame_->EmitPush(Immediate(variable->name()));
-        Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
-        frame_->Push(&answer);
-      } else {
-        // Default: the result of deleting a non-global variable that was
-        // not introduced dynamically is false.
-        frame_->Push(FACTORY->false_value());
-      }
-    } else {
-      // Default: Result of deleting expressions is true.
-      Load(node->expression());  // may have side-effects
-      frame_->SetElementAt(0, FACTORY->true_value());
-    }
-
-  } else if (op == Token::TYPEOF) {
-    // Special case for loading the typeof expression; see comment on
-    // LoadTypeofExpression().
-    LoadTypeofExpression(node->expression());
-    Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
-    frame_->Push(&answer);
-
-  } else if (op == Token::VOID) {
-    Expression* expression = node->expression();
-    if (expression && expression->AsLiteral() && (
-        expression->AsLiteral()->IsTrue() ||
-        expression->AsLiteral()->IsFalse() ||
-        expression->AsLiteral()->handle()->IsNumber() ||
-        expression->AsLiteral()->handle()->IsString() ||
-        expression->AsLiteral()->handle()->IsJSRegExp() ||
-        expression->AsLiteral()->IsNull())) {
-      // Omit evaluating the value of the primitive literal.
-      // It will be discarded anyway, and can have no side effect.
-      frame_->Push(FACTORY->undefined_value());
-    } else {
-      Load(node->expression());
-      frame_->SetElementAt(0, FACTORY->undefined_value());
-    }
-
-  } else {
-    if (in_safe_int32_mode()) {
-      Visit(node->expression());
-      Result value = frame_->Pop();
-      ASSERT(value.is_untagged_int32());
-      // Registers containing an int32 value are not multiply used.
-      ASSERT(!value.is_register() || !frame_->is_used(value.reg()));
-      value.ToRegister();
-      switch (op) {
-        case Token::SUB: {
-          __ neg(value.reg());
-          frame_->Push(&value);
-          if (node->no_negative_zero()) {
-            // -MIN_INT is MIN_INT with the overflow flag set.
-            unsafe_bailout_->Branch(overflow);
-          } else {
-            // MIN_INT and 0 both have bad negations: in each, bits 0..30
-            // are all zero, which is what the test below checks.
-            __ test(value.reg(), Immediate(0x7FFFFFFF));
-            unsafe_bailout_->Branch(zero);
-          }
-          break;
-        }
-        case Token::BIT_NOT: {
-          __ not_(value.reg());
-          frame_->Push(&value);
-          break;
-        }
-        case Token::ADD: {
-          // Unary plus has no effect on int32 values.
-          frame_->Push(&value);
-          break;
-        }
-        default:
-          UNREACHABLE();
-          break;
-      }
-    } else {
-      Load(node->expression());
-      bool can_overwrite = node->expression()->ResultOverwriteAllowed();
-      UnaryOverwriteMode overwrite =
-          can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-      bool no_negative_zero = node->expression()->no_negative_zero();
-      switch (op) {
-        case Token::NOT:
-        case Token::DELETE:
-        case Token::TYPEOF:
-          UNREACHABLE();  // handled above
-          break;
-
-        case Token::SUB: {
-          GenericUnaryOpStub stub(
-              Token::SUB,
-              overwrite,
-              NO_UNARY_FLAGS,
-              no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
-          Result operand = frame_->Pop();
-          Result answer = frame_->CallStub(&stub, &operand);
-          answer.set_type_info(TypeInfo::Number());
-          frame_->Push(&answer);
-          break;
-        }
-        case Token::BIT_NOT: {
-          // Smi check.
-          JumpTarget smi_label;
-          JumpTarget continue_label;
-          Result operand = frame_->Pop();
-          TypeInfo operand_info = operand.type_info();
-          operand.ToRegister();
-          if (operand_info.IsSmi()) {
-            if (FLAG_debug_code) __ AbortIfNotSmi(operand.reg());
-            frame_->Spill(operand.reg());
-            // Set smi tag bit. It will be reset by the not operation.
-            __ lea(operand.reg(), Operand(operand.reg(), kSmiTagMask));
-            __ not_(operand.reg());
-            Result answer = operand;
-            answer.set_type_info(TypeInfo::Smi());
-            frame_->Push(&answer);
-          } else {
-            __ test(operand.reg(), Immediate(kSmiTagMask));
-            smi_label.Branch(zero, &operand, taken);
-
-            GenericUnaryOpStub stub(Token::BIT_NOT,
-                                    overwrite,
-                                    NO_UNARY_SMI_CODE_IN_STUB);
-            Result answer = frame_->CallStub(&stub, &operand);
-            continue_label.Jump(&answer);
-
-            smi_label.Bind(&answer);
-            answer.ToRegister();
-            frame_->Spill(answer.reg());
-            // Set smi tag bit. It will be reset by the not operation.
-            __ lea(answer.reg(), Operand(answer.reg(), kSmiTagMask));
-            __ not_(answer.reg());
-
-            continue_label.Bind(&answer);
-            answer.set_type_info(TypeInfo::Integer32());
-            frame_->Push(&answer);
-          }
-          break;
-        }
-        case Token::ADD: {
-          // Smi check.
-          JumpTarget continue_label;
-          Result operand = frame_->Pop();
-          TypeInfo operand_info = operand.type_info();
-          operand.ToRegister();
-          __ test(operand.reg(), Immediate(kSmiTagMask));
-          continue_label.Branch(zero, &operand, taken);
-
-          frame_->Push(&operand);
-          Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
-                                              CALL_FUNCTION, 1);
-
-          continue_label.Bind(&answer);
-          if (operand_info.IsSmi()) {
-            answer.set_type_info(TypeInfo::Smi());
-          } else if (operand_info.IsInteger32()) {
-            answer.set_type_info(TypeInfo::Integer32());
-          } else {
-            answer.set_type_info(TypeInfo::Number());
-          }
-          frame_->Push(&answer);
-          break;
-        }
-        default:
-          UNREACHABLE();
-      }
-    }
-  }
-}
-
-
-// The value in dst was optimistically incremented or decremented.  The
-// result overflowed or was not smi tagged.  Undo the operation, call
-// into the runtime to convert the argument to a number, and call the
-// specialized add or subtract stub.  The result is left in dst.
-class DeferredPrefixCountOperation: public DeferredCode {
- public:
-  DeferredPrefixCountOperation(Register dst,
-                               bool is_increment,
-                               TypeInfo input_type)
-      : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
-    set_comment("[ DeferredCountOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  bool is_increment_;
-  TypeInfo input_type_;
-};
-
-
-void DeferredPrefixCountOperation::Generate() {
-  // Undo the optimistic smi operation.
-  if (is_increment_) {
-    __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
-  } else {
-    __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
-  }
-  Register left;
-  if (input_type_.IsNumber()) {
-    left = dst_;
-  } else {
-    __ push(dst_);
-    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-    left = eax;
-  }
-
-  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS,
-                           TypeInfo::Number());
-  stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-// The value in dst was optimistically incremented or decremented.  The
-// result overflowed or was not smi tagged.  Undo the operation and call
-// into the runtime to convert the argument to a number.  Update the
-// original value in old.  Call the specialized add or subtract stub.
-// The result is left in dst.
-class DeferredPostfixCountOperation: public DeferredCode {
- public:
-  DeferredPostfixCountOperation(Register dst,
-                                Register old,
-                                bool is_increment,
-                                TypeInfo input_type)
-      : dst_(dst),
-        old_(old),
-        is_increment_(is_increment),
-        input_type_(input_type) {
-    set_comment("[ DeferredCountOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Register old_;
-  bool is_increment_;
-  TypeInfo input_type_;
-};
-
-
-void DeferredPostfixCountOperation::Generate() {
-  // Undo the optimistic smi operation.
-  if (is_increment_) {
-    __ sub(Operand(dst_), Immediate(Smi::FromInt(1)));
-  } else {
-    __ add(Operand(dst_), Immediate(Smi::FromInt(1)));
-  }
-  Register left;
-  if (input_type_.IsNumber()) {
-    __ push(dst_);  // Save the input to use as the old value.
-    left = dst_;
-  } else {
-    __ push(dst_);
-    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-    __ push(eax);  // Save the result of ToNumber to use as the old value.
-    left = eax;
-  }
-
-  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS,
-                           TypeInfo::Number());
-  stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-  __ pop(old_);
-}
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ CountOperation");
-
-  bool is_postfix = node->is_postfix();
-  bool is_increment = node->op() == Token::INC;
-
-  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
-  bool is_const = (var != NULL && var->mode() == Variable::CONST);
-
-  // Postfix operations need a stack slot under the reference to hold
-  // the old value while the new value is being stored.  This is so that,
-  // if storing the new value requires a call, the old value is in the
-  // frame to be spilled.
-  if (is_postfix) frame_->Push(Smi::FromInt(0));
-
-  // A constant reference is never stored to, so it is not treated as a
-  // compound assignment reference.
-  { Reference target(this, node->expression(), !is_const);
-    if (target.is_illegal()) {
-      // Spoof the virtual frame to have the expected height (one higher
-      // than on entry).
-      if (!is_postfix) frame_->Push(Smi::FromInt(0));
-      return;
-    }
-    target.TakeValue();
-
-    Result new_value = frame_->Pop();
-    new_value.ToRegister();
-
-    Result old_value;  // Only allocated in the postfix case.
-    if (is_postfix) {
-      // Allocate a temporary to preserve the old value.
-      old_value = allocator_->Allocate();
-      ASSERT(old_value.is_valid());
-      __ mov(old_value.reg(), new_value.reg());
-
-      // The return value for postfix operations is ToNumber(input).
-      // Keep more precise type info if the input is some kind of
-      // number already. If the input is not a number we have to wait
-      // for the deferred code to convert it.
-      if (new_value.type_info().IsNumber()) {
-        old_value.set_type_info(new_value.type_info());
-      }
-    }
-
-    // Ensure the new value is writable.
-    frame_->Spill(new_value.reg());
-
-    Result tmp;
-    if (new_value.is_smi()) {
-      if (FLAG_debug_code) __ AbortIfNotSmi(new_value.reg());
-    } else {
-      // We don't know statically if the input is a smi.
-      // In order to combine the overflow and the smi tag check, we need
-      // to be able to allocate a byte register.  We attempt to do so
-      // without spilling.  If we fail, we will generate separate overflow
-      // and smi tag checks.
-      // We allocate and clear a temporary byte register before performing
-      // the count operation since clearing the register using xor will clear
-      // the overflow flag.
-      tmp = allocator_->AllocateByteRegisterWithoutSpilling();
-      if (tmp.is_valid()) {
-        __ Set(tmp.reg(), Immediate(0));
-      }
-    }
-
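-    // Smi::FromInt(1) is the untagged value 1 << kSmiTagSize, so adding it
-    // to a smi-tagged input yields the correctly tagged result; overflow
-    // or a non-smi input is handled by the deferred code.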
-    if (is_increment) {
-      __ add(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
-    } else {
-      __ sub(Operand(new_value.reg()), Immediate(Smi::FromInt(1)));
-    }
-
-    DeferredCode* deferred = NULL;
-    if (is_postfix) {
-      deferred = new DeferredPostfixCountOperation(new_value.reg(),
-                                                   old_value.reg(),
-                                                   is_increment,
-                                                   new_value.type_info());
-    } else {
-      deferred = new DeferredPrefixCountOperation(new_value.reg(),
-                                                  is_increment,
-                                                  new_value.type_info());
-    }
-
-    if (new_value.is_smi()) {
-      // In case we have a smi as input just check for overflow.
-      deferred->Branch(overflow);
-    } else {
-      // If the count operation didn't overflow and the result is a valid
-      // smi, we're done. Otherwise, we jump to the deferred slow-case
-      // code.
-      // We combine the overflow and the smi tag check if we could
-      // successfully allocate a temporary byte register.
-      if (tmp.is_valid()) {
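-        // setcc writes 1 into the byte register iff the operation above
-        // overflowed; or-ing in the result then lets the low (smi tag)
-        // bit catch both the overflow and the non-smi case in one test.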
-        __ setcc(overflow, tmp.reg());
-        __ or_(Operand(tmp.reg()), new_value.reg());
-        __ test(tmp.reg(), Immediate(kSmiTagMask));
-        tmp.Unuse();
-        deferred->Branch(not_zero);
-      } else {
-        // Otherwise we test separately for overflow and smi tag.
-        deferred->Branch(overflow);
-        __ test(new_value.reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
-      }
-    }
-    deferred->BindExit();
-
-    // Postfix count operations return their input converted to
-    // number. The case when the input is already a number is covered
-    // above in the allocation code for old_value.
-    if (is_postfix && !new_value.type_info().IsNumber()) {
-      old_value.set_type_info(TypeInfo::Number());
-    }
-
-    // The result of ++ or -- is an Integer32 if the
-    // input is a smi. Otherwise it is a number.
-    if (new_value.is_smi()) {
-      new_value.set_type_info(TypeInfo::Integer32());
-    } else {
-      new_value.set_type_info(TypeInfo::Number());
-    }
-
-    // Postfix: store the old value in the allocated slot under the
-    // reference.
-    if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
-
-    frame_->Push(&new_value);
-    // Non-constant: update the reference.
-    if (!is_const) target.SetValue(NOT_CONST_INIT);
-  }
-
-  // Postfix: drop the new value and use the old.
-  if (is_postfix) frame_->Drop();
-}
-
-
-void CodeGenerator::Int32BinaryOperation(BinaryOperation* node) {
-  Token::Value op = node->op();
-  Comment cmnt(masm_, "[ Int32BinaryOperation");
-  ASSERT(in_safe_int32_mode());
-  ASSERT(safe_int32_mode_enabled());
-  ASSERT(FLAG_safe_int32_compiler);
-
-  if (op == Token::COMMA) {
-    // Discard left value.
-    frame_->Nip(1);
-    return;
-  }
-
-  Result right = frame_->Pop();
-  Result left = frame_->Pop();
-
-  ASSERT(right.is_untagged_int32());
-  ASSERT(left.is_untagged_int32());
-  // Registers containing an int32 value are not multiply used.
-  ASSERT(!left.is_register() || !frame_->is_used(left.reg()));
-  ASSERT(!right.is_register() || !frame_->is_used(right.reg()));
-
-  switch (op) {
-    case Token::COMMA:
-    case Token::OR:
-    case Token::AND:
-      UNREACHABLE();
-      break;
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-      if (left.is_constant() || right.is_constant()) {
-        int32_t value;  // Put constant in value, non-constant in left.
-        // Constants are known to be int32 values, from static analysis,
-        // or else will be converted to int32 by implicit ECMA [[ToInt32]].
-        if (left.is_constant()) {
-          ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
-          value = NumberToInt32(*left.handle());
-          left = right;
-        } else {
-          ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
-          value = NumberToInt32(*right.handle());
-        }
-
-        left.ToRegister();
-        if (op == Token::BIT_OR) {
-          __ or_(Operand(left.reg()), Immediate(value));
-        } else if (op == Token::BIT_XOR) {
-          __ xor_(Operand(left.reg()), Immediate(value));
-        } else {
-          ASSERT(op == Token::BIT_AND);
-          __ and_(Operand(left.reg()), Immediate(value));
-        }
-      } else {
-        ASSERT(left.is_register());
-        ASSERT(right.is_register());
-        if (op == Token::BIT_OR) {
-          __ or_(left.reg(), Operand(right.reg()));
-        } else if (op == Token::BIT_XOR) {
-          __ xor_(left.reg(), Operand(right.reg()));
-        } else {
-          ASSERT(op == Token::BIT_AND);
-          __ and_(left.reg(), Operand(right.reg()));
-        }
-      }
-      frame_->Push(&left);
-      right.Unuse();
-      break;
-    case Token::SAR:
-    case Token::SHL:
-    case Token::SHR: {
-      bool test_shr_overflow = false;
-      left.ToRegister();
-      if (right.is_constant()) {
-        ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
-        int shift_amount = NumberToInt32(*right.handle()) & 0x1F;
-        if (op == Token::SAR) {
-          __ sar(left.reg(), shift_amount);
-        } else if (op == Token::SHL) {
-          __ shl(left.reg(), shift_amount);
-        } else {
-          ASSERT(op == Token::SHR);
-          __ shr(left.reg(), shift_amount);
-          if (shift_amount == 0) test_shr_overflow = true;
-        }
-      } else {
-        // Move right to ecx.
-        if (left.is_register() && left.reg().is(ecx)) {
-          right.ToRegister();
-          __ xchg(left.reg(), right.reg());
-          left = right;  // Left is unused here, copy of right unused by Push.
-        } else {
-          right.ToRegister(ecx);
-          left.ToRegister();
-        }
-        if (op == Token::SAR) {
-          __ sar_cl(left.reg());
-        } else if (op == Token::SHL) {
-          __ shl_cl(left.reg());
-        } else {
-          ASSERT(op == Token::SHR);
-          __ shr_cl(left.reg());
-          test_shr_overflow = true;
-        }
-      }
-      {
-        Register left_reg = left.reg();
-        frame_->Push(&left);
-        right.Unuse();
-        if (test_shr_overflow && !node->to_int32()) {
-          // Uint32 results with top bit set are not Int32 values.
-          // If they will be forced to Int32, skip the test.
-          // Test is needed because shr with shift amount 0 does not set flags.
-          __ test(left_reg, Operand(left_reg));
-          unsafe_bailout_->Branch(sign);
-        }
-      }
-      break;
-    }
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-      if ((left.is_constant() && op != Token::SUB) || right.is_constant()) {
-        int32_t value;  // Put constant in value, non-constant in left.
-        if (right.is_constant()) {
-          ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber());
-          value = NumberToInt32(*right.handle());
-        } else {
-          ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber());
-          value = NumberToInt32(*left.handle());
-          left = right;
-        }
-
-        left.ToRegister();
-        if (op == Token::ADD) {
-          __ add(Operand(left.reg()), Immediate(value));
-        } else if (op == Token::SUB) {
-          __ sub(Operand(left.reg()), Immediate(value));
-        } else {
-          ASSERT(op == Token::MUL);
-          __ imul(left.reg(), left.reg(), value);
-        }
-      } else {
-        left.ToRegister();
-        ASSERT(left.is_register());
-        ASSERT(right.is_register());
-        if (op == Token::ADD) {
-          __ add(left.reg(), Operand(right.reg()));
-        } else if (op == Token::SUB) {
-          __ sub(left.reg(), Operand(right.reg()));
-        } else {
-          ASSERT(op == Token::MUL);
-          // We have statically verified that a negative zero can be ignored.
-          __ imul(left.reg(), Operand(right.reg()));
-        }
-      }
-      right.Unuse();
-      frame_->Push(&left);
-      if (!node->to_int32() || op == Token::MUL) {
-        // If ToInt32 is applied to the result of ADD or SUB, overflow does
-        // not matter: the wrapped 32-bit value is already correct.  The
-        // result of MUL, however, may not be precisely representable as a
-        // double, so we always have to check it for overflow.
-        unsafe_bailout_->Branch(overflow);
-      }
-      break;
-    case Token::DIV:
-    case Token::MOD: {
-      if (right.is_register() && (right.reg().is(eax) || right.reg().is(edx))) {
-        if (left.is_register() && left.reg().is(edi)) {
-          right.ToRegister(ebx);
-        } else {
-          right.ToRegister(edi);
-        }
-      }
-      left.ToRegister(eax);
-      Result edx_reg = allocator_->Allocate(edx);
-      right.ToRegister();
-      // The results are unused here because BreakTarget::Branch cannot handle
-      // live results.
-      Register right_reg = right.reg();
-      left.Unuse();
-      right.Unuse();
-      edx_reg.Unuse();
-      __ cmp(right_reg, 0);
-      // Ensure divisor is positive: no chance of non-int32 or -0 result.
-      unsafe_bailout_->Branch(less_equal);
-      __ cdq();  // Sign-extend eax into edx:eax
-      __ idiv(right_reg);
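-      // idiv leaves the quotient in eax and the remainder in edx.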
-      if (op == Token::MOD) {
-        // Negative zero can arise from a negative dividend with a zero
-        // remainder.
-        if (!node->no_negative_zero()) {
-          Label not_negative_zero;
-          __ test(edx, Operand(edx));
-          __ j(not_zero, &not_negative_zero);
-          __ test(eax, Operand(eax));
-          unsafe_bailout_->Branch(negative);
-          __ bind(&not_negative_zero);
-        }
-        Result edx_result(edx, TypeInfo::Integer32());
-        edx_result.set_untagged_int32(true);
-        frame_->Push(&edx_result);
-      } else {
-        ASSERT(op == Token::DIV);
-        __ test(edx, Operand(edx));
-        unsafe_bailout_->Branch(not_equal);
-        Result eax_result(eax, TypeInfo::Integer32());
-        eax_result.set_untagged_int32(true);
-        frame_->Push(&eax_result);
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
-  // According to ECMA-262 section 11.11, page 58, the binary logical
-  // operators must yield the result of one of the two expressions
-  // before any ToBoolean() conversions. This means that the value
-  // produced by a && or || operator is not necessarily a boolean.
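-  // For example, (0 || "a") yields the string "a" and ("" && x) yields "".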
-
-  // NOTE: If the left hand side produces a materialized value (not
-  // control flow), we force the right hand side to do the same. This
-  // is necessary because we assume that if we get control flow on the
-  // last path out of an expression we got it on all paths.
-  if (node->op() == Token::AND) {
-    ASSERT(!in_safe_int32_mode());
-    JumpTarget is_true;
-    ControlDestination dest(&is_true, destination()->false_target(), true);
-    LoadCondition(node->left(), &dest, false);
-
-    if (dest.false_was_fall_through()) {
-      // The current false target was used as the fall-through.  If
-      // there are no dangling jumps to is_true then the left
-      // subexpression was unconditionally false.  Otherwise we have
-      // paths where we do have to evaluate the right subexpression.
-      if (is_true.is_linked()) {
-        // We need to compile the right subexpression.  If the jump to
-        // the current false target was a forward jump then we have a
-        // valid frame, we have just bound the false target, and we
-        // have to jump around the code for the right subexpression.
-        if (has_valid_frame()) {
-          destination()->false_target()->Unuse();
-          destination()->false_target()->Jump();
-        }
-        is_true.Bind();
-        // The left subexpression compiled to control flow, so the
-        // right one is free to do so as well.
-        LoadCondition(node->right(), destination(), false);
-      } else {
-        // We have actually just jumped to or bound the current false
-        // target but the current control destination is not marked as
-        // used.
-        destination()->Use(false);
-      }
-
-    } else if (dest.is_used()) {
-      // The left subexpression compiled to control flow (and is_true
-      // was just bound), so the right is free to do so as well.
-      LoadCondition(node->right(), destination(), false);
-
-    } else {
-      // We have a materialized value on the frame, so we exit with
-      // one on all paths.  There are possibly also jumps to is_true
-      // from nested subexpressions.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      // Avoid popping the result if it converts to 'false' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      //
-      // Duplicate the TOS value. The duplicate will be popped by
-      // ToBoolean.
-      frame_->Dup();
-      ControlDestination dest(&pop_and_continue, &exit, true);
-      ToBoolean(&dest);
-
-      // Pop the result of evaluating the first part.
-      frame_->Drop();
-
-      // Compile right side expression.
-      is_true.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    }
-
-  } else {
-    ASSERT(node->op() == Token::OR);
-    ASSERT(!in_safe_int32_mode());
-    JumpTarget is_false;
-    ControlDestination dest(destination()->true_target(), &is_false, false);
-    LoadCondition(node->left(), &dest, false);
-
-    if (dest.true_was_fall_through()) {
-      // The current true target was used as the fall-through.  If
-      // there are no dangling jumps to is_false then the left
-      // subexpression was unconditionally true.  Otherwise we have
-      // paths where we do have to evaluate the right subexpression.
-      if (is_false.is_linked()) {
-        // We need to compile the right subexpression.  If the jump to
-        // the current true target was a forward jump then we have a
-        // valid frame, we have just bound the true target, and we
-        // have to jump around the code for the right subexpression.
-        if (has_valid_frame()) {
-          destination()->true_target()->Unuse();
-          destination()->true_target()->Jump();
-        }
-        is_false.Bind();
-        // The left subexpression compiled to control flow, so the
-        // right one is free to do so as well.
-        LoadCondition(node->right(), destination(), false);
-      } else {
-        // We have just jumped to or bound the current true target but
-        // the current control destination is not marked as used.
-        destination()->Use(true);
-      }
-
-    } else if (dest.is_used()) {
-      // The left subexpression compiled to control flow (and is_false
-      // was just bound), so the right is free to do so as well.
-      LoadCondition(node->right(), destination(), false);
-
-    } else {
-      // We have a materialized value on the frame, so we exit with
-      // one on all paths.  There are possibly also jumps to is_false
-      // from nested subexpressions.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      // Avoid popping the result if it converts to 'true' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      //
-      // Duplicate the TOS value. The duplicate will be popped by
-      // ToBoolean.
-      frame_->Dup();
-      ControlDestination dest(&exit, &pop_and_continue, false);
-      ToBoolean(&dest);
-
-      // Pop the result of evaluating the first part.
-      frame_->Drop();
-
-      // Compile right side expression.
-      is_false.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    }
-  }
-}
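
// Editorial note: the materialized-value paths above lean on the
// ECMA-262 section 9.2 ToBoolean conversion; '&&' and '||' keep the
// left operand itself when it decides the result, so only truthiness
// is tested.  A minimal, self-contained C++ sketch of that conversion
// (the JSValue stand-in is hypothetical, not a V8 type):
#include <cmath>
#include <string>

struct JSValue {
  enum Kind { kUndefined, kNull, kBoolean, kNumber, kString, kObject };
  Kind kind;
  bool boolean;
  double number;
  std::string string;
};

static bool ToBoolean(const JSValue& v) {
  switch (v.kind) {
    case JSValue::kUndefined:
    case JSValue::kNull:     return false;
    case JSValue::kBoolean:  return v.boolean;
    case JSValue::kNumber:   // +0, -0 and NaN are the only false numbers.
      return v.number != 0 && !std::isnan(v.number);
    case JSValue::kString:   return !v.string.empty();
    case JSValue::kObject:   return true;  // Ignoring undetectable objects.
  }
  return false;
}
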
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-  Comment cmnt(masm_, "[ BinaryOperation");
-
-  if (node->op() == Token::AND || node->op() == Token::OR) {
-    GenerateLogicalBooleanOperation(node);
-  } else if (in_safe_int32_mode()) {
-    Visit(node->left());
-    Visit(node->right());
-    Int32BinaryOperation(node);
-  } else {
-    // NOTE: The code below assumes that the slow cases (calls to runtime)
-    // never return a constant/immutable object.
-    OverwriteMode overwrite_mode = NO_OVERWRITE;
-    if (node->left()->ResultOverwriteAllowed()) {
-      overwrite_mode = OVERWRITE_LEFT;
-    } else if (node->right()->ResultOverwriteAllowed()) {
-      overwrite_mode = OVERWRITE_RIGHT;
-    }
-
-    if (node->left()->IsTrivial()) {
-      Load(node->right());
-      Result right = frame_->Pop();
-      frame_->Push(node->left());
-      frame_->Push(&right);
-    } else {
-      Load(node->left());
-      Load(node->right());
-    }
-    GenericBinaryOperation(node, overwrite_mode);
-  }
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-  ASSERT(!in_safe_int32_mode());
-  frame_->PushFunction();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ CompareOperation");
-
-  bool left_already_loaded = false;
-
-  // Get the expressions from the node.
-  Expression* left = node->left();
-  Expression* right = node->right();
-  Token::Value op = node->op();
-  // To make typeof testing for natives implemented in JavaScript really
-  // efficient, we generate special code for expressions of the form:
-  // 'typeof <expression> == <string>'.
-  UnaryOperation* operation = left->AsUnaryOperation();
-  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
-      (operation != NULL && operation->op() == Token::TYPEOF) &&
-      (right->AsLiteral() != NULL &&
-       right->AsLiteral()->handle()->IsString())) {
-    Handle<String> check(String::cast(*right->AsLiteral()->handle()));
-
-    // Load the operand and move it to a register.
-    LoadTypeofExpression(operation->expression());
-    Result answer = frame_->Pop();
-    answer.ToRegister();
-
-    if (check->Equals(HEAP->number_symbol())) {
-      __ test(answer.reg(), Immediate(kSmiTagMask));
-      destination()->true_target()->Branch(zero);
-      frame_->Spill(answer.reg());
-      __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ cmp(answer.reg(), FACTORY->heap_number_map());
-      answer.Unuse();
-      destination()->Split(equal);
-
-    } else if (check->Equals(HEAP->string_symbol())) {
-      __ test(answer.reg(), Immediate(kSmiTagMask));
-      destination()->false_target()->Branch(zero);
-
-      // It can be an undetectable string object.
-      Result temp = allocator()->Allocate();
-      ASSERT(temp.is_valid());
-      __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
-                1 << Map::kIsUndetectable);
-      destination()->false_target()->Branch(not_zero);
-      __ CmpInstanceType(temp.reg(), FIRST_NONSTRING_TYPE);
-      temp.Unuse();
-      answer.Unuse();
-      destination()->Split(below);
-
-    } else if (check->Equals(HEAP->boolean_symbol())) {
-      __ cmp(answer.reg(), FACTORY->true_value());
-      destination()->true_target()->Branch(equal);
-      __ cmp(answer.reg(), FACTORY->false_value());
-      answer.Unuse();
-      destination()->Split(equal);
-
-    } else if (check->Equals(HEAP->undefined_symbol())) {
-      __ cmp(answer.reg(), FACTORY->undefined_value());
-      destination()->true_target()->Branch(equal);
-
-      __ test(answer.reg(), Immediate(kSmiTagMask));
-      destination()->false_target()->Branch(zero);
-
-      // It can be an undetectable object.
-      frame_->Spill(answer.reg());
-      __ mov(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ test_b(FieldOperand(answer.reg(), Map::kBitFieldOffset),
-                1 << Map::kIsUndetectable);
-      answer.Unuse();
-      destination()->Split(not_zero);
-
-    } else if (check->Equals(HEAP->function_symbol())) {
-      __ test(answer.reg(), Immediate(kSmiTagMask));
-      destination()->false_target()->Branch(zero);
-      frame_->Spill(answer.reg());
-      __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
-      destination()->true_target()->Branch(equal);
-      // Regular expressions are callable, so typeof == 'function'.
-      __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
-      answer.Unuse();
-      destination()->Split(equal);
-    } else if (check->Equals(HEAP->object_symbol())) {
-      __ test(answer.reg(), Immediate(kSmiTagMask));
-      destination()->false_target()->Branch(zero);
-      __ cmp(answer.reg(), FACTORY->null_value());
-      destination()->true_target()->Branch(equal);
-
-      Result map = allocator()->Allocate();
-      ASSERT(map.is_valid());
-      // Regular expressions are typeof == 'function', not 'object'.
-      __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, map.reg());
-      destination()->false_target()->Branch(equal);
-
-      // It can be an undetectable object.
-      __ test_b(FieldOperand(map.reg(), Map::kBitFieldOffset),
-                1 << Map::kIsUndetectable);
-      destination()->false_target()->Branch(not_zero);
-      // Do a range test for JSObject type.  We can't use
-      // MacroAssembler::IsInstanceJSObjectType, because we are using a
-      // ControlDestination, so we copy its implementation here.
-      __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
-      __ sub(Operand(map.reg()), Immediate(FIRST_JS_OBJECT_TYPE));
-      __ cmp(map.reg(), LAST_JS_OBJECT_TYPE - FIRST_JS_OBJECT_TYPE);
-      answer.Unuse();
-      map.Unuse();
-      destination()->Split(below_equal);
-    } else {
-      // Uncommon case: typeof testing against a string literal that is
-      // never returned from the typeof operator.
-      answer.Unuse();
-      destination()->Goto(false);
-    }
-    return;
-  } else if (op == Token::LT &&
-             right->AsLiteral() != NULL &&
-             right->AsLiteral()->handle()->IsHeapNumber()) {
-    Handle<HeapNumber> check(HeapNumber::cast(*right->AsLiteral()->handle()));
-    if (check->value() == 2147483648.0) {  // 0x80000000.
-      Load(left);
-      left_already_loaded = true;
-      Result lhs = frame_->Pop();
-      lhs.ToRegister();
-      __ test(lhs.reg(), Immediate(kSmiTagMask));
-      destination()->true_target()->Branch(zero);  // All Smis are less.
-      Result scratch = allocator()->Allocate();
-      ASSERT(scratch.is_valid());
-      __ mov(scratch.reg(), FieldOperand(lhs.reg(), HeapObject::kMapOffset));
-      __ cmp(scratch.reg(), FACTORY->heap_number_map());
-      JumpTarget not_a_number;
-      not_a_number.Branch(not_equal, &lhs);
-      __ mov(scratch.reg(),
-             FieldOperand(lhs.reg(), HeapNumber::kExponentOffset));
-      __ cmp(Operand(scratch.reg()), Immediate(0xfff00000));
-      not_a_number.Branch(above_equal, &lhs);  // It's a negative NaN or -Inf.
-      const uint32_t borderline_exponent =
-          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
-      __ cmp(Operand(scratch.reg()), Immediate(borderline_exponent));
-      scratch.Unuse();
-      lhs.Unuse();
-      destination()->true_target()->Branch(less);
-      destination()->false_target()->Jump();
-
-      not_a_number.Bind(&lhs);
-      frame_->Push(&lhs);
-    }
-  }
-
-  Condition cc = no_condition;
-  bool strict = false;
-  switch (op) {
-    case Token::EQ_STRICT:
-      strict = true;
-      // Fall through.
-    case Token::EQ:
-      cc = equal;
-      break;
-    case Token::LT:
-      cc = less;
-      break;
-    case Token::GT:
-      cc = greater;
-      break;
-    case Token::LTE:
-      cc = less_equal;
-      break;
-    case Token::GTE:
-      cc = greater_equal;
-      break;
-    case Token::IN: {
-      if (!left_already_loaded) Load(left);
-      Load(right);
-      Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
-      frame_->Push(&answer);  // push the result
-      return;
-    }
-    case Token::INSTANCEOF: {
-      if (!left_already_loaded) Load(left);
-      Load(right);
-      InstanceofStub stub(InstanceofStub::kNoFlags);
-      Result answer = frame_->CallStub(&stub, 2);
-      answer.ToRegister();
-      __ test(answer.reg(), Operand(answer.reg()));
-      answer.Unuse();
-      destination()->Split(zero);
-      return;
-    }
-    default:
-      UNREACHABLE();
-  }
-
-  if (left->IsTrivial()) {
-    if (!left_already_loaded) {
-      Load(right);
-      Result right_result = frame_->Pop();
-      frame_->Push(left);
-      frame_->Push(&right_result);
-    } else {
-      Load(right);
-    }
-  } else {
-    if (!left_already_loaded) Load(left);
-    Load(right);
-  }
-  Comparison(node, cc, strict, destination());
-}
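
// Editorial note: a worked check of the borderline_exponent used in
// the 'x < 2^31' fast path above (assumptions: IEEE-754 doubles, the
// ia32 layout with kExponentBias == 1023 and kExponentShift == 20 in
// the high word).  A positive double is below 2^31 exactly when its
// biased exponent is below 1023 + 31 == 1054.
#include <cassert>
#include <cstdint>
#include <cstring>

static uint32_t HighWord(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // Little-endian x86 layout.
  return static_cast<uint32_t>(bits >> 32);
}

int main() {
  const uint32_t kBorderline = (1023 + 31) << 20;  // == 0x41E00000.
  assert(HighWord(2147483647.0) <  kBorderline);   // 2^31 - 1: less.
  assert(HighWord(2147483648.0) >= kBorderline);   // 2^31: not less.
  return 0;
}
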
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
-  ASSERT(!in_safe_int32_mode());
-  Comment cmnt(masm_, "[ CompareToNull");
-
-  Load(node->expression());
-  Result operand = frame_->Pop();
-  operand.ToRegister();
-  __ cmp(operand.reg(), FACTORY->null_value());
-  if (node->is_strict()) {
-    operand.Unuse();
-    destination()->Split(equal);
-  } else {
-    // The 'null' value is only equal to 'undefined' if using non-strict
-    // comparisons.
-    destination()->true_target()->Branch(equal);
-    __ cmp(operand.reg(), FACTORY->undefined_value());
-    destination()->true_target()->Branch(equal);
-    __ test(operand.reg(), Immediate(kSmiTagMask));
-    destination()->false_target()->Branch(equal);
-
-    // It can be an undetectable object.
-    // Use a scratch register in preference to spilling operand.reg().
-    Result temp = allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ mov(temp.reg(),
-           FieldOperand(operand.reg(), HeapObject::kMapOffset));
-    __ test_b(FieldOperand(temp.reg(), Map::kBitFieldOffset),
-              1 << Map::kIsUndetectable);
-    temp.Unuse();
-    operand.Unuse();
-    destination()->Split(not_zero);
-  }
-}
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
-  return (allocator()->count(eax) == (frame()->is_used(eax) ? 1 : 0))
-      && (allocator()->count(ebx) == (frame()->is_used(ebx) ? 1 : 0))
-      && (allocator()->count(ecx) == (frame()->is_used(ecx) ? 1 : 0))
-      && (allocator()->count(edx) == (frame()->is_used(edx) ? 1 : 0))
-      && (allocator()->count(edi) == (frame()->is_used(edi) ? 1 : 0));
-}
-#endif
-
-
-// Emit a LoadIC call to get the value from receiver and leave it in
-// dst.
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
-  DeferredReferenceGetNamedValue(Register dst,
-                                 Register receiver,
-                                 Handle<String> name,
-                                 bool is_contextual)
-      : dst_(dst),
-        receiver_(receiver),
-        name_(name),
-        is_contextual_(is_contextual),
-        is_dont_delete_(false) {
-    set_comment(is_contextual
-                ? "[ DeferredReferenceGetNamedValue (contextual)"
-                : "[ DeferredReferenceGetNamedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
-  void set_is_dont_delete(bool value) {
-    ASSERT(is_contextual_);
-    is_dont_delete_ = value;
-  }
-
- private:
-  Label patch_site_;
-  Register dst_;
-  Register receiver_;
-  Handle<String> name_;
-  bool is_contextual_;
-  bool is_dont_delete_;
-};
-
-
-void DeferredReferenceGetNamedValue::Generate() {
-  if (!receiver_.is(eax)) {
-    __ mov(eax, receiver_);
-  }
-  __ Set(ecx, Immediate(name_));
-  Handle<Code> ic(masm()->isolate()->builtins()->builtin(
-      Builtins::kLoadIC_Initialize));
-  RelocInfo::Mode mode = is_contextual_
-      ? RelocInfo::CODE_TARGET_CONTEXT
-      : RelocInfo::CODE_TARGET;
-  __ call(ic, mode);
-  // The call must be followed by one of:
-  // - a test eax instruction to indicate that the inobject property
-  //   case was inlined.
-  // - a mov ecx or mov edx instruction to indicate that the
-  //   contextual property load was inlined.
-  //
-  // Store the delta to the map check instruction here in the test
-  // instruction.  Use masm_-> instead of the __ macro since the
-  // latter can't return a value.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  Counters* counters = masm()->isolate()->counters();
-  if (is_contextual_) {
-    masm_->mov(is_dont_delete_ ? edx : ecx, -delta_to_patch_site);
-    __ IncrementCounter(counters->named_load_global_inline_miss(), 1);
-    if (is_dont_delete_) {
-      __ IncrementCounter(counters->dont_delete_hint_miss(), 1);
-    }
-  } else {
-    masm_->test(eax, Immediate(-delta_to_patch_site));
-    __ IncrementCounter(counters->named_load_inline_miss(), 1);
-  }
-
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
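
// Editorial note: a schematic decoder (hypothetical helper, not the
// actual IC code) for the patch-site protocol above.  0xA9 is the
// x86 'test eax, imm32' opcode, so the four bytes after it hold
// -delta_to_patch_site; adding that (negative) immediate back to the
// address of the test instruction lands on the patchable map check.
#include <cstddef>
#include <cstdint>
#include <cstring>

static uint8_t* FindInlinedLoadPatchSite(uint8_t* after_call) {
  if (after_call[0] != 0xA9) return NULL;  // No inlined in-object case.
  int32_t imm;                             // Holds -delta_to_patch_site.
  std::memcpy(&imm, after_call + 1, sizeof(imm));
  return after_call + imm;                 // imm < 0: step back to the cmp.
}
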
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceGetKeyedValue(Register dst,
-                                 Register receiver,
-                                 Register key)
-      : dst_(dst), receiver_(receiver), key_(key) {
-    set_comment("[ DeferredReferenceGetKeyedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
- private:
-  Label patch_site_;
-  Register dst_;
-  Register receiver_;
-  Register key_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
-  if (!receiver_.is(eax)) {
-    // Register eax is available for key.
-    if (!key_.is(eax)) {
-      __ mov(eax, key_);
-    }
-    if (!receiver_.is(edx)) {
-      __ mov(edx, receiver_);
-    }
-  } else if (!key_.is(edx)) {
-    // Register edx is available for receiver.
-    if (!receiver_.is(edx)) {
-      __ mov(edx, receiver_);
-    }
-    if (!key_.is(eax)) {
-      __ mov(eax, key_);
-    }
-  } else {
-    __ xchg(edx, eax);
-  }
-  // Calculate the delta from the IC call instruction to the map check
-  // cmp instruction in the inlined version.  This delta is stored in
-  // a test(eax, delta) instruction after the call so that we can find
-  // it in the IC initialization code and patch the cmp instruction.
-  // This means that we cannot allow test instructions after calls to
-  // KeyedLoadIC stubs in other places.
-  Handle<Code> ic(masm()->isolate()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  // The delta from the start of the map-compare instruction to the
-  // test instruction.  We use masm_-> directly here instead of the __
-  // macro because the macro sometimes uses macro expansion to turn
-  // into something that can't return a value.  This is encountered
-  // when doing generated code coverage tests.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  masm_->test(eax, Immediate(-delta_to_patch_site));
-  Counters* counters = masm()->isolate()->counters();
-  __ IncrementCounter(counters->keyed_load_inline_miss(), 1);
-
-  if (!dst_.is(eax)) __ mov(dst_, eax);
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceSetKeyedValue(Register value,
-                                 Register key,
-                                 Register receiver,
-                                 Register scratch,
-                                 StrictModeFlag strict_mode)
-      : value_(value),
-        key_(key),
-        receiver_(receiver),
-        scratch_(scratch),
-        strict_mode_(strict_mode) {
-    set_comment("[ DeferredReferenceSetKeyedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
- private:
-  Register value_;
-  Register key_;
-  Register receiver_;
-  Register scratch_;
-  Label patch_site_;
-  StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
-  Counters* counters = masm()->isolate()->counters();
-  __ IncrementCounter(counters->keyed_store_inline_miss(), 1);
-  // Move value_ to eax, key_ to ecx, and receiver_ to edx.
-  Register old_value = value_;
-
-  // First, move value to eax.
-  if (!value_.is(eax)) {
-    if (key_.is(eax)) {
-      // Move key_ out of eax, preferably to ecx.
-      if (!value_.is(ecx) && !receiver_.is(ecx)) {
-        __ mov(ecx, key_);
-        key_ = ecx;
-      } else {
-        __ mov(scratch_, key_);
-        key_ = scratch_;
-      }
-    }
-    if (receiver_.is(eax)) {
-      // Move receiver_ out of eax, preferably to edx.
-      if (!value_.is(edx) && !key_.is(edx)) {
-        __ mov(edx, receiver_);
-        receiver_ = edx;
-      } else {
-        // Both moves to scratch are from eax; no valid path hits both.
-        __ mov(scratch_, receiver_);
-        receiver_ = scratch_;
-      }
-    }
-    __ mov(eax, value_);
-    value_ = eax;
-  }
-
-  // Now value_ is in eax.  Move the other two to the right positions.
-  // We do not update the variables key_ and receiver_ to ecx and edx.
-  if (key_.is(ecx)) {
-    if (!receiver_.is(edx)) {
-      __ mov(edx, receiver_);
-    }
-  } else if (key_.is(edx)) {
-    if (receiver_.is(ecx)) {
-      __ xchg(edx, ecx);
-    } else {
-      __ mov(ecx, key_);
-      if (!receiver_.is(edx)) {
-        __ mov(edx, receiver_);
-      }
-    }
-  } else {  // Key is not in edx or ecx.
-    if (!receiver_.is(edx)) {
-      __ mov(edx, receiver_);
-    }
-    __ mov(ecx, key_);
-  }
-
-  // Call the IC stub.
-  Handle<Code> ic(masm()->isolate()->builtins()->builtin(
-      (strict_mode_ == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
-                                    : Builtins::kKeyedStoreIC_Initialize));
-  __ call(ic, RelocInfo::CODE_TARGET);
-  // The delta from the start of the map-compare instruction to the
-  // test instruction.  We use masm_-> directly here instead of the
-  // __ macro because the macro sometimes uses macro expansion to turn
-  // into something that can't return a value.  This is encountered
-  // when doing generated code coverage tests.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  masm_->test(eax, Immediate(-delta_to_patch_site));
-  // Restore value (returned from store IC) register.
-  if (!old_value.is(eax)) __ mov(old_value, eax);
-}
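
// Editorial note: a host-side model (register names are only labels
// here) of the shuffle above, which must land value in eax, key in
// ecx and receiver in edx without losing any of the three, spending
// at most one scratch register.  The worst case is value in ecx and
// key in eax: key must vacate eax, but its own slot is occupied.
#include <cassert>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::string> reg;
  reg["eax"] = "key"; reg["ecx"] = "value"; reg["edx"] = "receiver";
  reg["scratch"] = reg["eax"];  // Key leaves eax; ecx and edx are taken.
  reg["eax"] = reg["ecx"];      // Value reaches its slot.
  reg["ecx"] = reg["scratch"];  // Key reaches its slot.
  assert(reg["eax"] == "value");
  assert(reg["ecx"] == "key");
  assert(reg["edx"] == "receiver");
  return 0;
}
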
-
-
-Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-
-  Isolate* isolate = masm()->isolate();
-  Factory* factory = isolate->factory();
-  Counters* counters = isolate->counters();
-
-  bool contextual_load_in_builtin =
-      is_contextual &&
-      (isolate->bootstrapper()->IsActive() ||
-       (!info_->closure().is_null() && info_->closure()->IsBuiltin()));
-
-  Result result;
-  // Do not inline in global code or when not inside a loop.
-  if (scope()->is_global_scope() ||
-      loop_nesting() == 0 ||
-      contextual_load_in_builtin) {
-    Comment cmnt(masm(), "[ Load from named Property");
-    frame()->Push(name);
-
-    RelocInfo::Mode mode = is_contextual
-        ? RelocInfo::CODE_TARGET_CONTEXT
-        : RelocInfo::CODE_TARGET;
-    result = frame()->CallLoadIC(mode);
-    // A test eax instruction following the call signals that the inobject
-    // property case was inlined.  Ensure that there is not a test eax
-    // instruction here.
-    __ nop();
-  } else {
-    // Inline the property load.
-    Comment cmnt(masm(), is_contextual
-                         ? "[ Inlined contextual property load"
-                         : "[ Inlined named property load");
-    Result receiver = frame()->Pop();
-    receiver.ToRegister();
-
-    result = allocator()->Allocate();
-    ASSERT(result.is_valid());
-    DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(result.reg(),
-                                           receiver.reg(),
-                                           name,
-                                           is_contextual);
-
-    if (!is_contextual) {
-      // Check that the receiver is a heap object.
-      __ test(receiver.reg(), Immediate(kSmiTagMask));
-      deferred->Branch(zero);
-    }
-
-    __ bind(deferred->patch_site());
-    // This is the map check instruction that will be patched (so we can't
-    // use the double underscore macro that may insert instructions).
-    // Initially use an invalid map to force a failure.
-    masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                Immediate(factory->null_value()));
-    // This branch is always a forwards branch, so it's always a fixed
-    // size, which allows the assert below to succeed and patching to work.
-    deferred->Branch(not_equal);
-
-    // The delta from the patch label to the actual load must be
-    // statically known.
-    ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
-           LoadIC::kOffsetToLoadInstruction);
-
-    if (is_contextual) {
-      // Load the (initially invalid) cell and get its value.
-      masm()->mov(result.reg(), factory->null_value());
-      if (FLAG_debug_code) {
-        __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
-               factory->global_property_cell_map());
-        __ Assert(equal, "Uninitialized inlined contextual load");
-      }
-      __ mov(result.reg(),
-             FieldOperand(result.reg(), JSGlobalPropertyCell::kValueOffset));
-      __ cmp(result.reg(), factory->the_hole_value());
-      deferred->Branch(equal);
-      bool is_dont_delete = false;
-      if (!info_->closure().is_null()) {
-        // When doing lazy compilation we can check if the global cell
-        // already exists and use its "don't delete" status as a hint.
-        AssertNoAllocation no_gc;
-        v8::internal::GlobalObject* global_object =
-            info_->closure()->context()->global();
-        LookupResult lookup;
-        global_object->LocalLookupRealNamedProperty(*name, &lookup);
-        if (lookup.IsProperty() && lookup.type() == NORMAL) {
-          ASSERT(lookup.holder() == global_object);
-          ASSERT(global_object->property_dictionary()->ValueAt(
-              lookup.GetDictionaryEntry())->IsJSGlobalPropertyCell());
-          is_dont_delete = lookup.IsDontDelete();
-        }
-      }
-      deferred->set_is_dont_delete(is_dont_delete);
-      if (!is_dont_delete) {
-        __ cmp(result.reg(), factory->the_hole_value());
-        deferred->Branch(equal);
-      } else if (FLAG_debug_code) {
-        __ cmp(result.reg(), factory->the_hole_value());
-        __ Check(not_equal, "DontDelete cells can't contain the hole");
-      }
-      __ IncrementCounter(counters->named_load_global_inline(), 1);
-      if (is_dont_delete) {
-        __ IncrementCounter(counters->dont_delete_hint_hit(), 1);
-      }
-    } else {
-      // The initial (invalid) offset has to be large enough to force a 32-bit
-      // instruction encoding to allow patching with an arbitrary offset.  Use
-      // kMaxInt (minus kHeapObjectTag).
-      int offset = kMaxInt;
-      masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
-      __ IncrementCounter(counters->named_load_inline(), 1);
-    }
-
-    deferred->BindExit();
-  }
-  ASSERT(frame()->height() == original_height - 1);
-  return result;
-}
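
// Editorial note: why the placeholder offset above must be kMaxInt
// (sketch; kMaxInt and kHeapObjectTag as in V8).  The ia32 assembler
// emits a 1-byte displacement whenever an offset fits in a signed
// 8-bit field, and the IC later patches an arbitrary field offset
// into the mov, so the placeholder has to force the 4-byte encoding.
#include <cassert>
#include <climits>

int main() {
  const int kMaxInt = INT_MAX;
  const int kHeapObjectTag = 1;                // V8's heap-pointer tag.
  int placeholder = kMaxInt - kHeapObjectTag;  // What FieldOperand emits.
  assert(placeholder < -128 || placeholder > 127);  // disp32 guaranteed.
  return 0;
}
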
-
-
-Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
-  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
-  Result result;
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
-    result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
-    // A test eax instruction following the call signals that the inobject
-    // property case was inlined.  Ensure that there is not a test eax
-    // instruction here.
-    __ nop();
-  } else {
-    // Inline the in-object property case.
-    JumpTarget slow, done;
-    Label patch_site;
-
-    // Get the value and receiver from the stack.
-    Result value = frame()->Pop();
-    value.ToRegister();
-    Result receiver = frame()->Pop();
-    receiver.ToRegister();
-
-    // Allocate result register.
-    result = allocator()->Allocate();
-    ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
-
-    // Check that the receiver is a heap object.
-    __ test(receiver.reg(), Immediate(kSmiTagMask));
-    slow.Branch(zero, &value, &receiver);
-
-    // This is the map check instruction that will be patched (so we can't
-    // use the double underscore macro that may insert instructions).
-    // Initially use an invalid map to force a failure.
-    __ bind(&patch_site);
-    masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                Immediate(FACTORY->null_value()));
-    // This branch is always a forwards branch, so it's always a fixed
-    // size, which allows the assert below to succeed and patching to work.
-    slow.Branch(not_equal, &value, &receiver);
-
-    // The delta from the patch label to the store offset must be
-    // statically known.
-    ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
-           StoreIC::kOffsetToStoreInstruction);
-
-    // The initial (invalid) offset has to be large enough to force a 32-bit
-    // instruction encoding to allow patching with an arbitrary offset.  Use
-    // kMaxInt (minus kHeapObjectTag).
-    int offset = kMaxInt;
-    __ mov(FieldOperand(receiver.reg(), offset), value.reg());
-    __ mov(result.reg(), Operand(value.reg()));
-
-    // Allocate scratch register for write barrier.
-    Result scratch = allocator()->Allocate();
-    ASSERT(scratch.is_valid());
-
-    // The write barrier clobbers all input registers, so spill the
-    // receiver and the value.
-    frame_->Spill(receiver.reg());
-    frame_->Spill(value.reg());
-
-    // If the receiver and the value share a register, allocate a new
-    // register for the receiver.
-    if (receiver.reg().is(value.reg())) {
-      receiver = allocator()->Allocate();
-      ASSERT(receiver.is_valid());
-      __ mov(receiver.reg(), Operand(value.reg()));
-    }
-
-    // Update the write barrier. To save instructions in the inlined
-    // version we do not filter smis.
-    Label skip_write_barrier;
-    __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
-    int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
-    __ lea(scratch.reg(), Operand(receiver.reg(), offset));
-    __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
-    if (FLAG_debug_code) {
-      __ mov(receiver.reg(), Immediate(BitCast<int32_t>(kZapValue)));
-      __ mov(value.reg(), Immediate(BitCast<int32_t>(kZapValue)));
-      __ mov(scratch.reg(), Immediate(BitCast<int32_t>(kZapValue)));
-    }
-    __ bind(&skip_write_barrier);
-    value.Unuse();
-    scratch.Unuse();
-    receiver.Unuse();
-    done.Jump(&result);
-
-    slow.Bind(&value, &receiver);
-    frame()->Push(&receiver);
-    frame()->Push(&value);
-    result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
-    // Encode the offset to the map check instruction and the offset
-    // to the write barrier store address computation in a test eax
-    // instruction.
-    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
-    __ test(eax,
-            Immediate((delta_to_record_write << 16) | delta_to_patch_site));
-    done.Bind(&result);
-  }
-
-  ASSERT_EQ(expected_height, frame()->height());
-  return result;
-}
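
// Editorial note: a decoding sketch (hypothetical helper, not V8's
// patching code) for the packed immediate above.  One 'test eax,
// imm32' carries both offsets: the high 16 bits reach the write
// barrier's address computation, the low 16 bits the map-check cmp.
#include <cstdint>

struct StorePatchDeltas {
  uint16_t to_record_write;
  uint16_t to_map_check;
};

static StorePatchDeltas UnpackStoreDeltas(uint32_t imm) {
  StorePatchDeltas d;
  d.to_record_write = static_cast<uint16_t>(imm >> 16);
  d.to_map_check = static_cast<uint16_t>(imm & 0xFFFF);
  return d;
}
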
-
-
-Result CodeGenerator::EmitKeyedLoad() {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Result result;
-  // Inline array load code if inside of a loop.  We do not know the
-  // receiver map yet, so we initially generate the code with a check
-  // against an invalid map.  In the inline cache code, we patch the map
-  // check if appropriate.
-  if (loop_nesting() > 0) {
-    Comment cmnt(masm_, "[ Inlined load from keyed Property");
-
-    // Use a fresh temporary to load the elements without destroying
-    // the receiver which is needed for the deferred slow case.
-    Result elements = allocator()->Allocate();
-    ASSERT(elements.is_valid());
-
-    Result key = frame_->Pop();
-    Result receiver = frame_->Pop();
-    key.ToRegister();
-    receiver.ToRegister();
-
-    // If key and receiver are shared registers on the frame, their values will
-    // be automatically saved and restored when going to deferred code.
-    // The result is in elements, which is guaranteed non-shared.
-    DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue(elements.reg(),
-                                           receiver.reg(),
-                                           key.reg());
-
-    __ test(receiver.reg(), Immediate(kSmiTagMask));
-    deferred->Branch(zero);
-
-    // Check that the receiver has the expected map.
-    // Initially, use an invalid map. The map is patched in the IC
-    // initialization code.
-    __ bind(deferred->patch_site());
-    // Use masm-> here instead of the double underscore macro since extra
-    // coverage code can interfere with the patching.
-    masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-               Immediate(FACTORY->null_value()));
-    deferred->Branch(not_equal);
-
-    // Check that the key is a smi.
-    if (!key.is_smi()) {
-      __ test(key.reg(), Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
-    }
-
-    // Get the elements array from the receiver.
-    __ mov(elements.reg(),
-           FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-    __ AssertFastElements(elements.reg());
-
-    // Check that the key is within bounds.
-    __ cmp(key.reg(),
-           FieldOperand(elements.reg(), FixedArray::kLengthOffset));
-    deferred->Branch(above_equal);
-
-    // Load and check that the result is not the hole.
-    // Key holds a smi.
-    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-    __ mov(elements.reg(),
-           FieldOperand(elements.reg(),
-                        key.reg(),
-                        times_2,
-                        FixedArray::kHeaderSize));
-    result = elements;
-    __ cmp(Operand(result.reg()), Immediate(FACTORY->the_hole_value()));
-    deferred->Branch(equal);
-    __ IncrementCounter(masm_->isolate()->counters()->keyed_load_inline(), 1);
-
-    deferred->BindExit();
-  } else {
-    Comment cmnt(masm_, "[ Load from keyed Property");
-    result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
-    // Make sure that we do not have a test instruction after the
-    // call.  A test instruction after the call is used to
-    // indicate that we have generated an inline version of the
-    // keyed load.  The explicit nop instruction is here because
-    // the push that follows might be peep-hole optimized away.
-    __ nop();
-  }
-  ASSERT(frame()->height() == original_height - 2);
-  return result;
-}
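
// Editorial note: why the single unsigned 'above_equal' bounds check
// above also rejects negative keys (sketch; a smi is a 31-bit integer
// shifted left by kSmiTagSize == 1 with a zero tag bit).  A negative
// smi has its sign bit set, so reinterpreted as an unsigned word it
// exceeds every valid length and fails 'key < length' just like a
// too-large key does.
#include <cassert>
#include <cstdint>

static uint32_t AsSmi(int32_t value) {
  return static_cast<uint32_t>(value) << 1;  // kSmiTag == 0, shift == 1.
}

int main() {
  const uint32_t length = AsSmi(100);
  assert(!(AsSmi(-1) < length));   // Negative key: caught.
  assert(!(AsSmi(200) < length));  // Too-large key: caught.
  assert(AsSmi(42) < length);      // In-range key: passes.
  return 0;
}
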
-
-
-Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Result result;
-  // Generate inlined version of the keyed store if the code is in a loop
-  // and the key is likely to be a smi.
-  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
-    Comment cmnt(masm(), "[ Inlined store to keyed Property");
-
-    // Get the receiver, key and value into registers.
-    result = frame()->Pop();
-    Result key = frame()->Pop();
-    Result receiver = frame()->Pop();
-
-    Result tmp = allocator_->Allocate();
-    ASSERT(tmp.is_valid());
-    Result tmp2 = allocator_->Allocate();
-    ASSERT(tmp2.is_valid());
-
-    // Determine whether the value is a constant before putting it in a
-    // register.
-    bool value_is_constant = result.is_constant();
-
-    // Make sure that value, key and receiver are in registers.
-    result.ToRegister();
-    key.ToRegister();
-    receiver.ToRegister();
-
-    DeferredReferenceSetKeyedValue* deferred =
-        new DeferredReferenceSetKeyedValue(result.reg(),
-                                           key.reg(),
-                                           receiver.reg(),
-                                           tmp.reg(),
-                                           strict_mode_flag());
-
-    // Check that the receiver is not a smi.
-    __ test(receiver.reg(), Immediate(kSmiTagMask));
-    deferred->Branch(zero);
-
-    // Check that the key is a smi.
-    if (!key.is_smi()) {
-      __ test(key.reg(), Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
-    }
-
-    // Check that the receiver is a JSArray.
-    __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, tmp.reg());
-    deferred->Branch(not_equal);
-
-    // Get the elements array from the receiver and check that it is not a
-    // dictionary.
-    __ mov(tmp.reg(),
-           FieldOperand(receiver.reg(), JSArray::kElementsOffset));
-
-    // Check whether it is possible to omit the write barrier. If the elements
-    // array is in new space or the value written is a smi we can safely update
-    // the elements array without write barrier.
-    Label in_new_space;
-    __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
-    if (!value_is_constant) {
-      __ test(result.reg(), Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
-    }
-
-    __ bind(&in_new_space);
-    // Bind the deferred code patch site to be able to locate the fixed
-    // array map comparison.  When debugging, we patch this comparison to
-    // always fail so that we will hit the IC call in the deferred code
-    // which will allow the debugger to break for fast case stores.
-    __ bind(deferred->patch_site());
-    __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
-           Immediate(FACTORY->fixed_array_map()));
-    deferred->Branch(not_equal);
-
-    // Check that the key is within bounds.  Both the key and the length of
-    // the JSArray are smis (because the fixed array check above ensures the
-    // elements are in fast case). Use unsigned comparison to handle negative
-    // keys.
-    __ cmp(key.reg(),
-           FieldOperand(receiver.reg(), JSArray::kLengthOffset));
-    deferred->Branch(above_equal);
-
-    // Store the value.
-    __ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
-    __ IncrementCounter(masm_->isolate()->counters()->keyed_store_inline(), 1);
-
-    deferred->BindExit();
-  } else {
-    result = frame()->CallKeyedStoreIC(strict_mode_flag());
-    // Make sure that we do not have a test instruction after the
-    // call.  A test instruction after the call is used to
-    // indicate that we have generated an inline version of the
-    // keyed store.
-    __ nop();
-  }
-  ASSERT(frame()->height() == original_height - 3);
-  return result;
-}
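
// Editorial note: the write-barrier elision rule used above as a
// stand-alone predicate (sketch; the booleans stand in for V8's real
// page and tag checks).  A store needs no barrier when the value is a
// smi (not a heap pointer) or the elements array itself lives in new
// space, since the barrier only has to record old-to-new pointers.
static bool NeedsWriteBarrier(bool elements_in_new_space,
                              bool value_is_smi) {
  return !elements_in_new_space && !value_is_smi;
}
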
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-Handle<String> Reference::GetName() {
-  ASSERT(type_ == NAMED);
-  Property* property = expression_->AsProperty();
-  if (property == NULL) {
-    // Global variable reference treated as a named property reference.
-    VariableProxy* proxy = expression_->AsVariableProxy();
-    ASSERT(proxy->AsVariable() != NULL);
-    ASSERT(proxy->AsVariable()->is_global());
-    return proxy->name();
-  } else {
-    Literal* raw_name = property->key()->AsLiteral();
-    ASSERT(raw_name != NULL);
-    return Handle<String>::cast(raw_name->handle());
-  }
-}
-
-
-void Reference::GetValue() {
-  ASSERT(!cgen_->in_spilled_code());
-  ASSERT(cgen_->HasValidEntryRegisters());
-  ASSERT(!is_illegal());
-  MacroAssembler* masm = cgen_->masm();
-
-  // Record the source position for the property load.
-  Property* property = expression_->AsProperty();
-  if (property != NULL) {
-    cgen_->CodeForSourcePosition(property->position());
-  }
-
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Load from Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      ASSERT(slot != NULL);
-      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-      if (!persist_after_get_) set_unloaded();
-      break;
-    }
-
-    case NAMED: {
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      bool is_global = var != NULL;
-      ASSERT(!is_global || var->is_global());
-      if (persist_after_get_) cgen_->frame()->Dup();
-      Result result = cgen_->EmitNamedLoad(GetName(), is_global);
-      if (!persist_after_get_) set_unloaded();
-      cgen_->frame()->Push(&result);
-      break;
-    }
-
-    case KEYED: {
-      if (persist_after_get_) {
-        cgen_->frame()->PushElementAt(1);
-        cgen_->frame()->PushElementAt(1);
-      }
-      Result value = cgen_->EmitKeyedLoad();
-      cgen_->frame()->Push(&value);
-      if (!persist_after_get_) set_unloaded();
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void Reference::TakeValue() {
-  // For non-constant frame-allocated slots, we invalidate the value in the
-  // slot.  For all others, we fall back on GetValue.
-  ASSERT(!cgen_->in_spilled_code());
-  ASSERT(!is_illegal());
-  if (type_ != SLOT) {
-    GetValue();
-    return;
-  }
-
-  Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-  ASSERT(slot != NULL);
-  if (slot->type() == Slot::LOOKUP ||
-      slot->type() == Slot::CONTEXT ||
-      slot->var()->mode() == Variable::CONST ||
-      slot->is_arguments()) {
-    GetValue();
-    return;
-  }
-
-  // Only non-constant, frame-allocated parameters and locals can
-  // reach here. Be careful not to use the optimizations for arguments
-  // object access since it may not have been initialized yet.
-  ASSERT(!slot->is_arguments());
-  if (slot->type() == Slot::PARAMETER) {
-    cgen_->frame()->TakeParameterAt(slot->index());
-  } else {
-    ASSERT(slot->type() == Slot::LOCAL);
-    cgen_->frame()->TakeLocalAt(slot->index());
-  }
-
-  ASSERT(persist_after_get_);
-  // Do not unload the reference, because it is used in SetValue.
-}
-
-
-void Reference::SetValue(InitState init_state) {
-  ASSERT(cgen_->HasValidEntryRegisters());
-  ASSERT(!is_illegal());
-  MacroAssembler* masm = cgen_->masm();
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Store to Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      ASSERT(slot != NULL);
-      cgen_->StoreToSlot(slot, init_state);
-      set_unloaded();
-      break;
-    }
-
-    case NAMED: {
-      Comment cmnt(masm, "[ Store to named Property");
-      Result answer = cgen_->EmitNamedStore(GetName(), false);
-      cgen_->frame()->Push(&answer);
-      set_unloaded();
-      break;
-    }
-
-    case KEYED: {
-      Comment cmnt(masm, "[ Store to keyed Property");
-      Property* property = expression()->AsProperty();
-      ASSERT(property != NULL);
-
-      Result answer = cgen_->EmitKeyedStore(property->key()->type());
-      cgen_->frame()->Push(&answer);
-      set_unloaded();
-      break;
-    }
-
-    case UNLOADED:
-    case ILLEGAL:
-      UNREACHABLE();
-  }
-}
-
-
-#undef __
-
 #define __ masm.
 
-
 static void MemCopyWrapper(void* dest, const void* src, size_t size) {
   memcpy(dest, src, size);
 }
 
 
-MemCopyFunction CreateMemCopyFunction() {
-  HandleScope scope;
-  MacroAssembler masm(NULL, 1 * KB);
+OS::MemCopyFunction CreateMemCopyFunction() {
+  size_t actual_size;
+  // Allocate buffer in executable space.
+  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
+                                                 &actual_size,
+                                                 true));
+  if (buffer == NULL) return &MemCopyWrapper;
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
 
   // Generated code is put into a fixed, unmovable buffer, and not into
   // the V8 heap. We can't, and don't, refer to any relocatable addresses
@@ -10198,13 +84,13 @@
 
   if (FLAG_debug_code) {
     __ cmp(Operand(esp, kSizeOffset + stack_offset),
-           Immediate(kMinComplexMemCopy));
+           Immediate(OS::kMinComplexMemCopy));
     Label ok;
     __ j(greater_equal, &ok);
     __ int3();
     __ bind(&ok);
   }
-  if (masm.isolate()->cpu_features()->IsSupported(SSE2)) {
+  if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope enable(SSE2);
     __ push(edi);
     __ push(esi);
@@ -10232,7 +118,6 @@
     __ test(Operand(src), Immediate(0x0F));
     __ j(not_zero, &unaligned_source);
     {
-      __ IncrementCounter(masm.isolate()->counters()->memcopy_aligned(), 1);
       // Copy loop for aligned source and destination.
       __ mov(edx, count);
       Register loop_count = ecx;
@@ -10280,7 +165,6 @@
       // Copy loop for unaligned source and aligned destination.
       // If source is not aligned, we can't read it as efficiently.
       __ bind(&unaligned_source);
-      __ IncrementCounter(masm.isolate()->counters()->memcopy_unaligned(), 1);
       __ mov(edx, ecx);
       Register loop_count = ecx;
       Register count = edx;
@@ -10324,7 +208,6 @@
     }
 
   } else {
-    __ IncrementCounter(masm.isolate()->counters()->memcopy_noxmm(), 1);
     // SSE2 not supported. Unlikely to happen in practice.
     __ push(edi);
     __ push(esi);
@@ -10371,13 +254,8 @@
   masm.GetCode(&desc);
   ASSERT(desc.reloc_size == 0);
 
-  // Copy the generated code into an executable chunk and return a pointer
-  // to the first instruction in it as a C++ function pointer.
-  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
-  if (chunk == NULL) return &MemCopyWrapper;
-  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
-  return FUNCTION_CAST<MemCopyFunction>(chunk->GetStartAddress());
+  CPU::FlushICache(buffer, actual_size);
+  return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
 }
 
 #undef __
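
// Editorial note: a usage sketch (assuming OS::MemCopyFunction has
// MemCopyWrapper's signature, as the fallback return above implies).
// CreateMemCopyFunction either returns the generated stub or plain
// memcpy, so callers can cache and invoke the result uniformly:
//
//   static OS::MemCopyFunction copy = CreateMemCopyFunction();
//   copy(dest, src, size);  // Stub and fallback share one convention.
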
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index acd651b..8f090b1 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,275 +30,18 @@
 
 #include "ast.h"
 #include "ic-inl.h"
-#include "jump-target-heavy.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations
 class CompilationInfo;
-class DeferredCode;
-class FrameRegisterState;
-class RegisterAllocator;
-class RegisterFile;
-class RuntimeCallHelper;
-
-
-// -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame.  The reference may be consumed
-// by GetValue, TakeValue and SetValue.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
-  // The values of the types is important, see size().
-  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-  Reference(CodeGenerator* cgen,
-            Expression* expression,
-            bool persist_after_get = false);
-  ~Reference();
-
-  Expression* expression() const { return expression_; }
-  Type type() const { return type_; }
-  void set_type(Type value) {
-    ASSERT_EQ(ILLEGAL, type_);
-    type_ = value;
-  }
-
-  void set_unloaded() {
-    ASSERT_NE(ILLEGAL, type_);
-    ASSERT_NE(UNLOADED, type_);
-    type_ = UNLOADED;
-  }
-  // The size the reference takes up on the stack.
-  int size() const {
-    return (type_ < SLOT) ? 0 : type_;
-  }
-
-  bool is_illegal() const { return type_ == ILLEGAL; }
-  bool is_slot() const { return type_ == SLOT; }
-  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
-  bool is_unloaded() const { return type_ == UNLOADED; }
-
-  // Return the name.  Only valid for named property references.
-  Handle<String> GetName();
-
-  // Generate code to push the value of the reference on top of the
-  // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is consumed by the call unless the
-  // reference is for a compound assignment.
-  // If the reference is not consumed, it is left in place under its value.
-  void GetValue();
-
-  // Like GetValue except that the slot is expected to be written to before
-  // being read from again.  The value of the reference may be invalidated,
-  // causing subsequent attempts to read it to fail.
-  void TakeValue();
-
-  // Generate code to store the value on top of the expression stack in the
-  // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The  value is stored in the location specified
-  // by the reference, and is left on top of the stack, after the reference
-  // is popped from beneath it (unloaded).
-  void SetValue(InitState init_state);
-
- private:
-  CodeGenerator* cgen_;
-  Expression* expression_;
-  Type type_;
-  // Keep the reference on the stack after get, so it can be used by set later.
-  bool persist_after_get_;
-};
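
// Editorial note: a lifecycle sketch (not code from the tree) for a
// compound assignment, where the same reference is read and then
// written.  'cgen' and 'expr' are assumed to be live pointers, and
// constructing the Reference is what puts it on the virtual frame:
static void ReadModifyWrite(CodeGenerator* cgen, Expression* expr,
                            InitState init_state) {
  Reference ref(cgen, expr, true);  // persist_after_get: keep for SetValue.
  ref.GetValue();  // Pushes the current value; ref stays beneath it.
  // ... combine the loaded value with the right-hand side here ...
  ref.SetValue(init_state);  // Stores TOS and unloads the reference.
}
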
-
-
-// -------------------------------------------------------------------------
-// Control destinations.
-
-// A control destination encapsulates a pair of jump targets and a
-// flag indicating which one is the preferred fall-through.  The
-// preferred fall-through must be unbound, the other may be already
-// bound (i.e., a backward target).
-//
-// The true and false targets may be jumped to unconditionally or
-// control may split conditionally.  Unconditional jumping and
-// splitting should be emitted in tail position (as the last thing
-// when compiling an expression) because they can cause either label
-// to be bound or the non-fall through to be jumped to leaving an
-// invalid virtual frame.
-//
-// The labels in the control destination can be extracted and
-// manipulated normally without affecting the state of the
-// destination.
-
-class ControlDestination BASE_EMBEDDED {
- public:
-  ControlDestination(JumpTarget* true_target,
-                     JumpTarget* false_target,
-                     bool true_is_fall_through)
-      : true_target_(true_target),
-        false_target_(false_target),
-        true_is_fall_through_(true_is_fall_through),
-        is_used_(false) {
-    ASSERT(true_is_fall_through ? !true_target->is_bound()
-                                : !false_target->is_bound());
-  }
-
-  // Accessors for the jump targets.  Directly jumping or branching to
-  // or binding the targets will not update the destination's state.
-  JumpTarget* true_target() const { return true_target_; }
-  JumpTarget* false_target() const { return false_target_; }
-
-  // True if the destination has been jumped to unconditionally or
-  // control has been split to both targets.  This predicate does not
-  // test whether the targets have been extracted and manipulated as
-  // raw jump targets.
-  bool is_used() const { return is_used_; }
-
-  // True if the destination is used and the true target (respectively
-  // false target) was the fall through.  If the target is backward,
-  // "fall through" included jumping unconditionally to it.
-  bool true_was_fall_through() const {
-    return is_used_ && true_is_fall_through_;
-  }
-
-  bool false_was_fall_through() const {
-    return is_used_ && !true_is_fall_through_;
-  }
-
-  // Emit a branch to one of the true or false targets, and bind the
-  // other target.  Because this binds the fall-through target, it
-  // should be emitted in tail position (as the last thing when
-  // compiling an expression).
-  void Split(Condition cc) {
-    ASSERT(!is_used_);
-    if (true_is_fall_through_) {
-      false_target_->Branch(NegateCondition(cc));
-      true_target_->Bind();
-    } else {
-      true_target_->Branch(cc);
-      false_target_->Bind();
-    }
-    is_used_ = true;
-  }
-
-  // Emit an unconditional jump in tail position, to the true target
-  // (if the argument is true) or the false target.  The "jump" will
-  // actually bind the jump target if it is forward, jump to it if it
-  // is backward.
-  void Goto(bool where) {
-    ASSERT(!is_used_);
-    JumpTarget* target = where ? true_target_ : false_target_;
-    if (target->is_bound()) {
-      target->Jump();
-    } else {
-      target->Bind();
-    }
-    is_used_ = true;
-    true_is_fall_through_ = where;
-  }
-
-  // Mark this jump target as used as if Goto had been called, but
-  // without generating a jump or binding a label (the control effect
-  // should have already happened).  This is used when the left
-  // subexpressions of the short-circuit boolean operators are
-  // compiled.
-  void Use(bool where) {
-    ASSERT(!is_used_);
-    ASSERT((where ? true_target_ : false_target_)->is_bound());
-    is_used_ = true;
-    true_is_fall_through_ = where;
-  }
-
-  // Swap the true and false targets but keep the same actual label as
-  // the fall through.  This is used when compiling negated
-  // expressions, where we want to swap the targets but preserve the
-  // state.
-  void Invert() {
-    JumpTarget* temp_target = true_target_;
-    true_target_ = false_target_;
-    false_target_ = temp_target;
-
-    true_is_fall_through_ = !true_is_fall_through_;
-  }
-
- private:
-  // True and false jump targets.
-  JumpTarget* true_target_;
-  JumpTarget* false_target_;
-
-  // Before using the destination: true if the true target is the
-  // preferred fall through, false if the false target is.  After
-  // using the destination: true if the true target was actually used
-  // as the fall through, false if the false target was.
-  bool true_is_fall_through_;
-
-  // True if the Split or Goto functions have been called.
-  bool is_used_;
-};
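
// Editorial note: a usage sketch (not code from the tree) for an
// expression compiled in test position.  'masm' is assumed to be the
// current MacroAssembler and both jump targets forward targets; per
// the constructor's assert, the fall-through one must be unbound:
static void TestIsSmi(MacroAssembler* masm, Register value,
                      JumpTarget* if_true, JumpTarget* if_false) {
  ControlDestination dest(if_true, if_false, true);  // True falls through.
  masm->test(value, Immediate(kSmiTagMask));
  dest.Split(zero);  // Branches to if_false on non-smi, binds if_true.
}
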
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the jump target pair).  It is threaded through
-// the call stack.  Constructing a state implicitly pushes it on the owning
-// code generator's stack of states, and destroying one implicitly pops it.
-//
-// The code generator state is only used for expressions, so statements have
-// the initial state.
-
-class CodeGenState BASE_EMBEDDED {
- public:
-  // Create an initial code generator state.  Destroying the initial state
-  // leaves the code generator with a NULL state.
-  explicit CodeGenState(CodeGenerator* owner);
-
-  // Create a code generator state based on a code generator's current
-  // state.  The new state has its own control destination.
-  CodeGenState(CodeGenerator* owner, ControlDestination* destination);
-
-  // Destroy a code generator state and restore the owning code generator's
-  // previous state.
-  ~CodeGenState();
-
-  // Accessors for the state.
-  ControlDestination* destination() const { return destination_; }
-
- private:
-  // The owning code generator.
-  CodeGenerator* owner_;
-
-  // A control destination in case the expression has a control-flow
-  // effect.
-  ControlDestination* destination_;
-
-  // The previous state of the owning code generator, restored when
-  // this state is destroyed.
-  CodeGenState* previous_;
-};
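
// Editorial note: a sketch (not code from the tree) of the implicit
// push/pop described above; giving a subexpression its own control
// destination is just a matter of scoping a state object:
static void CompileInTestContext(CodeGenerator* owner,
                                 ControlDestination* dest) {
  CodeGenState state(owner, dest);  // Pushes: owner->state() == &state.
  // ... visit the subexpression here; it reads 'dest' via the state ...
}                                   // Destructor restores the old state.
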
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode.
-
-enum ArgumentsAllocationMode {
-  NO_ARGUMENTS_ALLOCATION,
-  EAGER_ARGUMENTS_ALLOCATION,
-  LAZY_ARGUMENTS_ALLOCATION
-};
-
 
 // -------------------------------------------------------------------------
 // CodeGenerator
 
-class CodeGenerator: public AstVisitor {
+class CodeGenerator {
  public:
-  static bool MakeCode(CompilationInfo* info);
-
   // Printing of AST, etc. as requested by flags.
   static void MakeCodePrologue(CompilationInfo* info);
 
@@ -318,33 +61,7 @@
                               int pos,
                               bool right_here = false);
 
-  // Accessors
-  MacroAssembler* masm() { return masm_; }
-  VirtualFrame* frame() const { return frame_; }
-  inline Handle<Script> script();
 
-  bool has_valid_frame() const { return frame_ != NULL; }
-
-  // Set the virtual frame to be new_frame, with non-frame register
-  // reference counts given by non_frame_registers.  The non-frame
-  // register reference counts of the old frame are returned in
-  // non_frame_registers.
-  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
-  void DeleteFrame();
-
-  RegisterAllocator* allocator() const { return allocator_; }
-
-  CodeGenState* state() { return state_; }
-  void set_state(CodeGenState* state) { state_ = state; }
-
-  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
-  bool in_spilled_code() const { return in_spilled_code_; }
-  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
-  // Return a position of the element at |index_as_smi| + |additional_offset|
-  // in the FixedArray whose pointer is held in |array|.  |index_as_smi| is a
-  // Smi.
   static Operand FixedArrayElementOperand(Register array,
                                           Register index_as_smi,
                                           int additional_offset = 0) {
@@ -353,445 +70,6 @@
   }
 
  private:
-  // Type of a member function that generates inline code for a native function.
-  typedef void (CodeGenerator::*InlineFunctionGenerator)
-      (ZoneList<Expression*>*);
-
-  static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
-  // Construction/Destruction
-  explicit CodeGenerator(MacroAssembler* masm);
-
-  // Accessors
-  inline bool is_eval();
-  inline Scope* scope();
-  inline bool is_strict_mode();
-  inline StrictModeFlag strict_mode_flag();
-
-  // Generating deferred code.
-  void ProcessDeferred();
-
-  // State
-  ControlDestination* destination() const { return state_->destination(); }
-
-  // Control of side-effect-free int32 expression compilation.
-  bool in_safe_int32_mode() { return in_safe_int32_mode_; }
-  void set_in_safe_int32_mode(bool value) { in_safe_int32_mode_ = value; }
-  bool safe_int32_mode_enabled() {
-    return FLAG_safe_int32_compiler && safe_int32_mode_enabled_;
-  }
-  void set_safe_int32_mode_enabled(bool value) {
-    safe_int32_mode_enabled_ = value;
-  }
-  void set_unsafe_bailout(BreakTarget* unsafe_bailout) {
-    unsafe_bailout_ = unsafe_bailout;
-  }
-
-  // Take the Result that is an untagged int32, and convert it to a tagged
-  // Smi or HeapNumber.  Remove the untagged_int32 flag from the result.
-  void ConvertInt32ResultToNumber(Result* value);
-  void ConvertInt32ResultToSmi(Result* value);
-
-  // Track loop nesting level.
-  int loop_nesting() const { return loop_nesting_; }
-  void IncrementLoopNesting() { loop_nesting_++; }
-  void DecrementLoopNesting() { loop_nesting_--; }
-
-  // Node visitors.
-  void VisitStatements(ZoneList<Statement*>* statements);
-
-  virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type) \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  // Visit a statement and then spill the virtual frame if control flow can
-  // reach the end of the statement (i.e., it does not exit via break,
-  // continue, return, or throw).  This function is used temporarily while
-  // the code generator is being transformed.
-  void VisitAndSpill(Statement* statement);
-
-  // Visit a list of statements and then spill the virtual frame if control
-  // flow can reach the end of the list.
-  void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
-  // Main code generation function
-  void Generate(CompilationInfo* info);
-
-  // Generate the return sequence code.  Should be called no more than
-  // once per compiled function, immediately after binding the return
-  // target (which can not be done more than once).
-  void GenerateReturnSequence(Result* return_value);
-
-  // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode();
-
-  // Store the arguments object and allocate it if necessary.
-  Result StoreArgumentsObject(bool initial);
-
-  // The following are used by class Reference.
-  void LoadReference(Reference* ref);
-
-  Operand SlotOperand(Slot* slot, Register tmp);
-
-  Operand ContextSlotOperandCheckExtensions(Slot* slot,
-                                            Result tmp,
-                                            JumpTarget* slow);
-
-  // Expressions
-  void LoadCondition(Expression* expr,
-                     ControlDestination* destination,
-                     bool force_control);
-  void Load(Expression* expr);
-  void LoadGlobal();
-  void LoadGlobalReceiver();
-
-  // Generate code to push the value of an expression on top of the frame
-  // and then spill the frame fully to memory.  This function is used
-  // temporarily while the code generator is being transformed.
-  void LoadAndSpill(Expression* expression);
-
-  // Evaluate an expression and place its value on top of the frame,
-  // using, or not using, the side-effect-free expression compiler.
-  void LoadInSafeInt32Mode(Expression* expr, BreakTarget* unsafe_bailout);
-  void LoadWithSafeInt32ModeDisabled(Expression* expr);
-
-  // Read a value from a slot and leave it on top of the expression stack.
-  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
-  Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                           TypeofState typeof_state,
-                                           JumpTarget* slow);
-
-  // Support for loading from local/global variables and arguments
-  // whose location is known unless they are shadowed by
-  // eval-introduced bindings. Generates no code for unsupported slot
-  // types and therefore expects to fall through to the slow jump target.
-  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                       TypeofState typeof_state,
-                                       Result* result,
-                                       JumpTarget* slow,
-                                       JumpTarget* done);
-
-  // Store the value on top of the expression stack into a slot, leaving the
-  // value in place.
-  void StoreToSlot(Slot* slot, InitState init_state);
-
-  // Support for compiling assignment expressions.
-  void EmitSlotAssignment(Assignment* node);
-  void EmitNamedPropertyAssignment(Assignment* node);
-  void EmitKeyedPropertyAssignment(Assignment* node);
-
-  // Receiver is passed on the frame and consumed.
-  Result EmitNamedLoad(Handle<String> name, bool is_contextual);
-
-  // If the store is contextual, value is passed on the frame and consumed.
-  // Otherwise, receiver and value are passed on the frame and consumed.
-  Result EmitNamedStore(Handle<String> name, bool is_contextual);
-
-  // Receiver and key are passed on the frame and consumed.
-  Result EmitKeyedLoad();
-
-  // Receiver, key, and value are passed on the frame and consumed.
-  Result EmitKeyedStore(StaticType* key_type);
-
-  // Special code for typeof expressions: Unfortunately, we must
-  // be careful when loading the expression in 'typeof'
-  // expressions. We are not allowed to throw reference errors for
-  // non-existing properties of the global object, so we must make it
-  // look like an explicit property access, instead of an access
-  // through the context chain.
-  void LoadTypeofExpression(Expression* x);
-
-  // Translate the value on top of the frame into control flow to the
-  // control destination.
-  void ToBoolean(ControlDestination* destination);
-
-  // Generate code that computes a shortcutting logical operation.
-  void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
-  void GenericBinaryOperation(BinaryOperation* expr,
-                              OverwriteMode overwrite_mode);
-
-  // Emits code sequence that jumps to a JumpTarget if the inputs
-  // are both smis.  Cannot be in MacroAssembler because it takes
-  // advantage of TypeInfo to skip unneeded checks.
-  // Allocates a temporary register, possibly spilling from the frame,
-  // if it needs to check both left and right.
-  void JumpIfBothSmiUsingTypeInfo(Result* left,
-                                  Result* right,
-                                  JumpTarget* both_smi);
-
-  // Emits code sequence that jumps to deferred code if the inputs
-  // are not both smis.  Cannot be in MacroAssembler because it takes
-  // a deferred code object.
-  void JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                     Register right,
-                                     Register scratch,
-                                     TypeInfo left_info,
-                                     TypeInfo right_info,
-                                     DeferredCode* deferred);
-
-  // Emits code sequence that jumps to the label if the inputs
-  // are not both smis.
-  void JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                     Register right,
-                                     Register scratch,
-                                     TypeInfo left_info,
-                                     TypeInfo right_info,
-                                     Label* on_non_smi);
-
-  // If possible, combine two constant smi values using op to produce
-  // a smi result, and push it on the virtual frame, all at compile time.
-  // Returns true if it succeeds.  Otherwise it has no effect.
-  bool FoldConstantSmis(Token::Value op, int left, int right);
-
-  // Emit code to perform a binary operation on a constant
-  // smi and a likely smi.  Consumes the Result operand.
-  Result ConstantSmiBinaryOperation(BinaryOperation* expr,
-                                    Result* operand,
-                                    Handle<Object> constant_operand,
-                                    bool reversed,
-                                    OverwriteMode overwrite_mode);
-
-  // Emit code to perform a binary operation on two likely smis.
-  // The code to handle smi arguments is produced inline.
-  // Consumes the Results left and right.
-  Result LikelySmiBinaryOperation(BinaryOperation* expr,
-                                  Result* left,
-                                  Result* right,
-                                  OverwriteMode overwrite_mode);
-
-
-  // Emit code to perform a binary operation on two untagged int32 values.
-  // The values are on top of the frame, and the result is pushed on the frame.
-  void Int32BinaryOperation(BinaryOperation* node);
-
-
-  // Generate a stub call from the virtual frame.
-  Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
-                                         Result* left,
-                                         Result* right);
-
-  void Comparison(AstNode* node,
-                  Condition cc,
-                  bool strict,
-                  ControlDestination* destination);
-
-  // If at least one of the sides is a constant smi, generate optimized code.
-  void ConstantSmiComparison(Condition cc,
-                             bool strict,
-                             ControlDestination* destination,
-                             Result* left_side,
-                             Result* right_side,
-                             bool left_side_constant_smi,
-                             bool right_side_constant_smi,
-                             bool is_loop_condition);
-
-  void GenerateInlineNumberComparison(Result* left_side,
-                                      Result* right_side,
-                                      Condition cc,
-                                      ControlDestination* dest);
-
-  // To prevent long attacker-controlled byte sequences, integer constants
-  // from the JavaScript source are loaded in two parts if they are larger
-  // than 17 bits.
-  static const int kMaxSmiInlinedBits = 17;
-  bool IsUnsafeSmi(Handle<Object> value);
-  // Load an integer constant x into a register target or into the stack using
-  // at most 16 bits of user-controlled data per assembly operation.
-  void MoveUnsafeSmi(Register target, Handle<Object> value);
-  void StoreUnsafeSmiToLocal(int offset, Handle<Object> value);
-  void PushUnsafeSmi(Handle<Object> value);
-
-  void CallWithArguments(ZoneList<Expression*>* arguments,
-                         CallFunctionFlags flags,
-                         int position);
-
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).  We call x the applicand and y the receiver.
-  // The optimization avoids allocating an arguments object if possible.
-  void CallApplyLazy(Expression* applicand,
-                     Expression* receiver,
-                     VariableProxy* arguments,
-                     int position);
-
-  void CheckStack();
-
-  bool CheckForInlineRuntimeCall(CallRuntime* node);
-
-  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
-  // Declare global variables and functions in the given array of
-  // name/value pairs.
-  void DeclareGlobals(Handle<FixedArray> pairs);
-
-  // Instantiate the function based on the shared function info.
-  Result InstantiateFunction(Handle<SharedFunctionInfo> function_info,
-                             bool pretenure);
-
-  // Support for types.
-  void GenerateIsSmi(ZoneList<Expression*>* args);
-  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
-  void GenerateIsArray(ZoneList<Expression*>* args);
-  void GenerateIsRegExp(ZoneList<Expression*>* args);
-  void GenerateIsObject(ZoneList<Expression*>* args);
-  void GenerateIsSpecObject(ZoneList<Expression*>* args);
-  void GenerateIsFunction(ZoneList<Expression*>* args);
-  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
-  void GenerateIsStringWrapperSafeForDefaultValueOf(
-      ZoneList<Expression*>* args);
-
-  // Support for construct call checks.
-  void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
-  // Support for arguments.length and arguments[?].
-  void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArguments(ZoneList<Expression*>* args);
-
-  // Support for accessing the class and value fields of an object.
-  void GenerateClassOf(ZoneList<Expression*>* args);
-  void GenerateValueOf(ZoneList<Expression*>* args);
-  void GenerateSetValueOf(ZoneList<Expression*>* args);
-
-  // Fast support for charCodeAt(n).
-  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharAt(ZoneList<Expression*>* args);
-
-  // Fast support for object equality testing.
-  void GenerateObjectEquals(ZoneList<Expression*>* args);
-
-  void GenerateLog(ZoneList<Expression*>* args);
-
-  void GenerateGetFramePointer(ZoneList<Expression*>* args);
-
-  // Fast support for Math.random().
-  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
-  // Fast support for StringAdd.
-  void GenerateStringAdd(ZoneList<Expression*>* args);
-
-  // Fast support for SubString.
-  void GenerateSubString(ZoneList<Expression*>* args);
-
-  // Fast support for StringCompare.
-  void GenerateStringCompare(ZoneList<Expression*>* args);
-
-  // Support for direct calls from JavaScript to native RegExp code.
-  void GenerateRegExpExec(ZoneList<Expression*>* args);
-
-  // Construct a RegExp exec result with two in-object properties.
-  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
-  // Support for fast native caches.
-  void GenerateGetFromCache(ZoneList<Expression*>* args);
-
-  // Fast support for number to string.
-  void GenerateNumberToString(ZoneList<Expression*>* args);
-
-  // Fast swapping of elements. Takes three expressions, the object and two
-  // indices. This should only be used if the indices are known to be
-  // non-negative and within bounds of the elements array at the call site.
-  void GenerateSwapElements(ZoneList<Expression*>* args);
-
-  // Fast call for custom callbacks.
-  void GenerateCallFunction(ZoneList<Expression*>* args);
-
-  // Fast call to math functions.
-  void GenerateMathPow(ZoneList<Expression*>* args);
-  void GenerateMathSin(ZoneList<Expression*>* args);
-  void GenerateMathCos(ZoneList<Expression*>* args);
-  void GenerateMathSqrt(ZoneList<Expression*>* args);
-  void GenerateMathLog(ZoneList<Expression*>* args);
-
-  // Check whether two RegExps are equivalent.
-  void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
-  void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
-  // Simple condition analysis.
-  enum ConditionAnalysis {
-    ALWAYS_TRUE,
-    ALWAYS_FALSE,
-    DONT_KNOW
-  };
-  ConditionAnalysis AnalyzeCondition(Expression* cond);
-
-  // Methods used to indicate which source positions the generated code
-  // corresponds to.  Source positions are collected by the assembler and
-  // emitted with the relocation information.
-  void CodeForFunctionPosition(FunctionLiteral* fun);
-  void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(Statement* stmt);
-  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
-  void CodeForSourcePosition(int pos);
-
-  void SetTypeForStackSlot(Slot* slot, TypeInfo info);
-
-#ifdef DEBUG
-  // True if the registers are valid for entry to a block.  There should
-  // be no frame-external references to (non-reserved) registers.
-  bool HasValidEntryRegisters();
-#endif
-
-  ZoneList<DeferredCode*> deferred_;
-
-  // Assembler
-  MacroAssembler* masm_;  // to generate code
-
-  CompilationInfo* info_;
-
-  // Code generation state
-  VirtualFrame* frame_;
-  RegisterAllocator* allocator_;
-  CodeGenState* state_;
-  int loop_nesting_;
-  bool in_safe_int32_mode_;
-  bool safe_int32_mode_enabled_;
-
-  // Jump targets.
-  // The target of the return from the function.
-  BreakTarget function_return_;
-  // The target of the bailout from a side-effect-free int32 subexpression.
-  BreakTarget* unsafe_bailout_;
-
-  // True if the function return is shadowed (i.e., jumping to the target
-  // function_return_ does not jump to the true function return, but rather
-  // to some unlinking code).
-  bool function_return_is_shadowed_;
-
-  // True when we are in code that expects the virtual frame to be fully
-  // spilled.  Some virtual frame function are disabled in DEBUG builds when
-  // called from spilled code, because they do not leave the virtual frame
-  // in a spilled state.
-  bool in_spilled_code_;
-
-  // A cookie used for JIT IMM32 encoding.  Initialized to a random number
-  // when the command-line flag FLAG_mask_constants_with_cookie is true,
-  // zero otherwise.
-  int jit_cookie_;
-
-  friend class VirtualFrame;
-  friend class Isolate;
-  friend class JumpTarget;
-  friend class Reference;
-  friend class Result;
-  friend class FastCodeGenerator;
-  friend class FullCodeGenerator;
-  friend class FullCodeGenSyntaxChecker;
-  friend class LCodeGen;
-
-  friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
-  friend class InlineRuntimeFunctionsTable;
-
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
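
The deleted block above documents an interesting mitigation: kMaxSmiInlinedBits and jit_cookie_ existed so that no single emitted instruction carried more than 16 attacker-controlled bits, defeating JIT-spraying of executable payloads through JavaScript integer literals. A minimal sketch of the cookie variant, with an illustrative printf standing in for the real instruction emitter (not V8 code):

    #include <cstdint>
    #include <cstdio>

    // Emit "mov eax, value ^ cookie" then "xor eax, cookie".  Neither
    // immediate equals the attacker-chosen constant, so sprayed constants
    // cannot double as executable payload.  The cookie is the random
    // per-compilation value the removed jit_cookie_ field described.
    void EmitMaskedConstantLoad(uint32_t value, uint32_t cookie) {
      std::printf("mov eax, 0x%08x\n", value ^ cookie);
      std::printf("xor eax, 0x%08x\n", cookie);
    }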
 
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 286ed7b..615dbfe 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -42,12 +42,12 @@
 namespace internal {
 
 void CPU::Setup() {
-  CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
-  cpu_features->Clear();
-  cpu_features->Probe(true);
-  if (!cpu_features->IsSupported(SSE2) || Serializer::enabled()) {
-    V8::DisableCrankshaft();
-  }
+  CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+  return CpuFeatures::IsSupported(SSE2);
 }
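
The new CPU::SupportsCrankshaft() simply consults the static feature table that CpuFeatures::Probe() fills in. For orientation, the probe reduces to one CPUID query; a stand-alone sketch using the compiler intrinsic rather than V8's hand-rolled assembly:

    #include <cpuid.h>

    // SSE2 is reported in bit 26 of EDX for CPUID leaf 1.  This mirrors the
    // effect of CpuFeatures::Probe for the one feature Crankshaft needs on
    // ia32; it is not the actual V8 implementation.
    static bool ProbeSSE2() {
      unsigned eax, ebx, ecx, edx;
      if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false;
      return (edx & (1u << 26)) != 0;
    }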
 
 
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 33c5251..2389948 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 
 
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index c6342d7..72fdac8 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -641,14 +641,16 @@
   __ neg(edx);
 
   // Allocate a new deoptimizer object.
-  __ PrepareCallCFunction(5, eax);
+  __ PrepareCallCFunction(6, eax);
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ mov(Operand(esp, 0 * kPointerSize), eax);  // Function.
   __ mov(Operand(esp, 1 * kPointerSize), Immediate(type()));  // Bailout type.
   __ mov(Operand(esp, 2 * kPointerSize), ebx);  // Bailout id.
   __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Code address or 0.
   __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 5);
+  __ mov(Operand(esp, 5 * kPointerSize),
+         Immediate(ExternalReference::isolate_address()));
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
 
   // Preserve deoptimizer object in register eax and get the input
   // frame descriptor pointer.
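
The extra slot makes the isolate an explicit sixth argument. Under ia32 cdecl, PrepareCallCFunction(n, scratch) aligns the stack for n arguments, and argument i lands at esp + i * kPointerSize, so the callee presumably has a signature along these lines (parameter order inferred from the operand comments in the hunk, declaration only, not checked against deoptimizer.h):

    // Hypothetical C++ entry point matching the six stack slots above.
    Deoptimizer* NewDeoptimizer(JSFunction* function,   // esp + 0
                                int bailout_type,       // esp + 4
                                unsigned bailout_id,    // esp + 8
                                Address code_or_null,   // esp + 12
                                int fp_to_sp_delta,     // esp + 16
                                Isolate* isolate);      // esp + 20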
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 8084694..0f95abd 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -108,7 +108,7 @@
  public:
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
-  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kLastParameterOffset = +2 * kPointerSize;
   static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
 
   // Caller SP-relative.
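
The rename is accurate: the slot at fp + 2 words holds the last incoming parameter, not saved registers. Roughly, the ia32 JavaScript frame these constants index looks like this (offsets in bytes with kPointerSize == 4; a sketch assembled from the constants in this header, not an authoritative diagram):

    //   ebp + 8 : last parameter        (kLastParameterOffset = +2 * kPointerSize)
    //   ebp + 4 : return address
    //   ebp + 0 : caller's saved ebp
    //   ebp - 4 : context
    //   ebp - 8 : function (marker)     (kFunctionOffset = kMarkerOffset)
    //   ebp - 12: first expression slot (kLocal0Offset = kExpressionsOffset)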
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 16c39c5..69d5e77 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_IA32)
 
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
@@ -231,7 +231,7 @@
     }
 
     { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailout(info->function(), NO_REGISTERS);
+      PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
       NearLabel ok;
       ExternalReference stack_limit =
           ExternalReference::address_of_stack_limit(isolate());
@@ -773,7 +773,7 @@
   // Compile all the tests with branches to their bodies.
   for (int i = 0; i < clauses->length(); i++) {
     CaseClause* clause = clauses->at(i);
-    clause->body_target()->entry_label()->Unuse();
+    clause->body_target()->Unuse();
 
     // The default is not a test, but remember it as final fall through.
     if (clause->is_default()) {
@@ -801,7 +801,7 @@
       __ cmp(edx, Operand(eax));
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
-      __ jmp(clause->body_target()->entry_label());
+      __ jmp(clause->body_target());
       __ bind(&slow_case);
     }
 
@@ -812,7 +812,7 @@
     __ test(eax, Operand(eax));
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
-    __ jmp(clause->body_target()->entry_label());
+    __ jmp(clause->body_target());
   }
 
   // Discard the test value and jump to the default if present, otherwise to
@@ -822,14 +822,14 @@
   if (default_clause == NULL) {
     __ jmp(nested_statement.break_target());
   } else {
-    __ jmp(default_clause->body_target()->entry_label());
+    __ jmp(default_clause->body_target());
   }
 
   // Compile all the case bodies.
   for (int i = 0; i < clauses->length(); i++) {
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
-    __ bind(clause->body_target()->entry_label());
+    __ bind(clause->body_target());
     PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
     VisitStatements(clause->statements());
   }
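
With case targets now plain Labels, the shape of the code emitted for a switch statement reads directly off the hunk: a chain of tests first, then the bodies. Schematically (pseudo-assembly reconstructed from the code above, not literal output):

    //           <switch value in eax, clause value in edx>
    //   test_i: cmp edx, eax          ; compare clause i against the value
    //           jne test_i+1          ; or via the slow case for non-smis
    //           drop 1                ; switch value no longer needed
    //           jmp body_i
    //   ...
    //           jmp body_default      ; or the break target if no default
    //   body_i: ...                   ; bodies follow all tests, fall through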
@@ -1563,27 +1563,26 @@
     }
   }
 
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
   if (expr->is_compound()) {
     { AccumulatorValueContext context(this);
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          PrepareForBailout(expr->target(), TOS_REG);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
       }
     }
 
-    // For property compound assignments we need another deoptimization
-    // point after the property load.
-    if (property != NULL) {
-      PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
-    }
-
     Token::Value op = expr->binary_op();
     __ push(eax);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
@@ -2268,15 +2267,6 @@
       }
     }
   } else {
-    // Call to some other expression.  If the expression is an anonymous
-    // function literal not called in a loop, mark it as one that should
-    // also use the full code generator.
-    FunctionLiteral* lit = fun->AsFunctionLiteral();
-    if (lit != NULL &&
-        lit->name()->Equals(isolate()->heap()->empty_string()) &&
-        loop_depth() == 0) {
-      lit->set_try_full_codegen(true);
-    }
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(fun);
     }
@@ -2458,10 +2448,73 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // TODO(3110205): Implement this.
-  // Currently unimplemented.  Emit false, a safe choice.
+  if (FLAG_debug_code) __ AbortIfSmi(eax);
+
+  // Check whether this map has already been checked to be safe for default
+  // valueOf.
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
+            1 << Map::kStringWrapperSafeForDefaultValueOf);
+  __ j(not_zero, if_true);
+
+  // Check for fast case object. Return false for slow case objects.
+  __ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
+  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ cmp(ecx, FACTORY->hash_table_map());
+  __ j(equal, if_false);
+
+  // Look for the valueOf symbol in the descriptor array and indicate false
+  // if it is found. The type of the entry is not checked, so a transition
+  // named valueOf produces a false negative.
+  __ mov(ebx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
+  __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+  // ebx: descriptor array
+  // ecx: length of descriptor array
+  // Calculate the end of the descriptor array.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kPointerSize == 4);
+  __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+  // Calculate location of the first key name.
+  __ add(Operand(ebx),
+           Immediate(FixedArray::kHeaderSize +
+                     DescriptorArray::kFirstIndex * kPointerSize));
+  // Loop through all the keys in the descriptor array. If one of them is
+  // the symbol valueOf, the result is false.
+  Label entry, loop;
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ mov(edx, FieldOperand(ebx, 0));
+  __ cmp(edx, FACTORY->value_of_symbol());
+  __ j(equal, if_false);
+  __ add(Operand(ebx), Immediate(kPointerSize));
+  __ bind(&entry);
+  __ cmp(ebx, Operand(ecx));
+  __ j(not_equal, &loop);
+
+  // Reload the map, as register ebx was used as a temporary above.
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If not, the result is false.
+  __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(zero, if_false);
+  __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ mov(edx,
+         FieldOperand(edx, GlobalObject::kGlobalContextOffset));
+  __ cmp(ecx,
+         ContextOperand(edx,
+                        Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+  __ j(not_equal, if_false);
+  // Set the bit in the map to indicate that it has been checked as safe for
+  // default valueOf, and produce the true result.
+  __ or_(FieldOperand(ebx, Map::kBitField2Offset),
+         Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ jmp(if_true);
+
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  __ jmp(if_false);
   context()->Plug(if_true, if_false);
 }
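
Read as ordinary control flow, the new inline fast path for the "string wrapper safe for default valueOf" test does the following:

    // 1. If the map's kStringWrapperSafeForDefaultValueOf bit is already set,
    //    answer true (the result is cached on the map).
    // 2. If the properties backing store is a hash table (slow-case object),
    //    answer false.
    // 3. Scan the map's descriptor array; any key equal to the valueOf symbol
    //    answers false (transitions included, hence possible false negatives).
    // 4. If the prototype's map is not the unmodified String.prototype map
    //    from the global context, answer false.
    // 5. Otherwise set the map bit to cache the decision and answer true.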
 
@@ -2717,15 +2770,16 @@
 
   __ bind(&heapnumber_allocated);
 
-  __ PrepareCallCFunction(0, ebx);
+  __ PrepareCallCFunction(1, ebx);
+  __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
   __ CallCFunction(ExternalReference::random_uint32_function(isolate()),
-                   0);
+                   1);
 
   // Convert 32 random bits in eax to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
   // This is implemented on both SSE2 and FPU.
-  if (isolate()->cpu_features()->IsSupported(SSE2)) {
+  if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope fscope(SSE2);
     __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
     __ movd(xmm1, Operand(ebx));
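
The bit construction in the comment above is worth a worked example. Writing the IEEE-754 bits of 1.(20 zeros)(32 random bits) x 2^20 directly and then subtracting 2^20 leaves exactly random/2^32, a double uniform over [0, 1). A self-contained C++ translation of the arithmetic (not V8 code):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    double RandomBitsToDouble(uint32_t random) {
      // Biased exponent 1023 + 20 = 1043 selects 2^20; the low 32 of the 52
      // mantissa bits carry the random payload, the top 20 stay zero.
      uint64_t bits = (uint64_t{1043} << 52) | random;
      double d;
      std::memcpy(&d, &bits, sizeof d);  // d == 2^20 + random / 2^32
      return d - 1048576.0;              // subtract 1.0 x 2^20
    }

    int main() {
      std::printf("%f\n", RandomBitsToDouble(0x80000000u));  // prints 0.500000
      return 0;
    }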
@@ -2800,7 +2854,7 @@
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
-  if (isolate()->cpu_features()->IsSupported(SSE2)) {
+  if (CpuFeatures::IsSupported(SSE2)) {
     MathPowStub stub;
     __ CallStub(&stub);
   } else {
@@ -3033,15 +3087,14 @@
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
-  int arg_count = args->length() - 2;  // For receiver and function.
-  VisitForStackValue(args->at(0));  // Receiver.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i + 1));
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; ++i) {
+    VisitForStackValue(args->at(i));
   }
-  VisitForAccumulatorValue(args->at(arg_count + 1));  // Function.
+  VisitForAccumulatorValue(args->last());  // Function.
 
-  // InvokeFunction requires function in edi. Move it in there.
-  if (!result_register().is(edi)) __ mov(edi, result_register());
+  // InvokeFunction requires the function in edi. Move it in there.
+  __ mov(edi, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(edi, count, CALL_FUNCTION);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -3778,7 +3831,11 @@
 
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
-  PrepareForBailout(expr->increment(), TOS_REG);
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailoutForId(expr->CountId(), TOS_REG);
+  }
 
   // Call ToNumber only if operand is not a smi.
   NearLabel no_conversion;
@@ -4196,30 +4253,7 @@
     default:
       break;
   }
-
   __ call(ic, mode);
-
-  // Crankshaft doesn't need patching of inlined loads and stores.
-  // When compiling the snapshot we need to produce code that works
-  // with and without Crankshaft.
-  if (V8::UseCrankshaft() && !Serializer::enabled()) {
-    return;
-  }
-
-  // If we're calling a (keyed) load or store stub, we have to mark
-  // the call as containing no inlined code so we will not attempt to
-  // patch it.
-  switch (ic->kind()) {
-    case Code::LOAD_IC:
-    case Code::KEYED_LOAD_IC:
-    case Code::STORE_IC:
-    case Code::KEYED_STORE_IC:
-      __ nop();  // Signals no inlined code.
-      break;
-    default:
-      // Do nothing.
-      break;
-  }
 }
 
 
@@ -4240,7 +4274,6 @@
     default:
       break;
   }
-
   __ call(ic, RelocInfo::CODE_TARGET);
   if (patch_site != NULL && patch_site->is_bound()) {
     patch_site->EmitPatchInfo();
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 48ffc73..4106f01 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "ic-inl.h"
 #include "runtime.h"
 #include "stub-cache.h"
@@ -371,12 +371,6 @@
 }
 
 
-// The offset from the inlined patch site to the start of the
-// inlined load instruction.  It is 7 bytes (the operand-immediate cmp
-// against the map) plus 6 bytes (jne slow_label).
-const int LoadIC::kOffsetToLoadInstruction = 13;
-
-
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
@@ -1273,172 +1267,6 @@
 }
 
 
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // If the instruction following the call is not a test eax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  Address delta_address = test_instruction_address + 1;
-  // The delta to the start of the map check instruction.
-  int delta = *reinterpret_cast<int*>(delta_address);
-
-  // The map address is the last 4 bytes of the 7-byte
-  // operand-immediate compare instruction, so we add 3 to get the
-  // offset to the last 4 bytes.
-  Address map_address = test_instruction_address + delta + 3;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // The offset is in the last 4 bytes of a six-byte
-  // memory-to-register move instruction, so we add 2 to get the
-  // offset to the last 4 bytes.
-  Address offset_address =
-      test_instruction_address + delta + kOffsetToLoadInstruction + 2;
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-  return true;
-}
-
-
-// One byte opcode for mov ecx,0xXXXXXXXX.
-// Marks inlined contextual loads using all kinds of cells. Generated
-// code has the hole check:
-//   mov reg, <cell>
-//   mov reg, (<cell>, value offset)
-//   cmp reg, <the hole>
-//   je  slow
-//   ;; use reg
-static const byte kMovEcxByte = 0xB9;
-
-// One byte opcode for mov edx,0xXXXXXXXX.
-// Marks inlined contextual loads using only "don't delete"
-// cells. Generated code doesn't have the hole check:
-//   mov reg, <cell>
-//   mov reg, (<cell>, value offset)
-//   ;; use reg
-static const byte kMovEdxByte = 0xBA;
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
-                                        Object* map,
-                                        Object* cell,
-                                        bool is_dont_delete) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address mov_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // If the instruction following the call is not a mov ecx/edx,
-  // nothing was inlined.
-  byte b = *mov_instruction_address;
-  if (b != kMovEcxByte && b != kMovEdxByte) return false;
-  // If we don't have the hole check generated, we can only support
-  // "don't delete" cells.
-  if (b == kMovEdxByte && !is_dont_delete) return false;
-
-  Address delta_address = mov_instruction_address + 1;
-  // The delta to the start of the map check instruction.
-  int delta = *reinterpret_cast<int*>(delta_address);
-
-  // The map address is the last 4 bytes of the 7-byte
-  // operand-immediate compare instruction, so we add 3 to get the
-  // offset to the last 4 bytes.
-  Address map_address = mov_instruction_address + delta + 3;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // The cell is in the last 4 bytes of a five-byte mov reg, imm32
-  // instruction, so we add 1 to get the offset to the last 4 bytes.
-  Address offset_address =
-      mov_instruction_address + delta + kOffsetToLoadInstruction + 1;
-  *reinterpret_cast<Object**>(offset_address) = cell;
-  return true;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not a test eax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Extract the encoded deltas from the test eax instruction.
-  Address encoded_offsets_address = test_instruction_address + 1;
-  int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
-  int delta_to_map_check = -(encoded_offsets & 0xFFFF);
-  int delta_to_record_write = encoded_offsets >> 16;
-
-  // Patch the map to check. The map address is the last 4 bytes of
-  // the 7-byte operand-immediate compare instruction.
-  Address map_check_address = test_instruction_address + delta_to_map_check;
-  Address map_address = map_check_address + 3;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // Patch the offset in the store instruction. The offset is in the
-  // last 4 bytes of a six-byte register-to-memory move instruction.
-  Address offset_address =
-      map_check_address + StoreIC::kOffsetToStoreInstruction + 2;
-  // The offset should have initial value (kMaxInt - 1), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  // Patch the offset in the write-barrier code. The offset is the
-  // last 4 bytes of a six-byte lea instruction.
-  offset_address = map_check_address + delta_to_record_write + 2;
-  // The offset should have initial value (kMaxInt), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  return true;
-}
-
-
-static bool PatchInlinedMapCheck(Address address, Object* map) {
-  if (V8::UseCrankshaft()) return false;
-
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // The keyed load has a fast inlined case if the IC call instruction
-  // is immediately followed by a test instruction.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Fetch the offset from the test instruction to the map cmp
-  // instruction.  This offset is stored in the last 4 bytes of the
-  // 5-byte test instruction.
-  Address delta_address = test_instruction_address + 1;
-  int delta = *reinterpret_cast<int*>(delta_address);
-  // Compute the map address.  The map address is in the last 4 bytes
-  // of the 7-byte operand-immediate compare instruction, so we add 3
-  // to the offset to get the map address.
-  Address map_address = test_instruction_address + delta + 3;
-  // Patch the map check.
-  *(reinterpret_cast<Object**>(map_address)) = map;
-  return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : key
@@ -1519,12 +1347,6 @@
 }
 
 
-// The offset from the inlined patch site to the start of the inlined
-// store instruction.  It is 7 bytes (the operand-immediate cmp against
-// the map) plus 6 bytes (jne slow_label).
-const int StoreIC::kOffsetToStoreInstruction = 13;
-
-
 void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
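
For context on everything deleted from this file: the patching machinery assumed a fixed byte-level shape at each inlined IC site, reconstructed below from the removed comments (schematic ia32 encodings). Crankshaft never patches inlined loads or stores, so the whole scheme could go.

    //   call <ic_stub>             ; the IC call site
    //   test eax, <delta>          ; 0xA9 + imm32; the imm32 is the delta to
    //                              ; the map check (absent => nothing inlined)
    //   ...
    //   cmp <operand>, <map>       ; 7 bytes, map word in the last 4
    //   jne slow                   ; 6 bytes
    //   mov reg, [base + <offset>] ; 6 bytes, field offset in the last 4
    //
    // PatchInlinedLoad/PatchInlinedStore rewrote the <map> and <offset>
    // words in place once the IC learned a monomorphic shape.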
diff --git a/src/ia32/jump-target-ia32.cc b/src/ia32/jump-target-ia32.cc
deleted file mode 100644
index 76c0d02..0000000
--- a/src/ia32/jump-target-ia32.cc
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
-  ASSERT(cgen()->has_valid_frame());
-  // Live non-frame registers are not allowed at unconditional jumps
-  // because we have no way of invalidating the corresponding results
-  // which are still live in the C++ code.
-  ASSERT(cgen()->HasValidEntryRegisters());
-
-  if (is_bound()) {
-    // Backward jump.  There is an expected frame to merge to.
-    ASSERT(direction_ == BIDIRECTIONAL);
-    cgen()->frame()->PrepareMergeTo(entry_frame_);
-    cgen()->frame()->MergeTo(entry_frame_);
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-  } else if (entry_frame_ != NULL) {
-    // Forward jump with a preconfigured entry frame.  Assert the
-    // current frame matches the expected one and jump to the block.
-    ASSERT(cgen()->frame()->Equals(entry_frame_));
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-  } else {
-    // Forward jump.  Remember the current frame and emit a jump to
-    // its merge code.
-    AddReachingFrame(cgen()->frame());
-    RegisterFile empty;
-    cgen()->SetFrame(NULL, &empty);
-    __ jmp(&merge_labels_.last());
-  }
-}
-
-
-void JumpTarget::DoBranch(Condition cc, Hint hint) {
-  ASSERT(cgen() != NULL);
-  ASSERT(cgen()->has_valid_frame());
-
-  if (is_bound()) {
-    ASSERT(direction_ == BIDIRECTIONAL);
-    // Backward branch.  We have an expected frame to merge to on the
-    // backward edge.
-
-    // Swap the current frame for a copy (the swap keeps the off-frame
-    // registers off the fall-through path) and use the copy for the
-    // branch.
-    VirtualFrame* fall_through_frame = cgen()->frame();
-    VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
-    RegisterFile non_frame_registers;
-    cgen()->SetFrame(branch_frame, &non_frame_registers);
-
-    // Check if we can avoid merge code.
-    cgen()->frame()->PrepareMergeTo(entry_frame_);
-    if (cgen()->frame()->Equals(entry_frame_)) {
-      // Branch right in to the block.
-      cgen()->DeleteFrame();
-      __ j(cc, &entry_label_, hint);
-      cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-      return;
-    }
-
-    // Check if we can reuse existing merge code.
-    for (int i = 0; i < reaching_frames_.length(); i++) {
-      if (reaching_frames_[i] != NULL &&
-          cgen()->frame()->Equals(reaching_frames_[i])) {
-        // Branch to the merge code.
-        cgen()->DeleteFrame();
-        __ j(cc, &merge_labels_[i], hint);
-        cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-        return;
-      }
-    }
-
-    // To emit the merge code here, we negate the condition and branch
-    // around the merge code on the fall through path.
-    Label original_fall_through;
-    __ j(NegateCondition(cc), &original_fall_through, NegateHint(hint));
-    cgen()->frame()->MergeTo(entry_frame_);
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-    cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-    __ bind(&original_fall_through);
-
-  } else if (entry_frame_ != NULL) {
-    // Forward branch with a preconfigured entry frame.  Assert the
-    // current frame matches the expected one and branch to the block.
-    ASSERT(cgen()->frame()->Equals(entry_frame_));
-    // Explicitly use the macro assembler instead of __ as forward
-    // branches are expected to be a fixed size (no inserted
-    // coverage-checking instructions please).  This is used in
-    // Reference::GetValue.
-    cgen()->masm()->j(cc, &entry_label_, hint);
-
-  } else {
-    // Forward branch.  A copy of the current frame is remembered and
-    // a branch to the merge code is emitted.  Explicitly use the
-    // macro assembler instead of __ as forward branches are expected
-    // to be a fixed size (no inserted coverage-checking instructions
-    // please).  This is used in Reference::GetValue.
-    AddReachingFrame(new VirtualFrame(cgen()->frame()));
-    cgen()->masm()->j(cc, &merge_labels_.last(), hint);
-  }
-}
-
-
-void JumpTarget::Call() {
-  // Call is used to push the address of the catch block on the stack as
-  // a return address when compiling try/catch and try/finally.  We
-  // fully spill the frame before making the call.  The expected frame
-  // at the label (which should be the only one) is the spilled current
-  // frame plus an in-memory return address.  The "fall-through" frame
-  // at the return site is the spilled current frame.
-  ASSERT(cgen() != NULL);
-  ASSERT(cgen()->has_valid_frame());
-  // There are no non-frame references across the call.
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ASSERT(!is_linked());
-
-  cgen()->frame()->SpillAll();
-  VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
-  target_frame->Adjust(1);
-  // We do not expect a call with a preconfigured entry frame.
-  ASSERT(entry_frame_ == NULL);
-  AddReachingFrame(target_frame);
-  __ call(&merge_labels_.last());
-}
-
-
-void JumpTarget::DoBind() {
-  ASSERT(cgen() != NULL);
-  ASSERT(!is_bound());
-
-  // Live non-frame registers are not allowed at the start of a basic
-  // block.
-  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
-  // Fast case: the jump target was manually configured with an entry
-  // frame to use.
-  if (entry_frame_ != NULL) {
-    // Assert no reaching frames to deal with.
-    ASSERT(reaching_frames_.is_empty());
-    ASSERT(!cgen()->has_valid_frame());
-
-    RegisterFile empty;
-    if (direction_ == BIDIRECTIONAL) {
-      // Copy the entry frame so the original can be used for a
-      // possible backward jump.
-      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
-    } else {
-      // Take ownership of the entry frame.
-      cgen()->SetFrame(entry_frame_, &empty);
-      entry_frame_ = NULL;
-    }
-    __ bind(&entry_label_);
-    return;
-  }
-
-  if (!is_linked()) {
-    ASSERT(cgen()->has_valid_frame());
-    if (direction_ == FORWARD_ONLY) {
-      // Fast case: no forward jumps and no possible backward jumps.
-      // The stack pointer can be floating above the top of the
-      // virtual frame before the bind.  Afterward, it should not.
-      VirtualFrame* frame = cgen()->frame();
-      int difference = frame->stack_pointer_ - (frame->element_count() - 1);
-      if (difference > 0) {
-        frame->stack_pointer_ -= difference;
-        __ add(Operand(esp), Immediate(difference * kPointerSize));
-      }
-    } else {
-      ASSERT(direction_ == BIDIRECTIONAL);
-      // Fast case: no forward jumps, possible backward ones.  Remove
-      // constants and copies above the watermark on the fall-through
-      // frame and use it as the entry frame.
-      cgen()->frame()->MakeMergable();
-      entry_frame_ = new VirtualFrame(cgen()->frame());
-    }
-    __ bind(&entry_label_);
-    return;
-  }
-
-  if (direction_ == FORWARD_ONLY &&
-      !cgen()->has_valid_frame() &&
-      reaching_frames_.length() == 1) {
-    // Fast case: no fall-through, a single forward jump, and no
-    // possible backward jumps.  Pick up the only reaching frame, take
-    // ownership of it, and use it for the block about to be emitted.
-    VirtualFrame* frame = reaching_frames_[0];
-    RegisterFile empty;
-    cgen()->SetFrame(frame, &empty);
-    reaching_frames_[0] = NULL;
-    __ bind(&merge_labels_[0]);
-
-    // The stack pointer can be floating above the top of the
-    // virtual frame before the bind.  Afterward, it should not.
-    int difference = frame->stack_pointer_ - (frame->element_count() - 1);
-    if (difference > 0) {
-      frame->stack_pointer_ -= difference;
-      __ add(Operand(esp), Immediate(difference * kPointerSize));
-    }
-
-    __ bind(&entry_label_);
-    return;
-  }
-
-  // If there is a current frame, record it as the fall-through.  It
-  // is owned by the reaching frames for now.
-  bool had_fall_through = false;
-  if (cgen()->has_valid_frame()) {
-    had_fall_through = true;
-    AddReachingFrame(cgen()->frame());  // Return value ignored.
-    RegisterFile empty;
-    cgen()->SetFrame(NULL, &empty);
-  }
-
-  // Compute the frame to use for entry to the block.
-  ComputeEntryFrame();
-
-  // Some of the moves required to merge to an expected frame are purely
-  // frame state changes, and do not require any code generation.
-  // Perform those first to increase the possibility of finding equal
-  // frames below.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    if (reaching_frames_[i] != NULL) {
-      reaching_frames_[i]->PrepareMergeTo(entry_frame_);
-    }
-  }
-
-  if (is_linked()) {
-    // There were forward jumps.  Handle merging the reaching frames
-    // to the entry frame.
-
-    // Loop over the (non-null) reaching frames and process any that
-    // need merge code.  Iterate backwards through the list to handle
-    // the fall-through frame first.  Set frames that will be
-    // processed after 'i' to NULL if we want to avoid processing
-    // them.
-    for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
-      VirtualFrame* frame = reaching_frames_[i];
-
-      if (frame != NULL) {
-        // Does the frame (probably) need merge code?
-        if (!frame->Equals(entry_frame_)) {
-          // We could have a valid frame as the fall through to the
-          // binding site or as the fall through from a previous merge
-          // code block.  Jump around the code we are about to
-          // generate.
-          if (cgen()->has_valid_frame()) {
-            cgen()->DeleteFrame();
-            __ jmp(&entry_label_);
-          }
-          // Pick up the frame for this block.  Assume ownership if
-          // there cannot be backward jumps.
-          RegisterFile empty;
-          if (direction_ == BIDIRECTIONAL) {
-            cgen()->SetFrame(new VirtualFrame(frame), &empty);
-          } else {
-            cgen()->SetFrame(frame, &empty);
-            reaching_frames_[i] = NULL;
-          }
-          __ bind(&merge_labels_[i]);
-
-          // Loop over the remaining (non-null) reaching frames,
-          // looking for any that can share merge code with this one.
-          for (int j = 0; j < i; j++) {
-            VirtualFrame* other = reaching_frames_[j];
-            if (other != NULL && other->Equals(cgen()->frame())) {
-              // Set the reaching frame element to null to avoid
-              // processing it later, and then bind its entry label.
-              reaching_frames_[j] = NULL;
-              __ bind(&merge_labels_[j]);
-            }
-          }
-
-          // Emit the merge code.
-          cgen()->frame()->MergeTo(entry_frame_);
-        } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
-          // If this is the fall through frame, and it didn't need
-          // merge code, we need to pick up the frame so we can jump
-          // around subsequent merge blocks if necessary.
-          RegisterFile empty;
-          cgen()->SetFrame(frame, &empty);
-          reaching_frames_[i] = NULL;
-        }
-      }
-    }
-
-    // The code generator may not have a current frame if there was no
-    // fall through and none of the reaching frames needed merging.
-    // In that case, clone the entry frame as the current frame.
-    if (!cgen()->has_valid_frame()) {
-      RegisterFile empty;
-      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
-    }
-
-    // There may be unprocessed reaching frames that did not need
-    // merge code.  They will have unbound merge labels.  Bind their
-    // merge labels to be the same as the entry label and deallocate
-    // them.
-    for (int i = 0; i < reaching_frames_.length(); i++) {
-      if (!merge_labels_[i].is_bound()) {
-        reaching_frames_[i] = NULL;
-        __ bind(&merge_labels_[i]);
-      }
-    }
-
-    // There are non-NULL reaching frames with bound labels for each
-    // merge block, but only on backward targets.
-  } else {
-    // There were no forward jumps.  There must be a current frame and
-    // this must be a bidirectional target.
-    ASSERT(reaching_frames_.length() == 1);
-    ASSERT(reaching_frames_[0] != NULL);
-    ASSERT(direction_ == BIDIRECTIONAL);
-
-    // Use a copy of the reaching frame so the original can be saved
-    // for possible reuse as a backward merge block.
-    RegisterFile empty;
-    cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
-    __ bind(&merge_labels_[0]);
-    cgen()->frame()->MergeTo(entry_frame_);
-  }
-
-  __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
-  // Drop leftover statement state from the frame before merging, without
-  // emitting code.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->ForgetElements(count);
-  DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
-  // Drop leftover statement state from the frame before merging, without
-  // emitting code.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->ForgetElements(count);
-  cgen()->frame()->Push(arg);
-  DoJump();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even on
-  // the fall through.  This is so we can bind the return target with state
-  // on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    cgen()->frame()->ForgetElements(count);
-  }
-  DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_ + 1);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even on
-  // the fall through.  This is so we can bind the return target with state
-  // on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    cgen()->frame()->ForgetElements(count);
-    cgen()->frame()->Push(arg);
-  }
-  DoBind();
-  *arg = cgen()->frame()->Pop();
-}
-
-
-#undef __
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
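
The deleted file is the heart of the old code generator's control-flow handling: the classic backend modeled the expression stack as a VirtualFrame, so every join point had to reconcile the frames arriving along each edge. In outline, from the code above:

    // Jump to a bound (backward) target: merge the current frame into the
    // recorded entry frame, then emit the jump.
    // Jump to an unbound (forward) target: snapshot the current frame as a
    // "reaching frame" and jump to a per-edge merge label.
    // Bind: compute a single entry frame, emit merge code for each reaching
    // frame that differs from it, then bind all labels to the entry point.
    //
    // The full code generator keeps the frame fully spilled instead, so a
    // plain Label per target suffices and this machinery is unnecessary.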
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 1691098..46c71e8 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -77,7 +77,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
-  code->set_stack_slots(StackSlotCount());
+  code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
   Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -132,7 +132,7 @@
   __ push(edi);  // Callee's JS function.
 
   // Reserve space for the stack slots needed by the code.
-  int slots = StackSlotCount();
+  int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
       __ mov(Operand(eax), Immediate(slots));
@@ -254,7 +254,7 @@
 
 bool LCodeGen::GenerateSafepointTable() {
   ASSERT(is_done());
-  safepoints_.Emit(masm(), StackSlotCount());
+  safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
 
@@ -386,7 +386,7 @@
     translation->StoreDoubleStackSlot(op->index());
   } else if (op->IsArgument()) {
     ASSERT(is_tagged);
-    int src_index = StackSlotCount() + op->index();
+    int src_index = GetStackSlotCount() + op->index();
     translation->StoreStackSlot(src_index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -408,20 +408,21 @@
 }
 
 
-void LCodeGen::CallCode(Handle<Code> code,
-                        RelocInfo::Mode mode,
-                        LInstruction* instr,
-                        bool adjusted) {
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               ContextMode context_mode,
+                               SafepointMode safepoint_mode) {
   ASSERT(instr != NULL);
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  if (!adjusted) {
+  if (context_mode == RESTORE_CONTEXT) {
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   }
   __ call(code, mode);
 
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, safepoint_mode);
 
   // Signal that we don't inline smi code before these stubs in the
   // optimizing code generator.
@@ -432,25 +433,44 @@
 }
 
 
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr,
+                        ContextMode context_mode) {
+  CallCodeGeneric(code, mode, instr, context_mode, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
 void LCodeGen::CallRuntime(const Runtime::Function* fun,
                            int argc,
                            LInstruction* instr,
-                           bool adjusted) {
+                           ContextMode context_mode) {
   ASSERT(instr != NULL);
   ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  if (!adjusted) {
+  if (context_mode == RESTORE_CONTEXT) {
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   }
   __ CallRuntime(fun, argc);
 
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr) {
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+                                          SafepointMode safepoint_mode) {
   // Create the environment to bail out to. If the call has side effects,
   // execution has to continue after the call; otherwise execution can
   // continue from a previous bailout point, repeating the call.
@@ -462,8 +482,16 @@
   }
 
   RegisterEnvironmentForDeoptimization(deoptimization_environment);
-  RecordSafepoint(instr->pointer_map(),
-                  deoptimization_environment->deoptimization_index());
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(),
+                    deoptimization_environment->deoptimization_index());
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(),
+        0,
+        deoptimization_environment->deoptimization_index());
+  }
 }
 
 
@@ -612,6 +640,7 @@
     Safepoint::Kind kind,
     int arguments,
     int deoptimization_index) {
+  ASSERT(kind == expected_safepoint_kind_);
   const ZoneList<LOperand*>* operands = pointers->operands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deoptimization_index);
@@ -697,38 +726,38 @@
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpConstructResult: {
       RegExpConstructResultStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     case CodeStub::RegExpExec: {
       RegExpExecStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     case CodeStub::SubString: {
       SubStringStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     case CodeStub::NumberToString: {
       NumberToStringStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     case CodeStub::StringAdd: {
       StringAddStub stub(NO_STRING_ADD_FLAGS);
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     case CodeStub::StringCompare: {
       StringCompareStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     case CodeStub::TranscendentalCache: {
       TranscendentalCacheStub stub(instr->transcendental_type(),
                                    TranscendentalCacheStub::TAGGED);
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
       break;
     }
     default:
@@ -1062,7 +1091,7 @@
     uint64_t int_val = BitCast<uint64_t, double>(v);
     int32_t lower = static_cast<int32_t>(int_val);
     int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
-    if (isolate()->cpu_features()->IsSupported(SSE4_1)) {
+    if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope scope(SSE4_1);
       if (lower != 0) {
         __ Set(temp, Immediate(lower));
@@ -1143,7 +1172,7 @@
 
 void LCodeGen::DoThrow(LThrow* instr) {
   __ push(ToOperand(instr->InputAt(0)));
-  CallRuntime(Runtime::kThrow, 1, instr, false);
+  CallRuntime(Runtime::kThrow, 1, instr, RESTORE_CONTEXT);
 
   if (FLAG_debug_code) {
     Comment("Unreachable code.");
@@ -1218,7 +1247,7 @@
   ASSERT(ToRegister(instr->result()).is(eax));
 
   TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
 }
 
 
@@ -1330,12 +1359,8 @@
 
 
 void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  __ pushad();
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  __ popad();
+  PushSafepointRegistersScope scope(this);
+  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
 }
 
 void LCodeGen::DoGoto(LGoto* instr) {
@@ -1837,7 +1862,7 @@
   // Object and function are in fixed registers defined by the stub.
   ASSERT(ToRegister(instr->context()).is(esi));
   InstanceofStub stub(InstanceofStub::kArgsInRegisters);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 
   NearLabel true_value, done;
   __ test(eax, Operand(eax));
@@ -1856,7 +1881,7 @@
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   InstanceofStub stub(InstanceofStub::kArgsInRegisters);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
   __ test(eax, Operand(eax));
   EmitBranch(true_block, false_block, zero);
 }
@@ -1928,7 +1953,7 @@
 
 void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                                 Label* map_check) {
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
 
   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
   flags = static_cast<InstanceofStub::Flags>(
@@ -1939,20 +1964,24 @@
       flags | InstanceofStub::kReturnTrueFalseObject);
   InstanceofStub stub(flags);
 
-  // Get the temp register reserved by the instruction. This needs to be edi as
-  // its slot of the pushing of safepoint registers is used to communicate the
-  // offset to the location of the map check.
+  // Get the temp register reserved by the instruction. This needs to be a
+  // register which is pushed last by PushSafepointRegisters, because the
+  // top of the stack is used to pass the offset of the map check location
+  // to the stub.
   Register temp = ToRegister(instr->TempAt(0));
-  ASSERT(temp.is(edi));
+  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
   __ mov(InstanceofStub::right(), Immediate(instr->function()));
   static const int kAdditionalDelta = 16;
   int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
   __ mov(temp, Immediate(delta));
   __ StoreToSafepointRegisterSlot(temp, temp);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+  CallCodeGeneric(stub.GetCode(),
+                  RelocInfo::CODE_TARGET,
+                  instr,
+                  RESTORE_CONTEXT,
+                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   // Put the result value into the eax slot and restore all registers.
   __ StoreToSafepointRegisterSlot(eax, eax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -1980,7 +2009,7 @@
   Token::Value op = instr->op();
 
   Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
 
   Condition condition = ComputeCompareCondition(op);
   if (op == Token::GT || op == Token::LTE) {
@@ -2003,7 +2032,7 @@
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
   Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
 
   // The compare stub expects compare condition and the input operands
   // reversed for GT and LTE.
@@ -2028,11 +2057,11 @@
   }
   __ mov(esp, ebp);
   __ pop(ebp);
-  __ Ret((ParameterCount() + 1) * kPointerSize, ecx);
+  __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
 }
 
 
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
   if (instr->hydrogen()->check_hole_value()) {
@@ -2042,7 +2071,20 @@
 }
 
 
-void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(esi));
+  ASSERT(ToRegister(instr->global_object()).is(eax));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  __ mov(ecx, instr->name());
+  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
+                                               RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  CallCode(ic, mode, instr, CONTEXT_ADJUSTED);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
   Register value = ToRegister(instr->InputAt(0));
   Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
 
@@ -2060,6 +2102,19 @@
 }
 
 
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(esi));
+  ASSERT(ToRegister(instr->global_object()).is(edx));
+  ASSERT(ToRegister(instr->value()).is(eax));
+
+  __ mov(ecx, instr->name());
+  Handle<Code> ic = instr->strict_mode()
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
+}
+
+
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2122,7 +2177,7 @@
     ASSERT(instr->hydrogen()->need_generic());
     __ mov(ecx, name);
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+    CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
   } else {
     NearLabel done;
     for (int i = 0; i < map_count - 1; ++i) {
@@ -2144,7 +2199,7 @@
       __ bind(&generic);
       __ mov(ecx, name);
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
+      CallCode(ic, RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
     } else {
       DeoptimizeIf(not_equal, instr->environment());
       EmitLoadField(result, object, map, name);
@@ -2161,7 +2216,7 @@
 
   __ mov(ecx, instr->name());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -2304,11 +2359,11 @@
         break;
       case kExternalUnsignedIntArray:
         __ mov(result, Operand(external_pointer, key, times_4, 0));
-        __ test(Operand(result), Immediate(0x80000000));
+        __ test(result, Operand(result));
         // TODO(danno): we could be more clever here, perhaps having a special
         // version of the stub that detects if the overflow case actually
         // happens, and generates code that returns a double rather than int.
-        DeoptimizeIf(not_zero, instr->environment());
+        DeoptimizeIf(negative, instr->environment());
         break;
       case kExternalFloatArray:
         UNREACHABLE();
@@ -2324,7 +2379,7 @@
   ASSERT(ToRegister(instr->key()).is(eax));
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -2438,7 +2493,7 @@
   SafepointGenerator safepoint_generator(this,
                                          pointers,
                                          env->deoptimization_index());
-  v8::internal::ParameterCount actual(eax);
+  ParameterCount actual(eax);
   __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
 }
 
@@ -2512,7 +2567,7 @@
   }
 
   // Setup deoptimization.
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
@@ -2534,7 +2589,7 @@
   Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
 
   // Preserve the value of all registers.
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
 
   Label negative;
   __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
@@ -2555,10 +2610,8 @@
   // Slow case: Call the runtime system to do the number allocation.
   __ bind(&slow);
 
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+
   // Set the pointer to the new heap number in tmp.
   if (!tmp.is(eax)) __ mov(tmp, eax);
 
@@ -2574,7 +2627,6 @@
   __ StoreToSafepointRegisterSlot(input_reg, tmp);
 
   __ bind(&done);
-  __ PopSafepointRegisters();
 }
 
 
@@ -2655,25 +2707,16 @@
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
 
+  Label below_half, done;
   // xmm_scratch = 0.5
   ExternalReference one_half = ExternalReference::address_of_one_half();
   __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
 
+  __ ucomisd(xmm_scratch, input_reg);
+  __ j(above, &below_half);
   // input = input + 0.5
   __ addsd(input_reg, xmm_scratch);
 
-  // We need to return -0 for the input range [-0.5, 0[, otherwise
-  // compute Math.floor(value + 0.5).
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below_equal, instr->environment());
-  } else {
-    // If we don't need to bailout on -0, we check only bailout
-    // on negative inputs.
-    __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below, instr->environment());
-  }
 
   // Compute Math.floor(value + 0.5).
   // Use truncating instruction (OK because input is positive).
@@ -2682,6 +2725,27 @@
   // Overflow is signalled with minint.
   __ cmp(output_reg, 0x80000000u);
   DeoptimizeIf(equal, instr->environment());
+  __ jmp(&done);
+
+  __ bind(&below_half);
+
+  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
+  // we can ignore the difference between a result of -0 and +0.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // If the sign is positive, we return +0.
+    __ movmskpd(output_reg, input_reg);
+    __ test(output_reg, Immediate(1));
+    DeoptimizeIf(not_zero, instr->environment());
+  } else {
+    // If the input is >= -0.5, we return +0.
+    __ mov(output_reg, Immediate(0xBF000000));
+    __ movd(xmm_scratch, Operand(output_reg));
+    __ cvtss2sd(xmm_scratch, xmm_scratch);
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(below, instr->environment());
+  }
+  __ Set(output_reg, Immediate(0));
+  __ bind(&done);
 }
 
 
@@ -2763,10 +2827,32 @@
 
 
 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-  TranscendentalCacheStub stub(TranscendentalCache::LOG,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+  ASSERT(instr->InputAt(0)->Equals(instr->result()));
+  XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+  NearLabel positive, done, zero, negative;
+  __ xorpd(xmm0, xmm0);
+  __ ucomisd(input_reg, xmm0);
+  __ j(above, &positive);
+  __ j(equal, &zero);
+  ExternalReference nan = ExternalReference::address_of_nan();
+  __ movdbl(input_reg, Operand::StaticVariable(nan));
+  __ jmp(&done);
+  __ bind(&zero);
+  __ push(Immediate(0xFFF00000));
+  __ push(Immediate(0));
+  __ movdbl(input_reg, Operand(esp, 0));
+  __ add(Operand(esp), Immediate(kDoubleSize));
+  __ jmp(&done);
+  __ bind(&positive);
+  __ fldln2();
+  __ sub(Operand(esp), Immediate(kDoubleSize));
+  __ movdbl(Operand(esp, 0), input_reg);
+  __ fld_d(Operand(esp, 0));
+  __ fyl2x();
+  __ fstp_d(Operand(esp, 0));
+  __ movdbl(input_reg, Operand(esp, 0));
+  __ add(Operand(esp), Immediate(kDoubleSize));
+  __ bind(&done);
 }
 
 
@@ -2774,7 +2860,7 @@
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
 }
 
 
@@ -2782,7 +2868,7 @@
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
 }
 
 
@@ -2819,6 +2905,21 @@
 }
 
 
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->context()).is(esi));
+  ASSERT(ToRegister(instr->function()).is(edi));
+  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  SafepointGenerator generator(this, pointers, env->deoptimization_index());
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(edi, count, CALL_FUNCTION, &generator);
+}
+
+
 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   ASSERT(ToRegister(instr->key()).is(ecx));
@@ -2827,7 +2928,7 @@
   int arity = instr->arity();
   Handle<Code> ic = isolate()->stub_cache()->
       ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -2839,7 +2940,7 @@
   Handle<Code> ic = isolate()->stub_cache()->
       ComputeCallInitialize(arity, NOT_IN_LOOP);
   __ mov(ecx, instr->name());
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -2849,7 +2950,7 @@
 
   int arity = instr->arity();
   CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
   __ Drop(1);
 }
 
@@ -2862,7 +2963,7 @@
   Handle<Code> ic = isolate()->stub_cache()->
       ComputeCallInitialize(arity, NOT_IN_LOOP);
   __ mov(ecx, instr->name());
-  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -2880,12 +2981,12 @@
 
   Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
   __ Set(eax, Immediate(instr->arity()));
-  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr, CONTEXT_ADJUSTED);
 }
 
 
 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
-  CallRuntime(instr->function(), instr->arity(), instr, false);
+  CallRuntime(instr->function(), instr->arity(), instr, RESTORE_CONTEXT);
 }
 
 
@@ -2925,10 +3026,10 @@
   ASSERT(ToRegister(instr->value()).is(eax));
 
   __ mov(ecx, instr->name());
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -3025,10 +3126,10 @@
   ASSERT(ToRegister(instr->key()).is(ecx));
   ASSERT(ToRegister(instr->value()).is(eax));
 
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -3146,7 +3247,7 @@
   // contained in the register pointer map.
   __ Set(result, Immediate(0));
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
   __ push(string);
   // Push the index as a smi. This is safe because of the checks in
   // DoStringCharCodeAt above.
@@ -3159,16 +3260,12 @@
     __ SmiTag(index);
     __ push(index);
   }
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
   if (FLAG_debug_code) {
     __ AbortIfNotSmi(eax);
   }
   __ SmiUntag(eax);
   __ StoreToSafepointRegisterSlot(result, eax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3211,14 +3308,11 @@
   // contained in the register pointer map.
   __ Set(result, Immediate(0));
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
   __ SmiTag(char_code);
   __ push(char_code);
-  __ CallRuntimeSaveDoubles(Runtime::kCharFromCode);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 1, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
   __ StoreToSafepointRegisterSlot(result, eax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3229,6 +3323,22 @@
 }
 
 
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  if (instr->left()->IsConstantOperand()) {
+    __ push(ToImmediate(instr->left()));
+  } else {
+    __ push(ToOperand(instr->left()));
+  }
+  if (instr->right()->IsConstantOperand()) {
+    __ push(ToImmediate(instr->right()));
+  } else {
+    __ push(ToOperand(instr->right()));
+  }
+  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
+}
+
+
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister() || input->IsStackSlot());
@@ -3265,7 +3375,7 @@
   Register tmp = reg.is(eax) ? ecx : eax;
 
   // Preserve the value of all registers.
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
 
   // There was overflow, so bits 30 and 31 of the original integer
   // disagree. Try to allocate a heap number in new space and store
@@ -3287,10 +3397,7 @@
   // integer value.
   __ StoreToSafepointRegisterSlot(reg, Immediate(0));
 
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   if (!reg.is(eax)) __ mov(reg, eax);
 
   // Done. Put the value in xmm0 into the value of the allocated heap
@@ -3298,7 +3405,6 @@
   __ bind(&done);
   __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
   __ StoreToSafepointRegisterSlot(reg, reg);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3334,13 +3440,9 @@
   Register reg = ToRegister(instr->result());
   __ Set(reg, Immediate(0));
 
-  __ PushSafepointRegisters();
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  PushSafepointRegistersScope scope(this);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   __ StoreToSafepointRegisterSlot(reg, eax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3427,7 +3529,7 @@
     __ jmp(&done);
 
     __ bind(&heap_number);
-    if (isolate()->cpu_features()->IsSupported(SSE3)) {
+    if (CpuFeatures::IsSupported(SSE3)) {
       CpuFeatures::Scope scope(SSE3);
       NearLabel convert;
       // Use more powerful conversion when sse3 is available.
@@ -3537,7 +3639,7 @@
     // the JS bitwise operations.
     __ cvttsd2si(result_reg, Operand(input_reg));
     __ cmp(result_reg, 0x80000000u);
-    if (isolate()->cpu_features()->IsSupported(SSE3)) {
+    if (CpuFeatures::IsSupported(SSE3)) {
       // This will deoptimize if the exponent of the input is out of range.
       CpuFeatures::Scope scope(SSE3);
       NearLabel convert, done;
@@ -3755,16 +3857,16 @@
     FastCloneShallowArrayStub::Mode mode =
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
   } else if (instr->hydrogen()->depth() > 1) {
-    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, false);
+    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, RESTORE_CONTEXT);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
-    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, false);
+    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, RESTORE_CONTEXT);
   } else {
     FastCloneShallowArrayStub::Mode mode =
         FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
   }
 }
 
@@ -3786,9 +3888,12 @@
 
   // Pick the right runtime function to call.
   if (instr->hydrogen()->depth() > 1) {
-    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
+    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr, CONTEXT_ADJUSTED);
   } else {
-    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+    CallRuntime(Runtime::kCreateObjectLiteralShallow,
+                4,
+                instr,
+                CONTEXT_ADJUSTED);
   }
 }
 
@@ -3796,7 +3901,7 @@
 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
   ASSERT(ToRegister(instr->InputAt(0)).is(eax));
   __ push(eax);
-  CallRuntime(Runtime::kToFastProperties, 1, instr);
+  CallRuntime(Runtime::kToFastProperties, 1, instr, CONTEXT_ADJUSTED);
 }
 
 
@@ -3821,7 +3926,7 @@
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
   __ push(Immediate(instr->hydrogen()->pattern()));
   __ push(Immediate(instr->hydrogen()->flags()));
-  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, false);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, RESTORE_CONTEXT);
   __ mov(ebx, eax);
 
   __ bind(&materialized);
@@ -3833,7 +3938,7 @@
   __ bind(&runtime_allocate);
   __ push(ebx);
   __ push(Immediate(Smi::FromInt(size)));
-  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, false);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, RESTORE_CONTEXT);
   __ pop(ebx);
 
   __ bind(&allocated);
@@ -3861,14 +3966,14 @@
     FastNewClosureStub stub(
         shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
     __ push(Immediate(shared_info));
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
   } else {
     __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
     __ push(Immediate(shared_info));
     __ push(Immediate(pretenure
                       ? factory()->true_value()
                       : factory()->false_value()));
-    CallRuntime(Runtime::kNewClosure, 3, instr, false);
+    CallRuntime(Runtime::kNewClosure, 3, instr, RESTORE_CONTEXT);
   }
 }
 
@@ -3880,7 +3985,7 @@
   } else {
     __ push(ToOperand(input));
   }
-  CallRuntime(Runtime::kTypeof, 1, instr, false);
+  CallRuntime(Runtime::kTypeof, 1, instr, RESTORE_CONTEXT);
 }
 
 
@@ -4083,7 +4188,7 @@
   __ j(above_equal, &done);
 
   StackCheckStub stub;
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, RESTORE_CONTEXT);
   __ bind(&done);
 }
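
Throughout lithium-codegen-ia32.cc the patch replaces the opaque `bool adjusted` argument with the named ContextMode enum, and CallCodeGeneric threads a SafepointMode through as well. A hedged, standalone sketch of the readability gain at call sites; the enum names mirror the patch, the bodies are stand-ins:

    #include <cstdio>

    enum ContextMode { RESTORE_CONTEXT, CONTEXT_ADJUSTED };

    void CallCode(const char* target, ContextMode context_mode) {
      if (context_mode == RESTORE_CONTEXT) {
        // The real code generator reloads esi from the frame here:
        // __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
        std::printf("restore context, then ");
      }
      std::printf("call %s\n", target);
    }

    int main() {
      // Before the patch this read CallCode("CompareIC", false), leaving
      // the reader to remember what false meant.
      CallCode("CompareIC", RESTORE_CONTEXT);
      CallCode("InstanceofStub", CONTEXT_ADJUSTED);
      return 0;
    }
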
 
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 4414e6a..f8bbea3 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -61,7 +61,8 @@
         deferred_(8),
         osr_pc_offset_(-1),
         deoptimization_reloc_size(),
-        resolver_(this) {
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
   }
 
@@ -129,7 +130,7 @@
   bool is_aborted() const { return status_ == ABORTED; }
 
   int strict_mode_flag() const {
-    return info()->is_strict() ? kStrictMode : kNonStrictMode;
+    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -146,8 +147,8 @@
                        Register temporary,
                        Register temporary2);
 
-  int StackSlotCount() const { return chunk()->spill_slot_count(); }
-  int ParameterCount() const { return scope()->num_parameters(); }
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
 
   void Abort(const char* format, ...);
   void Comment(const char* format, ...);
@@ -164,16 +165,44 @@
   bool GenerateRelocPadding();
   bool GenerateSafepointTable();
 
-  void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr,
-                bool adjusted = true);
-  void CallRuntime(const Runtime::Function* fun, int argc, LInstruction* instr,
-                   bool adjusted = true);
-  void CallRuntime(Runtime::FunctionId id, int argc, LInstruction* instr,
-                   bool adjusted = true) {
+  enum ContextMode {
+    RESTORE_CONTEXT,
+    CONTEXT_ADJUSTED
+  };
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr,
+                ContextMode context_mode);
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       ContextMode context_mode,
+                       SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* fun,
+                   int argc,
+                   LInstruction* instr,
+                   ContextMode context_mode);
+
+  void CallRuntime(Runtime::FunctionId id,
+                   int argc,
+                   LInstruction* instr,
+                   ContextMode context_mode) {
     const Runtime::Function* function = Runtime::FunctionForId(id);
-    CallRuntime(function, argc, instr, adjusted);
+    CallRuntime(function, argc, instr, context_mode);
   }
 
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr);
+
   // Generate a direct call to a known function.  Expects the function
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
@@ -182,7 +211,9 @@
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
-  void RegisterLazyDeoptimization(LInstruction* instr);
+  void RegisterLazyDeoptimization(LInstruction* instr,
+                                  SafepointMode safepoint_mode);
+
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
 
@@ -281,6 +312,27 @@
   // Compiler from a set of parallel moves to a sequential list of moves.
   LGapResolver resolver_;
 
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->masm_->PushSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+    }
+
+    ~PushSafepointRegistersScope() {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      codegen_->masm_->PopSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
   friend class LDeferredCode;
   friend class LEnvironment;
   friend class SafepointGenerator;
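
PushSafepointRegistersScope above is a straight RAII pairing: the constructor pushes the safepoint registers, the destructor pops them, and the asserted expected_safepoint_kind_ transition catches nested or mismatched scopes at code-generation time. A self-contained sketch of the same shape with stand-in types (not the V8 classes):

    #include <cassert>

    enum SafepointKind { kSimple, kWithRegisters };

    struct CodeGenState {
      SafepointKind expected_kind = kSimple;
      void PushSafepointRegisters() { /* would emit the register pushes */ }
      void PopSafepointRegisters() { /* would emit the register pops */ }
    };

    class PushSafepointRegistersScope {
     public:
      explicit PushSafepointRegistersScope(CodeGenState* cg) : cg_(cg) {
        assert(cg_->expected_kind == kSimple);  // scopes must not nest
        cg_->PushSafepointRegisters();
        cg_->expected_kind = kWithRegisters;
      }
      ~PushSafepointRegistersScope() {
        assert(cg_->expected_kind == kWithRegisters);
        cg_->PopSafepointRegisters();  // runs on every exit path
        cg_->expected_kind = kSimple;
      }
     private:
      CodeGenState* cg_;
    };

    int main() {
      CodeGenState cg;
      {
        PushSafepointRegistersScope scope(&cg);
        // ... deferred code, possibly with several early exits ...
      }  // registers popped here, unconditionally
      assert(cg.expected_kind == kSimple);
      return 0;
    }

This is why the deferred-code bodies in the .cc diff above could drop their explicit PopSafepointRegisters calls.
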
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index eabfecc..3d1da40 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "ia32/lithium-gap-resolver-ia32.h"
 #include "ia32/lithium-codegen-ia32.h"
 
@@ -460,3 +462,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
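
The added #if guard brings this file in line with the other per-architecture sources: every arch file can be handed to the compiler unconditionally, and files for non-selected targets compile to an empty translation unit. A tiny standalone illustration (the macro would normally come from the build configuration):

    // Build as an object file with -DV8_TARGET_ARCH_IA32 to get the symbol;
    // without the define, the translation unit is empty but still compiles.
    #if defined(V8_TARGET_ARCH_IA32)
    int ia32_only_symbol() { return 32; }
    #endif  // V8_TARGET_ARCH_IA32
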
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 199a80a..aa91a83 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -71,22 +71,21 @@
 
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
-  // Call instructions can use only fixed registers as
-  // temporaries and outputs because all registers
-  // are blocked by the calling convention.
-  // Inputs must use a fixed register.
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
   ASSERT(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
   }
   for (TempIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -303,6 +302,15 @@
 }
 
 
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" ");
+  InputAt(1)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
 void LCallKeyed::PrintDataTo(StringStream* stream) {
   stream->Add("[ecx] #%d / ", arity());
 }
@@ -1120,9 +1128,9 @@
       return new LIsConstructCallAndBranch(TempRegister());
     } else {
       if (v->IsConstant()) {
-        if (HConstant::cast(v)->handle()->IsTrue()) {
+        if (HConstant::cast(v)->ToBoolean()) {
           return new LGoto(instr->FirstSuccessor()->block_id());
-        } else if (HConstant::cast(v)->handle()->IsFalse()) {
+        } else {
           return new LGoto(instr->SecondSuccessor()->block_id());
         }
       }
@@ -1187,7 +1195,7 @@
 
 LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
   ++argument_count_;
-  LOperand* argument = UseOrConstant(instr->argument());
+  LOperand* argument = UseAny(instr->argument());
   return new LPushArgument(argument);
 }
 
@@ -1222,9 +1230,24 @@
 }
 
 
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* function = UseFixed(instr->function(), edi);
+  argument_count_ -= instr->argument_count();
+  LInvokeFunction* result = new LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
-  if (op == kMathLog || op == kMathSin || op == kMathCos) {
+  if (op == kMathLog) {
+    ASSERT(instr->representation().IsDouble());
+    ASSERT(instr->value()->representation().IsDouble());
+    LOperand* input = UseRegisterAtStart(instr->value());
+    LUnaryMathOperation* result = new LUnaryMathOperation(input);
+    return DefineSameAsFirst(result);
+  } else if (op == kMathSin || op == kMathCos) {
     LOperand* input = UseFixedDouble(instr->value(), xmm1);
     LUnaryMathOperation* result = new LUnaryMathOperation(input);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
@@ -1633,9 +1656,8 @@
       LOperand* value = UseRegister(instr->value());
       bool needs_check = !instr->value()->type().IsSmi();
       if (needs_check) {
-        CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
         LOperand* xmm_temp =
-            (instr->CanTruncateToInt32() && cpu_features->IsSupported(SSE3))
+            (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
             ? NULL
             : FixedTemp(xmm1);
         LTaggedToI* res = new LTaggedToI(value, xmm_temp);
@@ -1656,7 +1678,7 @@
     } else {
       ASSERT(to.IsInteger32());
       bool needs_temp = instr->CanTruncateToInt32() &&
-          !Isolate::Current()->cpu_features()->IsSupported(SSE3);
+          !CpuFeatures::IsSupported(SSE3);
       LOperand* value = needs_temp ?
           UseTempRegister(instr->value()) : UseRegister(instr->value());
       LOperand* temp = needs_temp ? TempRegister() : NULL;
@@ -1746,20 +1768,39 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  LLoadGlobal* result = new LLoadGlobal;
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new LLoadGlobalCell;
   return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
 
 
-LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
-  LStoreGlobal* result = new LStoreGlobal(UseRegisterAtStart(instr->value()));
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* global_object = UseFixed(instr->global_object(), eax);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(context, global_object);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+  LStoreGlobalCell* result =
+      new LStoreGlobalCell(UseRegisterAtStart(instr->value()));
   return instr->check_hole_value() ? AssignEnvironment(result) : result;
 }
 
 
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* global_object = UseFixed(instr->global_object(), edx);
+  LOperand* value = UseFixed(instr->value(), eax);
+  LStoreGlobalGeneric* result =
+      new LStoreGlobalGeneric(context, global_object, value);
+  return MarkAsCall(result, instr);
+}
+
+
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   return DefineAsRegister(new LLoadContextSlot(context));
@@ -1978,6 +2019,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* left = UseOrConstantAtStart(instr->left());
+  LOperand* right = UseOrConstantAtStart(instr->right());
+  return MarkAsCall(DefineFixed(new LStringAdd(left, right), eax), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* string = UseRegister(instr->string());
   LOperand* index = UseRegisterOrConstant(instr->index());
@@ -2022,7 +2070,8 @@
 
 LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
   LDeleteProperty* result =
-      new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
+      new LDeleteProperty(UseAtStart(instr->object()),
+                          UseOrConstantAtStart(instr->key()));
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2110,7 +2159,6 @@
       env->Push(value);
     }
   }
-  ASSERT(env->length() == instr->environment_length());
 
   // If there is an instruction with a pending deoptimization environment,
   // create a lazy bailout instruction to capture the environment.
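
One behavioral change above: DoBranch now folds a constant-valued branch straight into an LGoto by asking the constant for its boolean value, rather than comparing the handle against true and then false. A hedged sketch of the folding with stand-in types (HValueSketch is not a V8 class):

    #include <cassert>

    struct HValueSketch {
      bool is_constant;
      bool boolean_value;  // stand-in for HConstant::ToBoolean()
    };

    // Returns the successor block id for a constant branch, or -1 when a
    // real compare-and-branch has to be emitted.
    int FoldBranch(const HValueSketch& v, int first_succ, int second_succ) {
      if (v.is_constant) {
        return v.boolean_value ? first_succ : second_succ;
      }
      return -1;
    }

    int main() {
      assert(FoldBranch({true, true}, 7, 9) == 7);     // LGoto true block
      assert(FoldBranch({true, false}, 7, 9) == 9);    // LGoto false block
      assert(FoldBranch({false, false}, 7, 9) == -1);  // emit the test
      return 0;
    }
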
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index a9d769b..76c90be 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -39,6 +39,7 @@
 // Forward declarations.
 class LCodeGen;
 
+
 #define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
   V(ControlInstruction)                         \
   V(Call)                                       \
@@ -106,6 +107,7 @@
   V(InstanceOfAndBranch)                        \
   V(InstanceOfKnownGlobal)                      \
   V(Integer32ToDouble)                          \
+  V(InvokeFunction)                             \
   V(IsNull)                                     \
   V(IsNullAndBranch)                            \
   V(IsObject)                                   \
@@ -121,7 +123,8 @@
   V(LoadElements)                               \
   V(LoadExternalArrayPointer)                   \
   V(LoadFunctionPrototype)                      \
-  V(LoadGlobal)                                 \
+  V(LoadGlobalCell)                             \
+  V(LoadGlobalGeneric)                          \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadKeyedSpecializedArrayElement)           \
@@ -146,12 +149,14 @@
   V(SmiUntag)                                   \
   V(StackCheck)                                 \
   V(StoreContextSlot)                           \
-  V(StoreGlobal)                                \
+  V(StoreGlobalCell)                            \
+  V(StoreGlobalGeneric)                         \
   V(StoreKeyedFastElement)                      \
   V(StoreKeyedGeneric)                          \
   V(StoreKeyedSpecializedArrayElement)          \
   V(StoreNamedField)                            \
   V(StoreNamedGeneric)                          \
+  V(StringAdd)                                  \
   V(StringCharCodeAt)                           \
   V(StringCharFromCode)                         \
   V(StringLength)                               \
@@ -1292,21 +1297,59 @@
 };
 
 
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
 };
 
 
-class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
  public:
-  explicit LStoreGlobal(LOperand* value) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStoreGlobalCell(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
-  DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 3, 0> {
+ public:
+  explicit LStoreGlobalGeneric(LOperand* context,
+                               LOperand* global_object,
+                               LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+    inputs_[2] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+  LOperand* context() { return InputAt(0); }
+  LOperand* global_object() { return InputAt(1); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LOperand* value() { return InputAt(2); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1410,6 +1453,25 @@
 };
 
 
+class LInvokeFunction: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
 class LCallKeyed: public LTemplateInstruction<1, 2, 0> {
  public:
   LCallKeyed(LOperand* context, LOperand* key) {
@@ -1655,6 +1717,7 @@
   LOperand* object() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
   Handle<Object> name() const { return hydrogen()->name(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1716,6 +1779,7 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
   virtual void PrintDataTo(StringStream* stream);
 
@@ -1723,6 +1787,22 @@
   LOperand* object() { return inputs_[1]; }
   LOperand* key() { return inputs_[2]; }
   LOperand* value() { return inputs_[3]; }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringAdd(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
 };
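
The new instruction classes above (LInvokeFunction, LStoreGlobalGeneric, LStringAdd) all follow the LTemplateInstruction<results, inputs, temps> pattern: operand counts are template parameters, so the operand array has a fixed size per class and the named accessors are plain indexing. A stripped-down sketch of that shape (types are stand-ins):

    #include <cassert>

    struct LOperand { int id; };

    template <int R, int I, int T>
    struct LTemplateInstructionSketch {
      LOperand* inputs_[I];  // size fixed by the instruction's type
    };

    struct LStringAddSketch : LTemplateInstructionSketch<1, 2, 0> {
      LStringAddSketch(LOperand* left, LOperand* right) {
        inputs_[0] = left;
        inputs_[1] = right;
      }
      LOperand* left() { return inputs_[0]; }
      LOperand* right() { return inputs_[1]; }
    };

    int main() {
      LOperand a = {1};
      LOperand b = {2};
      LStringAddSketch add(&a, &b);
      assert(add.left()->id == 1 && add.right()->id == 2);
      return 0;
    }
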
 
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index ba30c49..ad567bc 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_IA32)
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "debug.h"
 #include "runtime.h"
 #include "serialize.h"
@@ -41,11 +41,14 @@
 // -------------------------------------------------------------------------
 // MacroAssembler implementation.
 
-MacroAssembler::MacroAssembler(void* buffer, int size)
-    : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+    : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true),
-      code_object_(isolate()->heap()->undefined_value()) {
+      allow_stub_calls_(true) {
+  if (isolate() != NULL) {
+    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+                                  isolate());
+  }
 }
 
 
@@ -231,7 +234,7 @@
 
 
 void MacroAssembler::FCmp() {
-  if (Isolate::Current()->cpu_features()->IsSupported(CMOV)) {
+  if (CpuFeatures::IsSupported(CMOV)) {
     fucomip();
     ffree(0);
     fincstp();
@@ -1027,19 +1030,6 @@
 }
 
 
-void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
-                                      Register result,
-                                      Register op,
-                                      JumpTarget* then_target) {
-  JumpTarget ok;
-  test(result, Operand(result));
-  ok.Branch(not_zero, taken);
-  test(op, Operand(op));
-  then_target->Branch(sign, not_taken);
-  ok.Bind();
-}
-
-
 void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
@@ -1988,17 +1978,14 @@
 
 
 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
-  // Reserve space for Isolate address which is always passed as last parameter
-  num_arguments += 1;
-
-  int frameAlignment = OS::ActivationFrameAlignment();
-  if (frameAlignment != 0) {
+  int frame_alignment = OS::ActivationFrameAlignment();
+  if (frame_alignment != 0) {
     // Align the stack pointer and make room for num_arguments words
     // and the original value of esp.
     mov(scratch, esp);
     sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
-    ASSERT(IsPowerOf2(frameAlignment));
-    and_(esp, -frameAlignment);
+    ASSERT(IsPowerOf2(frame_alignment));
+    and_(esp, -frame_alignment);
     mov(Operand(esp, num_arguments * kPointerSize), scratch);
   } else {
     sub(Operand(esp), Immediate(num_arguments * kPointerSize));
@@ -2016,11 +2003,6 @@
 
 void MacroAssembler::CallCFunction(Register function,
                                    int num_arguments) {
-  // Pass current isolate address as additional parameter.
-  mov(Operand(esp, num_arguments * kPointerSize),
-      Immediate(ExternalReference::isolate_address()));
-  num_arguments += 1;
-
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
@@ -2030,13 +2012,15 @@
   if (OS::ActivationFrameAlignment() != 0) {
     mov(esp, Operand(esp, num_arguments * kPointerSize));
   } else {
-    add(Operand(esp), Immediate(num_arguments * sizeof(int32_t)));
+    add(Operand(esp), Immediate(num_arguments * kPointerSize));
   }
 }
 
 
 CodePatcher::CodePatcher(byte* address, int size)
-    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+    : address_(address),
+      size_(size),
+      masm_(Isolate::Current(), address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
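
PrepareCallCFunction above no longer reserves an implicit slot for the isolate; what remains is pure alignment arithmetic: reserve num_arguments words plus one for the caller's esp, then mask esp down to the platform's power-of-two frame alignment (and_(esp, -frame_alignment) is exactly a mask with ~(alignment - 1)). A hedged, integer-only sketch of that arithmetic:

    #include <cassert>
    #include <cstdint>

    const uint32_t kPointerSize = 4;

    uint32_t PrepareCallCFunctionEsp(uint32_t esp, uint32_t num_arguments,
                                     uint32_t frame_alignment) {
      assert((frame_alignment & (frame_alignment - 1)) == 0);  // power of two
      esp -= (num_arguments + 1) * kPointerSize;  // args + saved-esp slot
      esp &= ~(frame_alignment - 1);              // and_(esp, -frame_alignment)
      return esp;
    }

    int main() {
      uint32_t esp = PrepareCallCFunctionEsp(0x0001000Fu, 3, 16);
      assert(esp % 16 == 0);  // the C call sees an aligned stack
      return 0;
    }
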
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index bafb175..6909272 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -50,13 +50,16 @@
 typedef Operand MemOperand;
 
 // Forward declaration.
-class JumpTarget;
 class PostCallGenerator;
 
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
-  MacroAssembler(void* buffer, int size);
+  // The isolate parameter can be NULL if the macro assembler should
+  // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller to never invoke such a function on the
+  // macro assembler.
+  MacroAssembler(Isolate* isolate, void* buffer, int size);
 
   // ---------------------------------------------------------------------------
   // GC Support
@@ -420,12 +423,6 @@
   // Check if result is zero and op is negative.
   void NegativeZeroTest(Register result, Register op, Label* then_label);
 
-  // Check if result is zero and op is negative in code using jump targets.
-  void NegativeZeroTest(CodeGenerator* cgen,
-                        Register result,
-                        Register op,
-                        JumpTarget* then_target);
-
   // Check if result is zero and any of op1 and op2 are negative.
   // Register scratch is destroyed, and it must be different from op2.
   void NegativeZeroTest(Register result, Register op1, Register op2,
@@ -580,7 +577,10 @@
 
   void Move(Register target, Handle<Object> value);
 
-  Handle<Object> CodeObject() { return code_object_; }
+  Handle<Object> CodeObject() {
+    ASSERT(!code_object_.is_null());
+    return code_object_;
+  }
 
 
   // ---------------------------------------------------------------------------
@@ -635,6 +635,10 @@
                                            Register scratch2,
                                            Label* on_not_flat_ascii_strings);
 
+  static int SafepointRegisterStackIndex(Register reg) {
+    return SafepointRegisterStackIndex(reg.code());
+  }
+
  private:
   bool generating_stub_;
   bool allow_stub_calls_;
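
The constructor contract above makes the isolate optional; the matching change is that CodeObject() now asserts its handle was initialized instead of returning a never-set value. A stand-in sketch of the contract (not the V8 types):

    #include <cassert>
    #include <cstddef>

    struct IsolateSketch {
      int undefined_value;  // stand-in for heap()->undefined_value()
    };

    class MacroAssemblerSketch {
     public:
      explicit MacroAssemblerSketch(IsolateSketch* isolate)
          : isolate_(isolate), code_object_valid_(false), code_object_(0) {
        if (isolate_ != NULL) {
          code_object_ = isolate_->undefined_value;
          code_object_valid_ = true;
        }
      }
      int CodeObject() {
        assert(code_object_valid_);  // mirrors ASSERT(!code_object_.is_null())
        return code_object_;
      }
     private:
      IsolateSketch* isolate_;
      bool code_object_valid_;
      int code_object_;
    };

    int main() {
      IsolateSketch isolate = {42};
      MacroAssemblerSketch with_isolate(&isolate);
      assert(with_isolate.CodeObject() == 42);
      MacroAssemblerSketch without_isolate(NULL);  // fine while CodeObject() is unused
      return 0;
    }
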
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index f1c773b..067f8c8 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -99,7 +99,7 @@
 RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
     Mode mode,
     int registers_to_save)
-    : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+    : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
       mode_(mode),
       num_registers_(registers_to_save),
       num_saved_registers_(registers_to_save),
@@ -372,14 +372,18 @@
     __ push(backtrack_stackpointer());
     __ push(ebx);
 
-    static const int argument_count = 3;
+    static const int argument_count = 4;
     __ PrepareCallCFunction(argument_count, ecx);
     // Put arguments into allocated stack area, last argument highest on stack.
     // Parameters are
     //   Address byte_offset1 - Address captured substring's start.
     //   Address byte_offset2 - Address of current character position.
     //   size_t byte_length - length of capture in bytes(!)
+    //   Isolate* isolate
 
+    // Set isolate.
+    __ mov(Operand(esp, 3 * kPointerSize),
+           Immediate(ExternalReference::isolate_address()));
     // Set byte_length.
     __ mov(Operand(esp, 2 * kPointerSize), ebx);
     // Set byte_offset2.
@@ -838,8 +842,10 @@
     __ push(edi);
 
     // Call GrowStack(backtrack_stackpointer())
-    static const int num_arguments = 2;
+    static const int num_arguments = 3;
     __ PrepareCallCFunction(num_arguments, ebx);
+    __ mov(Operand(esp, 2 * kPointerSize),
+           Immediate(ExternalReference::isolate_address()));
     __ lea(eax, Operand(ebp, kStackHighEnd));
     __ mov(Operand(esp, 1 * kPointerSize), eax);
     __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
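
Both regexp call sites above grow their C-call argument count by one and store the isolate themselves in the highest slot, since PrepareCallCFunction stopped appending it. A sketch of the resulting argument layout, with a plain array standing in for the esp-relative slots:

    #include <cassert>
    #include <cstddef>

    // Slot i models Operand(esp, i * kPointerSize); the last argument is
    // highest on the stack, matching the comment in the patch.
    void StoreCompareArgs(const void* slots[], const void* byte_offset1,
                          const void* byte_offset2, const void* byte_length,
                          const void* isolate) {
      slots[0] = byte_offset1;
      slots[1] = byte_offset2;
      slots[2] = byte_length;
      slots[3] = isolate;  // passed explicitly now, per call site
    }

    int main() {
      const void* slots[4] = {NULL, NULL, NULL, NULL};
      int isolate_stand_in = 0;
      StoreCompareArgs(slots, NULL, NULL, NULL, &isolate_stand_in);
      assert(slots[3] == &isolate_stand_in);
      return 0;
    }
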
diff --git a/src/ia32/register-allocator-ia32-inl.h b/src/ia32/register-allocator-ia32-inl.h
deleted file mode 100644
index 99ae6eb..0000000
--- a/src/ia32/register-allocator-ia32-inl.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
-#define V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
-  // The code for this test relies on the order of register codes.
-  return reg.code() >= esp.code() && reg.code() <= esi.code();
-}
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers.  The mapping is:
-
-// eax <-> 0, ebx <-> 1, ecx <-> 2, edx <-> 3, edi <-> 4.
-
-int RegisterAllocator::ToNumber(Register reg) {
-  ASSERT(reg.is_valid() && !IsReserved(reg));
-  const int kNumbers[] = {
-    0,   // eax
-    2,   // ecx
-    3,   // edx
-    1,   // ebx
-    -1,  // esp
-    -1,  // ebp
-    -1,  // esi
-    4    // edi
-  };
-  return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
-  ASSERT(num >= 0 && num < kNumRegisters);
-  const Register kRegisters[] = { eax, ebx, ecx, edx, edi };
-  return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
-  Reset();
-  // The non-reserved edi register is live on JS function entry.
-  Use(edi);  // JS function.
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_REGISTER_ALLOCATOR_IA32_INL_H_
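
The file deleted above implemented a dense renumbering of the five allocatable ia32 registers. The round trip can be summarized in a self-contained sketch (register codes follow the ia32 encoding eax=0, ecx=1, edx=2, ebx=3, esp=4, ebp=5, esi=6, edi=7; function names are illustrative):

#include <cassert>

int ToNumberSketch(int reg_code) {
  // -1 marks the reserved registers esp, ebp, and esi.
  static const int kNumbers[8] = {0, 2, 3, 1, -1, -1, -1, 4};
  assert(reg_code >= 0 && reg_code < 8);
  return kNumbers[reg_code];
}

int ToRegisterCodeSketch(int num) {
  static const int kCodes[5] = {0, 3, 1, 2, 7};  // eax, ebx, ecx, edx, edi
  assert(num >= 0 && num < 5);
  return kCodes[num];
}
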
diff --git a/src/ia32/register-allocator-ia32.cc b/src/ia32/register-allocator-ia32.cc
deleted file mode 100644
index 6db13d4..0000000
--- a/src/ia32/register-allocator-ia32.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
-  ASSERT(is_valid());
-  if (is_constant()) {
-    CodeGenerator* code_generator =
-        CodeGeneratorScope::Current(Isolate::Current());
-    Result fresh = code_generator->allocator()->Allocate();
-    ASSERT(fresh.is_valid());
-    if (is_untagged_int32()) {
-      fresh.set_untagged_int32(true);
-      if (handle()->IsSmi()) {
-        code_generator->masm()->Set(
-            fresh.reg(),
-            Immediate(Smi::cast(*handle())->value()));
-      } else if (handle()->IsHeapNumber()) {
-        double double_value = HeapNumber::cast(*handle())->value();
-        int32_t value = DoubleToInt32(double_value);
-        if (double_value == 0 && signbit(double_value)) {
-          // Negative zero must not be converted to an int32 unless
-          // the context allows it.
-          code_generator->unsafe_bailout_->Branch(equal);
-          code_generator->unsafe_bailout_->Branch(not_equal);
-        } else if (double_value == value) {
-          code_generator->masm()->Set(fresh.reg(), Immediate(value));
-        } else {
-          code_generator->unsafe_bailout_->Branch(equal);
-          code_generator->unsafe_bailout_->Branch(not_equal);
-        }
-      } else {
-        // Constant is not a number.  This was not predicted by AST analysis.
-        code_generator->unsafe_bailout_->Branch(equal);
-        code_generator->unsafe_bailout_->Branch(not_equal);
-      }
-    } else if (code_generator->IsUnsafeSmi(handle())) {
-      code_generator->MoveUnsafeSmi(fresh.reg(), handle());
-    } else {
-      code_generator->masm()->Set(fresh.reg(), Immediate(handle()));
-    }
-    // This result becomes a copy of the fresh one.
-    fresh.set_type_info(type_info());
-    *this = fresh;
-  }
-  ASSERT(is_register());
-}
-
-
-void Result::ToRegister(Register target) {
-  CodeGenerator* code_generator =
-      CodeGeneratorScope::Current(Isolate::Current());
-  ASSERT(is_valid());
-  if (!is_register() || !reg().is(target)) {
-    Result fresh = code_generator->allocator()->Allocate(target);
-    ASSERT(fresh.is_valid());
-    if (is_register()) {
-      code_generator->masm()->mov(fresh.reg(), reg());
-    } else {
-      ASSERT(is_constant());
-      if (is_untagged_int32()) {
-        if (handle()->IsSmi()) {
-          code_generator->masm()->Set(
-              fresh.reg(),
-              Immediate(Smi::cast(*handle())->value()));
-        } else {
-          ASSERT(handle()->IsHeapNumber());
-          double double_value = HeapNumber::cast(*handle())->value();
-          int32_t value = DoubleToInt32(double_value);
-          if (double_value == 0 && signbit(double_value)) {
-            // Negative zero must not be converted to an int32 unless
-            // the context allows it.
-            code_generator->unsafe_bailout_->Branch(equal);
-            code_generator->unsafe_bailout_->Branch(not_equal);
-          } else if (double_value == value) {
-            code_generator->masm()->Set(fresh.reg(), Immediate(value));
-          } else {
-            code_generator->unsafe_bailout_->Branch(equal);
-            code_generator->unsafe_bailout_->Branch(not_equal);
-          }
-        }
-      } else {
-        if (code_generator->IsUnsafeSmi(handle())) {
-          code_generator->MoveUnsafeSmi(fresh.reg(), handle());
-        } else {
-          code_generator->masm()->Set(fresh.reg(), Immediate(handle()));
-        }
-      }
-    }
-    fresh.set_type_info(type_info());
-    fresh.set_untagged_int32(is_untagged_int32());
-    *this = fresh;
-  } else if (is_register() && reg().is(target)) {
-    ASSERT(code_generator->has_valid_frame());
-    code_generator->frame()->Spill(target);
-    ASSERT(code_generator->allocator()->count(target) == 1);
-  }
-  ASSERT(is_register());
-  ASSERT(reg().is(target));
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
-  Result result = AllocateWithoutSpilling();
-  // Check that the register is a byte register.  If not, unuse the
-  // register if valid and return an invalid result.
-  if (result.is_valid() && !result.reg().is_byte_register()) {
-    result.Unuse();
-    return Result();
-  }
-  return result;
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
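
One detail worth noting from the deleted allocator: AllocateByteRegisterWithoutSpilling() allocated normally and then rejected the result if the register could not address its low 8 bits. A sketch of that filter (on ia32, codes 0-3, i.e. eax/ecx/edx/ebx, are the byte-addressable ones; the helper below is illustrative):

#include <optional>

std::optional<int> AllocateByteRegisterSketch(std::optional<int> allocated) {
  // 'allocated' stands in for AllocateWithoutSpilling(): a register code,
  // or nothing if every register was in use.
  if (allocated && *allocated > 3) {
    return std::nullopt;  // unuse the non-byte register and report failure
  }
  return allocated;
}
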
diff --git a/src/ia32/register-allocator-ia32.h b/src/ia32/register-allocator-ia32.h
deleted file mode 100644
index e7ce91f..0000000
--- a/src/ia32/register-allocator-ia32.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_REGISTER_ALLOCATOR_IA32_H_
-#define V8_IA32_REGISTER_ALLOCATOR_IA32_H_
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
-  static const int kNumRegisters = 5;
-  static const int kInvalidRegister = -1;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_REGISTER_ALLOCATOR_IA32_H_
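
These constants bounded the allocator loops that appear later in this patch (for example, the deleted merge code iterates i from 0 to RegisterAllocator::kNumRegisters and skips slots whose location is the invalid sentinel). A sketch of that idiom, with an illustrative lookup standing in for register_location():

const int kNumRegistersSketch = 5;
const int kInvalidSketch = -1;

void VisitAllocatedRegisters(const int locations[kNumRegistersSketch],
                             void (*visit)(int reg, int index)) {
  for (int i = 0; i < kNumRegistersSketch; i++) {
    if (locations[i] == kInvalidSketch) continue;  // not held in a register
    visit(i, locations[i]);
  }
}
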
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 7730ee3..27d2886 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_IA32)
 
 #include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "stub-cache.h"
 
 namespace v8 {
@@ -1921,7 +1921,7 @@
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
-  if (!isolate()->cpu_features()->IsSupported(SSE2)) {
+  if (!CpuFeatures::IsSupported(SSE2)) {
     return isolate()->heap()->undefined_value();
   }
 
@@ -3292,7 +3292,7 @@
       int arg_number = shared->GetThisPropertyAssignmentArgument(i);
       __ mov(ebx, edi);
       __ cmp(eax, arg_number);
-      if (isolate()->cpu_features()->IsSupported(CMOV)) {
+      if (CpuFeatures::IsSupported(CMOV)) {
         CpuFeatures::Scope use_cmov(CMOV);
         __ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize));
       } else {
@@ -3611,10 +3611,10 @@
       // processors that don't support SSE2. The code in IntegerConvert
       // (code-stubs-ia32.cc) is roughly what is needed here though the
       // conversion failure case does not need to be handled.
-      if (isolate()->cpu_features()->IsSupported(SSE2)) {
+      if (CpuFeatures::IsSupported(SSE2)) {
         if (array_type != kExternalIntArray &&
             array_type != kExternalUnsignedIntArray) {
-          ASSERT(isolate()->cpu_features()->IsSupported(SSE2));
+          ASSERT(CpuFeatures::IsSupported(SSE2));
           CpuFeatures::Scope scope(SSE2);
           __ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
           // ecx: untagged integer value
@@ -3629,6 +3629,7 @@
                 __ bind(&done);
               }
               __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+              break;
             case kExternalByteArray:
             case kExternalUnsignedByteArray:
               __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
@@ -3642,7 +3643,7 @@
               break;
           }
         } else {
-          if (isolate()->cpu_features()->IsSupported(SSE3)) {
+          if (CpuFeatures::IsSupported(SSE3)) {
             CpuFeatures::Scope scope(SSE3);
             // fisttp stores values as signed integers. To represent the
             // entire range of int and unsigned int arrays, store as a
@@ -3655,7 +3656,7 @@
             __ pop(ecx);
             __ add(Operand(esp), Immediate(kPointerSize));
           } else {
-            ASSERT(isolate()->cpu_features()->IsSupported(SSE2));
+            ASSERT(CpuFeatures::IsSupported(SSE2));
             CpuFeatures::Scope scope(SSE2);
             // We can easily implement the correct rounding behavior for the
             // range [0, 2^31-1]. For the time being, to keep this code simple,
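
The recurring change in this file swaps per-isolate feature queries (isolate()->cpu_features()->IsSupported(...)) for a process-wide static check. A sketch of that shape, with an illustrative bitmask probe rather than the real CpuFeatures internals:

class CpuFeaturesSketch {
 public:
  static void Probe(unsigned detected_bits) { supported_ = detected_bits; }
  static bool IsSupported(unsigned feature_bit) {
    return (supported_ & feature_bit) != 0;
  }
 private:
  static unsigned supported_;  // written once at startup, read statically
};
unsigned CpuFeaturesSketch::supported_ = 0;
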
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
deleted file mode 100644
index 2613caf..0000000
--- a/src/ia32/virtual-frame-ia32.cc
+++ /dev/null
@@ -1,1366 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_IA32)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
-  // Emit code to write elements below the stack pointer to their
-  // (already allocated) stack address.
-  ASSERT(index <= stack_pointer_);
-  FrameElement element = elements_[index];
-  ASSERT(!element.is_synced());
-  switch (element.type()) {
-    case FrameElement::INVALID:
-      break;
-
-    case FrameElement::MEMORY:
-      // This function should not be called with synced elements.
-      // (memory elements are always synced).
-      UNREACHABLE();
-      break;
-
-    case FrameElement::REGISTER:
-      __ mov(Operand(ebp, fp_relative(index)), element.reg());
-      break;
-
-    case FrameElement::CONSTANT:
-      if (cgen()->IsUnsafeSmi(element.handle())) {
-        cgen()->StoreUnsafeSmiToLocal(fp_relative(index), element.handle());
-      } else {
-        __ Set(Operand(ebp, fp_relative(index)),
-               Immediate(element.handle()));
-      }
-      break;
-
-    case FrameElement::COPY: {
-      int backing_index = element.index();
-      FrameElement backing_element = elements_[backing_index];
-      if (backing_element.is_memory()) {
-        Result temp = cgen()->allocator()->Allocate();
-        ASSERT(temp.is_valid());
-        __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
-        __ mov(Operand(ebp, fp_relative(index)), temp.reg());
-      } else {
-        ASSERT(backing_element.is_register());
-        __ mov(Operand(ebp, fp_relative(index)), backing_element.reg());
-      }
-      break;
-    }
-  }
-  elements_[index].set_sync();
-}
-
-
-void VirtualFrame::SyncElementByPushing(int index) {
-  // Sync an element of the frame that is just above the stack pointer
-  // by pushing it.
-  ASSERT(index == stack_pointer_ + 1);
-  stack_pointer_++;
-  FrameElement element = elements_[index];
-
-  switch (element.type()) {
-    case FrameElement::INVALID:
-      __ push(Immediate(Smi::FromInt(0)));
-      break;
-
-    case FrameElement::MEMORY:
-      // No memory elements exist above the stack pointer.
-      UNREACHABLE();
-      break;
-
-    case FrameElement::REGISTER:
-      __ push(element.reg());
-      break;
-
-    case FrameElement::CONSTANT:
-      if (cgen()->IsUnsafeSmi(element.handle())) {
-        cgen()->PushUnsafeSmi(element.handle());
-      } else {
-        __ push(Immediate(element.handle()));
-      }
-      break;
-
-    case FrameElement::COPY: {
-      int backing_index = element.index();
-      FrameElement backing = elements_[backing_index];
-      ASSERT(backing.is_memory() || backing.is_register());
-      if (backing.is_memory()) {
-        __ push(Operand(ebp, fp_relative(backing_index)));
-      } else {
-        __ push(backing.reg());
-      }
-      break;
-    }
-  }
-  elements_[index].set_sync();
-}
-
-
-// Clear the dirty bits for the range of elements in
-// [min(stack_pointer_ + 1, begin), end].
-void VirtualFrame::SyncRange(int begin, int end) {
-  ASSERT(begin >= 0);
-  ASSERT(end < element_count());
-  // Sync elements below the range if they have not been materialized
-  // on the stack.
-  int start = Min(begin, stack_pointer_ + 1);
-
-  // Emit normal push instructions for elements above stack pointer
-  // and use mov instructions if we are below stack pointer.
-  for (int i = start; i <= end; i++) {
-    if (!elements_[i].is_synced()) {
-      if (i <= stack_pointer_) {
-        SyncElementBelowStackPointer(i);
-      } else {
-        SyncElementByPushing(i);
-      }
-    }
-  }
-}
-
-
-void VirtualFrame::MakeMergable() {
-  for (int i = 0; i < element_count(); i++) {
-    FrameElement element = elements_[i];
-
-    // All number type information is reset to unknown for a mergable frame
-    // because of incoming back edges.
-    if (element.is_constant() || element.is_copy()) {
-      if (element.is_synced()) {
-        // Just spill.
-        elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
-      } else {
-        // Allocate to a register.
-        FrameElement backing_element;  // Invalid if not a copy.
-        if (element.is_copy()) {
-          backing_element = elements_[element.index()];
-        }
-        Result fresh = cgen()->allocator()->Allocate();
-        ASSERT(fresh.is_valid());  // A register was spilled if all were in use.
-        elements_[i] =
-            FrameElement::RegisterElement(fresh.reg(),
-                                          FrameElement::NOT_SYNCED,
-                                          TypeInfo::Unknown());
-        Use(fresh.reg(), i);
-
-        // Emit a move.
-        if (element.is_constant()) {
-          if (cgen()->IsUnsafeSmi(element.handle())) {
-            cgen()->MoveUnsafeSmi(fresh.reg(), element.handle());
-          } else {
-            __ Set(fresh.reg(), Immediate(element.handle()));
-          }
-        } else {
-          ASSERT(element.is_copy());
-          // Copies are only backed by register or memory locations.
-          if (backing_element.is_register()) {
-            // The backing store may have been spilled by allocating,
-            // but that's OK.  If it was, the value is right where we
-            // want it.
-            if (!fresh.reg().is(backing_element.reg())) {
-              __ mov(fresh.reg(), backing_element.reg());
-            }
-          } else {
-            ASSERT(backing_element.is_memory());
-            __ mov(fresh.reg(), Operand(ebp, fp_relative(element.index())));
-          }
-        }
-      }
-      // No need to set the copied flag --- there are no copies.
-    } else {
-      // Clear the copy flag of non-constant, non-copy elements.
-      // They cannot be copied because copies are not allowed.
-      // The copy flag is not relied on before the end of this loop,
-      // including when registers are spilled.
-      elements_[i].clear_copied();
-      elements_[i].set_type_info(TypeInfo::Unknown());
-    }
-  }
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
-  Comment cmnt(masm(), "[ Merge frame");
-  // We should always be merging the code generator's current frame to an
-  // expected frame.
-  ASSERT(cgen()->frame() == this);
-
-  // Adjust the stack pointer upward (toward the top of the virtual
-  // frame) if necessary.
-  if (stack_pointer_ < expected->stack_pointer_) {
-    int difference = expected->stack_pointer_ - stack_pointer_;
-    stack_pointer_ = expected->stack_pointer_;
-    __ sub(Operand(esp), Immediate(difference * kPointerSize));
-  }
-
-  MergeMoveRegistersToMemory(expected);
-  MergeMoveRegistersToRegisters(expected);
-  MergeMoveMemoryToRegisters(expected);
-
-  // Adjust the stack pointer downward if necessary.
-  if (stack_pointer_ > expected->stack_pointer_) {
-    int difference = stack_pointer_ - expected->stack_pointer_;
-    stack_pointer_ = expected->stack_pointer_;
-    __ add(Operand(esp), Immediate(difference * kPointerSize));
-  }
-
-  // At this point, the frames should be identical.
-  ASSERT(Equals(expected));
-}
-
-
-void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
-  ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
-  // Move registers, constants, and copies to memory.  Perform moves
-  // from the top downward in the frame in order to leave the backing
-  // stores of copies in registers.
-  //
-  // Moving memory-backed copies to memory requires a spare register
-  // for the memory-to-memory moves.  Since we are performing a merge,
-  // we use esi (which is already saved in the frame).  We keep track
-  // of the index of the frame element esi is caching or kIllegalIndex
-  // if esi has not been disturbed.
-  int esi_caches = kIllegalIndex;
-  for (int i = element_count() - 1; i >= 0; i--) {
-    FrameElement target = expected->elements_[i];
-    if (target.is_register()) continue;  // Handle registers later.
-    if (target.is_memory()) {
-      FrameElement source = elements_[i];
-      switch (source.type()) {
-        case FrameElement::INVALID:
-          // Not a legal merge move.
-          UNREACHABLE();
-          break;
-
-        case FrameElement::MEMORY:
-          // Already in place.
-          break;
-
-        case FrameElement::REGISTER:
-          Unuse(source.reg());
-          if (!source.is_synced()) {
-            __ mov(Operand(ebp, fp_relative(i)), source.reg());
-          }
-          break;
-
-        case FrameElement::CONSTANT:
-          if (!source.is_synced()) {
-            if (cgen()->IsUnsafeSmi(source.handle())) {
-              esi_caches = i;
-              cgen()->MoveUnsafeSmi(esi, source.handle());
-              __ mov(Operand(ebp, fp_relative(i)), esi);
-            } else {
-              __ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle()));
-            }
-          }
-          break;
-
-        case FrameElement::COPY:
-          if (!source.is_synced()) {
-            int backing_index = source.index();
-            FrameElement backing_element = elements_[backing_index];
-            if (backing_element.is_memory()) {
-              // If we have to spill a register, we spill esi.
-              if (esi_caches != backing_index) {
-                esi_caches = backing_index;
-                __ mov(esi, Operand(ebp, fp_relative(backing_index)));
-              }
-              __ mov(Operand(ebp, fp_relative(i)), esi);
-            } else {
-              ASSERT(backing_element.is_register());
-              __ mov(Operand(ebp, fp_relative(i)), backing_element.reg());
-            }
-          }
-          break;
-      }
-    }
-    elements_[i] = target;
-  }
-
-  if (esi_caches != kIllegalIndex) {
-    __ mov(esi, Operand(ebp, fp_relative(context_index())));
-  }
-}
-
-
-void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
-  // We have already done X-to-memory moves.
-  ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    // Move the right value into register i if it is currently in a register.
-    int index = expected->register_location(i);
-    int use_index = register_location(i);
-    // Skip if register i is unused in the target or else if source is
-    // not a register (this is not a register-to-register move).
-    if (index == kIllegalIndex || !elements_[index].is_register()) continue;
-
-    Register target = RegisterAllocator::ToRegister(i);
-    Register source = elements_[index].reg();
-    if (index != use_index) {
-      if (use_index == kIllegalIndex) {  // Target is currently unused.
-        // Copy the contents of the source register into the target.
-        // Set frame element register to target.
-        Use(target, index);
-        Unuse(source);
-        __ mov(target, source);
-      } else {
-        // Exchange contents of registers source and target.
-        // Nothing except the register backing use_index has changed.
-        elements_[use_index].set_reg(source);
-        set_register_location(target, index);
-        set_register_location(source, use_index);
-        __ xchg(source, target);
-      }
-    }
-
-    if (!elements_[index].is_synced() &&
-        expected->elements_[index].is_synced()) {
-      __ mov(Operand(ebp, fp_relative(index)), target);
-    }
-    elements_[index] = expected->elements_[index];
-  }
-}
-
-
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
-  // Move memory, constants, and copies to registers.  This is the
-  // final step and since it is not done from the bottom up, but in
-  // register code order, we have special code to ensure that the backing
-  // elements of copies are in their correct locations when we
-  // encounter the copies.
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int index = expected->register_location(i);
-    if (index != kIllegalIndex) {
-      FrameElement source = elements_[index];
-      FrameElement target = expected->elements_[index];
-      Register target_reg = RegisterAllocator::ToRegister(i);
-      ASSERT(target.reg().is(target_reg));
-      switch (source.type()) {
-        case FrameElement::INVALID:
-          UNREACHABLE();
-          break;
-        case FrameElement::REGISTER:
-          ASSERT(source.Equals(target));
-          // Go to next iteration.  Skips Use(target_reg) and syncing
-          // below.  It is safe to skip syncing because a target
-          // register frame element would only be synced if all source
-          // elements were.
-          continue;
-          break;
-        case FrameElement::MEMORY:
-          ASSERT(index <= stack_pointer_);
-          __ mov(target_reg, Operand(ebp, fp_relative(index)));
-          break;
-
-        case FrameElement::CONSTANT:
-          if (cgen()->IsUnsafeSmi(source.handle())) {
-            cgen()->MoveUnsafeSmi(target_reg, source.handle());
-          } else {
-            __ Set(target_reg, Immediate(source.handle()));
-          }
-          break;
-
-        case FrameElement::COPY: {
-          int backing_index = source.index();
-          FrameElement backing = elements_[backing_index];
-          ASSERT(backing.is_memory() || backing.is_register());
-          if (backing.is_memory()) {
-            ASSERT(backing_index <= stack_pointer_);
-            // Code optimization if backing store should also move
-            // to a register: move backing store to its register first.
-            if (expected->elements_[backing_index].is_register()) {
-              FrameElement new_backing = expected->elements_[backing_index];
-              Register new_backing_reg = new_backing.reg();
-              ASSERT(!is_used(new_backing_reg));
-              elements_[backing_index] = new_backing;
-              Use(new_backing_reg, backing_index);
-              __ mov(new_backing_reg,
-                     Operand(ebp, fp_relative(backing_index)));
-              __ mov(target_reg, new_backing_reg);
-            } else {
-              __ mov(target_reg, Operand(ebp, fp_relative(backing_index)));
-            }
-          } else {
-            __ mov(target_reg, backing.reg());
-          }
-        }
-      }
-      // Ensure the proper sync state.
-      if (target.is_synced() && !source.is_synced()) {
-        __ mov(Operand(ebp, fp_relative(index)), target_reg);
-      }
-      Use(target_reg, index);
-      elements_[index] = target;
-    }
-  }
-}
-
-
-void VirtualFrame::Enter() {
-  // Registers live on entry: esp, ebp, esi, edi.
-  Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
-  if (FLAG_debug_code) {
-    // Verify that edi contains a JS function.  The following code
-    // relies on eax being available for use.
-    __ test(edi, Immediate(kSmiTagMask));
-    __ Check(not_zero,
-             "VirtualFrame::Enter - edi is not a function (smi check).");
-    __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
-    __ Check(equal,
-             "VirtualFrame::Enter - edi is not a function (map check).");
-  }
-#endif
-
-  EmitPush(ebp);
-
-  __ mov(ebp, Operand(esp));
-
-  // Store the context in the frame.  The context is kept in esi and a
-  // copy is stored in the frame.  The external reference to esi
-  // remains.
-  EmitPush(esi);
-
-  // Store the function in the frame.  The frame owns the register
-  // reference now (ie, it can keep it in edi or spill it later).
-  Push(edi);
-  SyncElementAt(element_count() - 1);
-  cgen()->allocator()->Unuse(edi);
-}
-
-
-void VirtualFrame::Exit() {
-  Comment cmnt(masm(), "[ Exit JS frame");
-  // Record the location of the JS exit code for patching when setting
-  // break point.
-  __ RecordJSReturn();
-
-  // Avoid using the leave instruction here, because it is too
-  // short. We need the return sequence to be at least the size of a
-  // call instruction to support patching the exit code in the
-  // debugger. See VisitReturnStatement for the full return sequence.
-  __ mov(esp, Operand(ebp));
-  stack_pointer_ = frame_pointer();
-  for (int i = element_count() - 1; i > stack_pointer_; i--) {
-    FrameElement last = elements_.RemoveLast();
-    if (last.is_register()) {
-      Unuse(last.reg());
-    }
-  }
-
-  EmitPop(ebp);
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
-  int count = local_count();
-  if (count > 0) {
-    Comment cmnt(masm(), "[ Allocate space for locals");
-    // The locals are initialized to a constant (the undefined value), but
-    // we sync them with the actual frame to allocate space for spilling
-    // them later.  First sync everything above the stack pointer so we can
-    // use pushes to allocate and initialize the locals.
-    SyncRange(stack_pointer_ + 1, element_count() - 1);
-    Handle<Object> undefined = FACTORY->undefined_value();
-    FrameElement initial_value =
-        FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
-    if (count == 1) {
-      __ push(Immediate(undefined));
-    } else if (count < kLocalVarBound) {
-      // For fewer locals the unrolled loop is more compact.
-      Result temp = cgen()->allocator()->Allocate();
-      ASSERT(temp.is_valid());
-      __ Set(temp.reg(), Immediate(undefined));
-      for (int i = 0; i < count; i++) {
-        __ push(temp.reg());
-      }
-    } else {
-      // For more locals a loop in generated code is more compact.
-      Label alloc_locals_loop;
-      Result cnt = cgen()->allocator()->Allocate();
-      Result tmp = cgen()->allocator()->Allocate();
-      ASSERT(cnt.is_valid());
-      ASSERT(tmp.is_valid());
-      __ mov(cnt.reg(), Immediate(count));
-      __ mov(tmp.reg(), Immediate(undefined));
-      __ bind(&alloc_locals_loop);
-      __ push(tmp.reg());
-      __ dec(cnt.reg());
-      __ j(not_zero, &alloc_locals_loop);
-    }
-    for (int i = 0; i < count; i++) {
-      elements_.Add(initial_value);
-      stack_pointer_++;
-    }
-  }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
-  ASSERT(elements_[context_index()].is_memory());
-  __ mov(Operand(ebp, fp_relative(context_index())), esi);
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
-  ASSERT(elements_[context_index()].is_memory());
-  __ mov(esi, Operand(ebp, fp_relative(context_index())));
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
-  Result temp = cgen()->allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  __ lea(temp.reg(), ParameterAt(-1));
-  Push(&temp);
-}
-
-
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
-  FrameElement original = elements_[index];
-
-  // Is this element the backing store of any copies?
-  int new_backing_index = kIllegalIndex;
-  if (original.is_copied()) {
-    // Verify it is copied, and find first copy.
-    for (int i = index + 1; i < element_count(); i++) {
-      if (elements_[i].is_copy() && elements_[i].index() == index) {
-        new_backing_index = i;
-        break;
-      }
-    }
-  }
-
-  if (new_backing_index == kIllegalIndex) {
-    // No copies found, return kIllegalIndex.
-    if (original.is_register()) {
-      Unuse(original.reg());
-    }
-    elements_[index] = FrameElement::InvalidElement();
-    return kIllegalIndex;
-  }
-
-  // This is the backing store of copies.
-  Register backing_reg;
-  if (original.is_memory()) {
-    Result fresh = cgen()->allocator()->Allocate();
-    ASSERT(fresh.is_valid());
-    Use(fresh.reg(), new_backing_index);
-    backing_reg = fresh.reg();
-    __ mov(backing_reg, Operand(ebp, fp_relative(index)));
-  } else {
-    // The original was in a register.
-    backing_reg = original.reg();
-    set_register_location(backing_reg, new_backing_index);
-  }
-  // Invalidate the element at index.
-  elements_[index] = FrameElement::InvalidElement();
-  // Set the new backing element.
-  if (elements_[new_backing_index].is_synced()) {
-    elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg,
-                                      FrameElement::SYNCED,
-                                      original.type_info());
-  } else {
-    elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg,
-                                      FrameElement::NOT_SYNCED,
-                                      original.type_info());
-  }
-  // Update the other copies.
-  for (int i = new_backing_index + 1; i < element_count(); i++) {
-    if (elements_[i].is_copy() && elements_[i].index() == index) {
-      elements_[i].set_index(new_backing_index);
-      elements_[new_backing_index].set_copied();
-    }
-  }
-  return new_backing_index;
-}
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
-  ASSERT(index >= 0);
-  ASSERT(index <= element_count());
-  FrameElement original = elements_[index];
-  int new_backing_store_index = InvalidateFrameSlotAt(index);
-  if (new_backing_store_index != kIllegalIndex) {
-    elements_.Add(CopyElementAt(new_backing_store_index));
-    return;
-  }
-
-  switch (original.type()) {
-    case FrameElement::MEMORY: {
-      // Emit code to load the original element's data into a register.
-      // Push that register as a FrameElement on top of the frame.
-      Result fresh = cgen()->allocator()->Allocate();
-      ASSERT(fresh.is_valid());
-      FrameElement new_element =
-          FrameElement::RegisterElement(fresh.reg(),
-                                        FrameElement::NOT_SYNCED,
-                                        original.type_info());
-      Use(fresh.reg(), element_count());
-      elements_.Add(new_element);
-      __ mov(fresh.reg(), Operand(ebp, fp_relative(index)));
-      break;
-    }
-    case FrameElement::REGISTER:
-      Use(original.reg(), element_count());
-      // Fall through.
-    case FrameElement::CONSTANT:
-    case FrameElement::COPY:
-      original.clear_sync();
-      elements_.Add(original);
-      break;
-    case FrameElement::INVALID:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
-  // Store the value on top of the frame to the virtual frame slot at
-  // a given index.  The value on top of the frame is left in place.
-  // This is a duplicating operation, so it can create copies.
-  ASSERT(index >= 0);
-  ASSERT(index < element_count());
-
-  int top_index = element_count() - 1;
-  FrameElement top = elements_[top_index];
-  FrameElement original = elements_[index];
-  if (top.is_copy() && top.index() == index) return;
-  ASSERT(top.is_valid());
-
-  InvalidateFrameSlotAt(index);
-
-  // InvalidateFrameSlotAt can potentially change any frame element, due
-  // to spilling registers to allocate temporaries in order to preserve
-  // the copy-on-write semantics of aliased elements.  Reload top from
-  // the frame.
-  top = elements_[top_index];
-
-  if (top.is_copy()) {
-    // There are two cases based on the relative positions of the
-    // stored-to slot and the backing slot of the top element.
-    int backing_index = top.index();
-    ASSERT(backing_index != index);
-    if (backing_index < index) {
-      // 1. The top element is a copy of a slot below the stored-to
-      // slot.  The stored-to slot becomes an unsynced copy of that
-      // same backing slot.
-      elements_[index] = CopyElementAt(backing_index);
-    } else {
-      // 2. The top element is a copy of a slot above the stored-to
-      // slot.  The stored-to slot becomes the new (unsynced) backing
-      // slot and both the top element and the element at the former
-      // backing slot become copies of it.  The sync state of the top
-      // and former backing elements is preserved.
-      FrameElement backing_element = elements_[backing_index];
-      ASSERT(backing_element.is_memory() || backing_element.is_register());
-      if (backing_element.is_memory()) {
-        // Because sets of copies are canonicalized to be backed by
-        // their lowest frame element, and because memory frame
-        // elements are backed by the corresponding stack address, we
-        // have to move the actual value down in the stack.
-        //
-        // TODO(209): consider allocating the stored-to slot to the
-        // temp register.  Alternatively, allow copies to appear in
-        // any order in the frame and lazily move the value down to
-        // the slot.
-        Result temp = cgen()->allocator()->Allocate();
-        ASSERT(temp.is_valid());
-        __ mov(temp.reg(), Operand(ebp, fp_relative(backing_index)));
-        __ mov(Operand(ebp, fp_relative(index)), temp.reg());
-      } else {
-        set_register_location(backing_element.reg(), index);
-        if (backing_element.is_synced()) {
-          // If the element is a register, we will not actually move
-          // anything on the stack but only update the virtual frame
-          // element.
-          backing_element.clear_sync();
-        }
-      }
-      elements_[index] = backing_element;
-
-      // The old backing element becomes a copy of the new backing
-      // element.
-      FrameElement new_element = CopyElementAt(index);
-      elements_[backing_index] = new_element;
-      if (backing_element.is_synced()) {
-        elements_[backing_index].set_sync();
-      }
-
-      // All the copies of the old backing element (including the top
-      // element) become copies of the new backing element.
-      for (int i = backing_index + 1; i < element_count(); i++) {
-        if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
-          elements_[i].set_index(index);
-        }
-      }
-    }
-    return;
-  }
-
-  // Move the top element to the stored-to slot and replace it (the
-  // top element) with a copy.
-  elements_[index] = top;
-  if (top.is_memory()) {
-    // TODO(209): consider allocating the stored-to slot to the temp
-    // register.  Alternatively, allow copies to appear in any order
-    // in the frame and lazily move the value down to the slot.
-    FrameElement new_top = CopyElementAt(index);
-    new_top.set_sync();
-    elements_[top_index] = new_top;
-
-    // The sync state of the former top element is correct (synced).
-    // Emit code to move the value down in the frame.
-    Result temp = cgen()->allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ mov(temp.reg(), Operand(esp, 0));
-    __ mov(Operand(ebp, fp_relative(index)), temp.reg());
-  } else if (top.is_register()) {
-    set_register_location(top.reg(), index);
-    // The stored-to slot has the (unsynced) register reference and
-    // the top element becomes a copy.  The sync state of the top is
-    // preserved.
-    FrameElement new_top = CopyElementAt(index);
-    if (top.is_synced()) {
-      new_top.set_sync();
-      elements_[index].clear_sync();
-    }
-    elements_[top_index] = new_top;
-  } else {
-    // The stored-to slot holds the same value as the top but
-    // unsynced.  (We do not have copies of constants yet.)
-    ASSERT(top.is_constant());
-    elements_[index].clear_sync();
-  }
-}
-
-
-void VirtualFrame::UntaggedPushFrameSlotAt(int index) {
-  ASSERT(index >= 0);
-  ASSERT(index <= element_count());
-  FrameElement original = elements_[index];
-  if (original.is_copy()) {
-    original = elements_[original.index()];
-    index = original.index();
-  }
-
-  switch (original.type()) {
-    case FrameElement::MEMORY:
-    case FrameElement::REGISTER:  {
-      Label done;
-      // Emit code to load the original element's data into a register.
-      // Push that register as a FrameElement on top of the frame.
-      Result fresh = cgen()->allocator()->Allocate();
-      ASSERT(fresh.is_valid());
-      Register fresh_reg = fresh.reg();
-      FrameElement new_element =
-          FrameElement::RegisterElement(fresh_reg,
-                                        FrameElement::NOT_SYNCED,
-                                        original.type_info());
-      new_element.set_untagged_int32(true);
-      Use(fresh_reg, element_count());
-      fresh.Unuse();  // BreakTarget does not handle a live Result well.
-      elements_.Add(new_element);
-      if (original.is_register()) {
-        __ mov(fresh_reg, original.reg());
-      } else {
-        ASSERT(original.is_memory());
-        __ mov(fresh_reg, Operand(ebp, fp_relative(index)));
-      }
-      // Now convert the value to int32, or bail out.
-      if (original.type_info().IsSmi()) {
-        __ SmiUntag(fresh_reg);
-        // Pushing the element is completely done.
-      } else {
-        __ test(fresh_reg, Immediate(kSmiTagMask));
-        Label not_smi;
-        __ j(not_zero, &not_smi);
-        __ SmiUntag(fresh_reg);
-        __ jmp(&done);
-
-        __ bind(&not_smi);
-        if (!original.type_info().IsNumber()) {
-          __ cmp(FieldOperand(fresh_reg, HeapObject::kMapOffset),
-                 FACTORY->heap_number_map());
-          cgen()->unsafe_bailout_->Branch(not_equal);
-        }
-
-        if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) {
-          UNREACHABLE();
-        } else {
-          CpuFeatures::Scope use_sse2(SSE2);
-          __ movdbl(xmm0, FieldOperand(fresh_reg, HeapNumber::kValueOffset));
-          __ cvttsd2si(fresh_reg, Operand(xmm0));
-          __ cvtsi2sd(xmm1, Operand(fresh_reg));
-          __ ucomisd(xmm0, xmm1);
-          cgen()->unsafe_bailout_->Branch(not_equal);
-          cgen()->unsafe_bailout_->Branch(parity_even);  // NaN.
-          // Test for negative zero.
-          __ test(fresh_reg, Operand(fresh_reg));
-          __ j(not_zero, &done);
-          __ movmskpd(fresh_reg, xmm0);
-          __ and_(fresh_reg, 0x1);
-          cgen()->unsafe_bailout_->Branch(not_equal);
-        }
-        __ bind(&done);
-      }
-      break;
-    }
-    case FrameElement::CONSTANT:
-      elements_.Add(CopyElementAt(index));
-      elements_[element_count() - 1].set_untagged_int32(true);
-      break;
-    case FrameElement::COPY:
-    case FrameElement::INVALID:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  // Grow the expression stack by handler size less one (the return
-  // address is already pushed by a call instruction).
-  Adjust(kHandlerSize - 1);
-  __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-Result VirtualFrame::RawCallStub(CodeStub* stub) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallStub(stub);
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
-  PrepareForCall(0, 0);
-  arg->ToRegister(eax);
-  arg->Unuse();
-  return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
-  PrepareForCall(0, 0);
-
-  if (arg0->is_register() && arg0->reg().is(eax)) {
-    if (arg1->is_register() && arg1->reg().is(edx)) {
-      // Wrong registers.
-      __ xchg(eax, edx);
-    } else {
-      // Register edx is free for arg0, which frees eax for arg1.
-      arg0->ToRegister(edx);
-      arg1->ToRegister(eax);
-    }
-  } else {
-    // Register eax is free for arg1, which guarantees edx is free for
-    // arg0.
-    arg1->ToRegister(eax);
-    arg0->ToRegister(edx);
-  }
-
-  arg0->Unuse();
-  arg1->Unuse();
-  return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallJSFunction(int arg_count) {
-  Result function = Pop();
-
-  // InvokeFunction requires function in edi.  Move it in there.
-  function.ToRegister(edi);
-  function.Unuse();
-
-  // +1 for receiver.
-  PrepareForCall(arg_count + 1, arg_count + 1);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ParameterCount count(arg_count);
-  __ InvokeFunction(edi, count, CALL_FUNCTION);
-  RestoreContextRegister();
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(f, arg_count);
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(id, arg_count);
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
-  PrepareForCall(0, 0);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ DebugBreak();
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-}
-#endif
-
-
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeFlag flag,
-                                   int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ InvokeBuiltin(id, flag);
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
-                                       RelocInfo::Mode rmode) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ call(code, rmode);
-  Result result = cgen()->allocator()->Allocate(eax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-// This function assumes that the only results that could be in a_reg or b_reg
-// are a and b.  Other results can be live, but must not be in a_reg or b_reg.
-void VirtualFrame::MoveResultsToRegisters(Result* a,
-                                          Result* b,
-                                          Register a_reg,
-                                          Register b_reg) {
-  if (a->is_register() && a->reg().is(a_reg)) {
-    b->ToRegister(b_reg);
-  } else if (!cgen()->allocator()->is_used(a_reg)) {
-    a->ToRegister(a_reg);
-    b->ToRegister(b_reg);
-  } else if (cgen()->allocator()->is_used(b_reg)) {
-    // a must be in b_reg, b in a_reg.
-    __ xchg(a_reg, b_reg);
-    // Results a and b will be invalidated, so it is ok if they are switched.
-  } else {
-    b->ToRegister(b_reg);
-    a->ToRegister(a_reg);
-  }
-  a->Unuse();
-  b->Unuse();
-}
-
-
-Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
-  // Name and receiver are on the top of the frame.  The IC expects
-  // name in ecx and receiver in eax.
-  Result name = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);  // No stack arguments.
-  MoveResultsToRegisters(&name, &receiver, ecx, eax);
-
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kLoadIC_Initialize));
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
-  // Key and receiver are on top of the frame. Put them in eax and edx.
-  Result key = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-  MoveResultsToRegisters(&key, &receiver, eax, edx);
-
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_Initialize));
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallStoreIC(Handle<String> name,
-                                 bool is_contextual,
-                                 StrictModeFlag strict_mode) {
-  // Value and (if not contextual) receiver are on top of the frame.
-  // The IC expects name in ecx, value in eax, and receiver in edx.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
-                                   : Builtins::kStoreIC_Initialize));
-
-  Result value = Pop();
-  RelocInfo::Mode mode;
-  if (is_contextual) {
-    PrepareForCall(0, 0);
-    value.ToRegister(eax);
-    __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    value.Unuse();
-    mode = RelocInfo::CODE_TARGET_CONTEXT;
-  } else {
-    Result receiver = Pop();
-    PrepareForCall(0, 0);
-    MoveResultsToRegisters(&value, &receiver, eax, edx);
-    mode = RelocInfo::CODE_TARGET;
-  }
-  __ mov(ecx, name);
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
-  // Value, key, and receiver are on the top of the frame.  The IC
-  // expects value in eax, key in ecx, and receiver in edx.
-  Result value = Pop();
-  Result key = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-  if (!cgen()->allocator()->is_used(eax) ||
-      (value.is_register() && value.reg().is(eax))) {
-    if (!cgen()->allocator()->is_used(eax)) {
-      value.ToRegister(eax);
-    }
-    MoveResultsToRegisters(&key, &receiver, ecx, edx);
-    value.Unuse();
-  } else if (!cgen()->allocator()->is_used(ecx) ||
-             (key.is_register() && key.reg().is(ecx))) {
-    if (!cgen()->allocator()->is_used(ecx)) {
-      key.ToRegister(ecx);
-    }
-    MoveResultsToRegisters(&value, &receiver, eax, edx);
-    key.Unuse();
-  } else if (!cgen()->allocator()->is_used(edx) ||
-             (receiver.is_register() && receiver.reg().is(edx))) {
-    if (!cgen()->allocator()->is_used(edx)) {
-      receiver.ToRegister(edx);
-    }
-    MoveResultsToRegisters(&key, &value, ecx, eax);
-    receiver.Unuse();
-  } else {
-    // All three registers are used, and no value is in the correct place.
-    // We have one of the two circular permutations of eax, ecx, edx.
-    ASSERT(value.is_register());
-    if (value.reg().is(ecx)) {
-      __ xchg(eax, edx);
-      __ xchg(eax, ecx);
-    } else {
-      __ xchg(eax, ecx);
-      __ xchg(eax, edx);
-    }
-    value.Unuse();
-    key.Unuse();
-    receiver.Unuse();
-  }
-
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
-                                   : Builtins::kKeyedStoreIC_Initialize));
-  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
-
-
-Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
-                                int arg_count,
-                                int loop_nesting) {
-  // Function name, arguments, and receiver are on top of the frame.
-  // The IC expects the name in ecx and the rest on the stack and
-  // drops them all.
-  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
-  Handle<Code> ic = Isolate::Current()->stub_cache()->ComputeCallInitialize(
-      arg_count, in_loop);
-  // Spill args, receiver, and function.  The call will drop args and
-  // receiver.
-  Result name = Pop();
-  PrepareForCall(arg_count + 1, arg_count + 1);  // Arguments + receiver.
-  name.ToRegister(ecx);
-  name.Unuse();
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
-                                     int arg_count,
-                                     int loop_nesting) {
-  // Function name, arguments, and receiver are on top of the frame.
-  // The IC expects the name in ecx and the rest on the stack and
-  // drops them all.
-  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
-  Handle<Code> ic =
-      Isolate::Current()->stub_cache()->ComputeKeyedCallInitialize(arg_count,
-                                                                   in_loop);
-  // Spill args, receiver, and function.  The call will drop args and
-  // receiver.
-  Result name = Pop();
-  PrepareForCall(arg_count + 1, arg_count + 1);  // Arguments + receiver.
-  name.ToRegister(ecx);
-  name.Unuse();
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallConstructor(int arg_count) {
-  // Arguments, receiver, and function are on top of the frame.  The
-  // IC expects arg count in eax, function in edi, and the arguments
-  // and receiver on the stack.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kJSConstructCall));
-  // Duplicate the function before preparing the frame.
-  PushElementAt(arg_count);
-  Result function = Pop();
-  PrepareForCall(arg_count + 1, arg_count + 1);  // Spill function and args.
-  function.ToRegister(edi);
-
-  // Constructors are called with the number of arguments in register
-  // eax for now. Another option would be to have separate construct
-  // call trampolines for each distinct argument count encountered.
-  Result num_args = cgen()->allocator()->Allocate(eax);
-  ASSERT(num_args.is_valid());
-  __ Set(num_args.reg(), Immediate(arg_count));
-
-  function.Unuse();
-  num_args.Unuse();
-  return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
-}
-
-
-void VirtualFrame::Drop(int count) {
-  ASSERT(count >= 0);
-  ASSERT(height() >= count);
-  int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
-  // Emit code to lower the stack pointer if necessary.
-  if (num_virtual_elements < count) {
-    int num_dropped = count - num_virtual_elements;
-    stack_pointer_ -= num_dropped;
-    __ add(Operand(esp), Immediate(num_dropped * kPointerSize));
-  }
-
-  // Discard elements from the virtual frame and free any registers.
-  for (int i = 0; i < count; i++) {
-    FrameElement dropped = elements_.RemoveLast();
-    if (dropped.is_register()) {
-      Unuse(dropped.reg());
-    }
-  }
-}
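
Drop() above splits the count between elements that exist only in the virtual frame (discarded for free) and elements actually below esp (released by adding to the stack pointer). A small sketch of that arithmetic, with hypothetical standalone names:

```cpp
#include <cassert>

// Elements above stack_pointer_ are purely virtual and cost nothing
// to drop; only the remainder require adjusting esp.
int PhysicalSlotsToRelease(int element_count, int stack_pointer, int count) {
  int num_virtual_elements = (element_count - 1) - stack_pointer;
  return count > num_virtual_elements ? count - num_virtual_elements : 0;
}

int main() {
  // 10 elements, esp at index 6 => 3 virtual elements on top.
  assert(PhysicalSlotsToRelease(10, 6, 2) == 0);  // all virtual
  assert(PhysicalSlotsToRelease(10, 6, 5) == 2);  // 2 real slots popped
  return 0;
}
```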
-
-
-Result VirtualFrame::Pop() {
-  FrameElement element = elements_.RemoveLast();
-  int index = element_count();
-  ASSERT(element.is_valid());
-  ASSERT(element.is_untagged_int32() == cgen()->in_safe_int32_mode());
-
-  // Get number type information of the result.
-  TypeInfo info;
-  if (!element.is_copy()) {
-    info = element.type_info();
-  } else {
-    info = elements_[element.index()].type_info();
-  }
-
-  bool pop_needed = (stack_pointer_ == index);
-  if (pop_needed) {
-    stack_pointer_--;
-    if (element.is_memory()) {
-      Result temp = cgen()->allocator()->Allocate();
-      ASSERT(temp.is_valid());
-      __ pop(temp.reg());
-      temp.set_type_info(info);
-      temp.set_untagged_int32(element.is_untagged_int32());
-      return temp;
-    }
-
-    __ add(Operand(esp), Immediate(kPointerSize));
-  }
-  ASSERT(!element.is_memory());
-
-  // The top element is a register, constant, or a copy.  Unuse
-  // registers and follow copies to their backing store.
-  if (element.is_register()) {
-    Unuse(element.reg());
-  } else if (element.is_copy()) {
-    ASSERT(!element.is_untagged_int32());
-    ASSERT(element.index() < index);
-    index = element.index();
-    element = elements_[index];
-  }
-  ASSERT(!element.is_copy());
-
-  // The element is memory, a register, or a constant.
-  if (element.is_memory()) {
-    // Memory elements could only be the backing store of a copy.
-    // Allocate the original to a register.
-    ASSERT(index <= stack_pointer_);
-    ASSERT(!element.is_untagged_int32());
-    Result temp = cgen()->allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    Use(temp.reg(), index);
-    FrameElement new_element =
-        FrameElement::RegisterElement(temp.reg(),
-                                      FrameElement::SYNCED,
-                                      element.type_info());
-    // Preserve the copy flag on the element.
-    if (element.is_copied()) new_element.set_copied();
-    elements_[index] = new_element;
-    __ mov(temp.reg(), Operand(ebp, fp_relative(index)));
-    return Result(temp.reg(), info);
-  } else if (element.is_register()) {
-    Result return_value(element.reg(), info);
-    return_value.set_untagged_int32(element.is_untagged_int32());
-    return return_value;
-  } else {
-    ASSERT(element.is_constant());
-    Result return_value(element.handle());
-    return_value.set_untagged_int32(element.is_untagged_int32());
-    return return_value;
-  }
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  stack_pointer_--;
-  elements_.RemoveLast();
-  __ pop(reg);
-}
-
-
-void VirtualFrame::EmitPop(Operand operand) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  stack_pointer_--;
-  elements_.RemoveLast();
-  __ pop(operand);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(reg);
-}
-
-
-void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(operand);
-}
-
-
-void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(immediate);
-}
-
-
-void VirtualFrame::PushUntaggedElement(Handle<Object> value) {
-  ASSERT(!ConstantPoolOverflowed());
-  elements_.Add(FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED));
-  elements_[element_count() - 1].set_untagged_int32(true);
-}
-
-
-void VirtualFrame::Push(Expression* expr) {
-  ASSERT(expr->IsTrivial());
-
-  Literal* lit = expr->AsLiteral();
-  if (lit != NULL) {
-    Push(lit->handle());
-    return;
-  }
-
-  VariableProxy* proxy = expr->AsVariableProxy();
-  if (proxy != NULL) {
-    Slot* slot = proxy->var()->AsSlot();
-    if (slot->type() == Slot::LOCAL) {
-      PushLocalAt(slot->index());
-      return;
-    }
-    if (slot->type() == Slot::PARAMETER) {
-      PushParameterAt(slot->index());
-      return;
-    }
-  }
-  UNREACHABLE();
-}
-
-
-void VirtualFrame::Push(Handle<Object> value) {
-  if (ConstantPoolOverflowed()) {
-    Result temp = cgen()->allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ Set(temp.reg(), Immediate(value));
-    Push(&temp);
-  } else {
-    FrameElement element =
-        FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
-    elements_.Add(element);
-  }
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
deleted file mode 100644
index 504a8fc..0000000
--- a/src/ia32/virtual-frame-ia32.h
+++ /dev/null
@@ -1,650 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
-#define V8_IA32_VIRTUAL_FRAME_IA32_H_
-
-#include "codegen.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "type-info.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame.  It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack.  It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame: public ZoneObject {
- public:
-  // A utility class to introduce a scope where the virtual frame is
-  // expected to remain spilled.  The constructor spills the code
-  // generator's current frame, but no attempt is made to require it
-  // to stay spilled.  It is intended as documentation while the code
-  // generator is being transformed.
-  class SpilledScope BASE_EMBEDDED {
-   public:
-    SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
-      ASSERT(cgen()->has_valid_frame());
-      cgen()->frame()->SpillAll();
-      cgen()->set_in_spilled_code(true);
-    }
-
-    ~SpilledScope() {
-      cgen()->set_in_spilled_code(previous_state_);
-    }
-
-   private:
-    bool previous_state_;
-
-    CodeGenerator* cgen() {
-      return CodeGeneratorScope::Current(Isolate::Current());
-    }
-  };
-
-  // An illegal index into the virtual frame.
-  static const int kIllegalIndex = -1;
-
-  // Construct an initial virtual frame on entry to a JS function.
-  inline VirtualFrame();
-
-  // Construct a virtual frame as a clone of an existing one.
-  explicit inline VirtualFrame(VirtualFrame* original);
-
-  CodeGenerator* cgen() {
-    return CodeGeneratorScope::Current(Isolate::Current());
-  }
-
-  MacroAssembler* masm() { return cgen()->masm(); }
-
-  // Create a duplicate of an existing valid frame element.
-  FrameElement CopyElementAt(int index,
-    TypeInfo info = TypeInfo::Uninitialized());
-
-  // The number of elements on the virtual frame.
-  int element_count() { return elements_.length(); }
-
-  // The height of the virtual expression stack.
-  int height() { return element_count() - expression_base_index(); }
-
-  int register_location(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num];
-  }
-
-  inline int register_location(Register reg);
-
-  inline void set_register_location(Register reg, int index);
-
-  bool is_used(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num] != kIllegalIndex;
-  }
-
-  inline bool is_used(Register reg);
-
-  // Add extra in-memory elements to the top of the frame to match an actual
-  // frame (e.g., the frame after an exception handler is pushed).  No code is
-  // emitted.
-  void Adjust(int count);
-
-  // Forget count elements from the top of the frame, all of which must
-  // be in memory (including synced elements), and adjust the stack
-  // pointer downward to match an external frame effect (examples
-  // include a call removing its arguments, and exiting a try/catch
-  // removing an exception handler).  No code will be emitted.
-  void Forget(int count) {
-    ASSERT(count >= 0);
-    ASSERT(stack_pointer_ == element_count() - 1);
-    stack_pointer_ -= count;
-    ForgetElements(count);
-  }
-
-  // Forget count elements from the top of the frame without adjusting
-  // the stack pointer downward.  This is used, for example, before
-  // merging frames at break, continue, and return targets.
-  void ForgetElements(int count);
-
-  // Spill all values from the frame to memory.
-  inline void SpillAll();
-
-  // Spill all occurrences of a specific register from the frame.
-  void Spill(Register reg) {
-    if (is_used(reg)) SpillElementAt(register_location(reg));
-  }
-
-  // Make the two registers distinct and spill them.  Returns the second
-  // register.  If the registers were not distinct then it returns the new
-  // second register.
-  Result MakeDistinctAndSpilled(Result* left, Result* right) {
-    Spill(left->reg());
-    Spill(right->reg());
-    if (left->reg().is(right->reg())) {
-      RegisterAllocator* allocator = cgen()->allocator();
-      Result fresh = allocator->Allocate();
-      ASSERT(fresh.is_valid());
-      masm()->mov(fresh.reg(), right->reg());
-      return fresh;
-    }
-    return *right;
-  }
-
-  // Spill all occurrences of an arbitrary register if possible.  Return the
-  // register spilled or no_reg if it was not possible to free any register
-  // (i.e., they all have frame-external references).
-  Register SpillAnyRegister();
-
-  // Spill the top element of the frame.
-  void SpillTop() { SpillElementAt(element_count() - 1); }
-
-  // Sync the range of elements in [begin, end] with memory.
-  void SyncRange(int begin, int end);
-
-  // Make this frame so that an arbitrary frame of the same height can
-  // be merged to it.  Copies and constants are removed from the frame.
-  void MakeMergable();
-
-  // Prepare this virtual frame for merging to an expected frame by
-  // performing some state changes that do not require generating
-  // code.  It is guaranteed that no code will be generated.
-  void PrepareMergeTo(VirtualFrame* expected);
-
-  // Make this virtual frame have a state identical to an expected virtual
-  // frame.  As a side effect, code may be emitted to make this frame match
-  // the expected one.
-  void MergeTo(VirtualFrame* expected);
-
-  // Detach a frame from its code generator, perhaps temporarily.  This
-  // tells the register allocator that it is free to use frame-internal
-  // registers.  Used when the code generator's frame is switched from this
-  // one to NULL by an unconditional jump.
-  void DetachFromCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Unuse(i);
-    }
-  }
-
-  // (Re)attach a frame to its code generator.  This informs the register
-  // allocator that the frame-internal register references are active again.
-  // Used when a code generator's frame is switched from NULL to this one by
-  // binding a label.
-  void AttachToCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Use(i);
-    }
-  }
-
-  // Emit code for the physical JS entry and exit frame sequences.  After
-  // calling Enter, the virtual frame is ready for use, and after calling
-  // Exit it should not be used.  Note that Enter does not allocate space in
-  // the physical frame for storing frame-allocated locals.
-  void Enter();
-  void Exit();
-
-  // Prepare for returning from the frame by spilling locals.  This
-  // avoids generating unnecessary merge code when jumping to the
-  // shared return site.  Emits code for spills.
-  inline void PrepareForReturn();
-
-  // Number of local variables above which we use a loop for allocating.
-  static const int kLocalVarBound = 10;
-
-  // Allocate and initialize the frame-allocated locals.
-  void AllocateStackSlots();
-
-  // An element of the expression stack as an assembly operand.
-  Operand ElementAt(int index) const {
-    return Operand(esp, index * kPointerSize);
-  }
-
-  // Random-access store to a frame-top relative frame element.  The result
-  // becomes owned by the frame and is invalidated.
-  void SetElementAt(int index, Result* value);
-
-  // Set a frame element to a constant.  The index is frame-top relative.
-  inline void SetElementAt(int index, Handle<Object> value);
-
-  void PushElementAt(int index) {
-    PushFrameSlotAt(element_count() - index - 1);
-  }
-
-  void StoreToElementAt(int index) {
-    StoreToFrameSlotAt(element_count() - index - 1);
-  }
-
-  // A frame-allocated local as an assembly operand.
-  Operand LocalAt(int index) {
-    ASSERT(0 <= index);
-    ASSERT(index < local_count());
-    return Operand(ebp, kLocal0Offset - index * kPointerSize);
-  }
-
-  // Push a copy of the value of a local frame slot on top of the frame.
-  void PushLocalAt(int index) {
-    PushFrameSlotAt(local0_index() + index);
-  }
-
-  // Push a copy of the value of a local frame slot on top of the frame
-  // as an untagged int32 value.
-  void UntaggedPushLocalAt(int index) {
-    UntaggedPushFrameSlotAt(local0_index() + index);
-  }
-
-  // Push the value of a local frame slot on top of the frame and invalidate
-  // the local slot.  The slot should be written to before trying to read
-  // from it again.
-  void TakeLocalAt(int index) {
-    TakeFrameSlotAt(local0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a local frame slot.  The
-  // value is left in place on top of the frame.
-  void StoreToLocalAt(int index) {
-    StoreToFrameSlotAt(local0_index() + index);
-  }
-
-  // Push the address of the receiver slot on the frame.
-  void PushReceiverSlotAddress();
-
-  // Push the function on top of the frame.
-  void PushFunction() {
-    PushFrameSlotAt(function_index());
-  }
-
-  // Save the value of the esi register to the context frame slot.
-  void SaveContextRegister();
-
-  // Restore the esi register from the value of the context frame
-  // slot.
-  void RestoreContextRegister();
-
-  // A parameter as an assembly operand.
-  Operand ParameterAt(int index) {
-    ASSERT(-1 <= index);  // -1 is the receiver.
-    ASSERT(index < parameter_count());
-    return Operand(ebp, (1 + parameter_count() - index) * kPointerSize);
-  }
-
-  // Push a copy of the value of a parameter frame slot on top of the frame.
-  void PushParameterAt(int index) {
-    PushFrameSlotAt(param0_index() + index);
-  }
-
-  // Push a copy of the value of a parameter frame slot on top of the
-  // frame as an untagged int32 value.
-  void UntaggedPushParameterAt(int index) {
-    UntaggedPushFrameSlotAt(param0_index() + index);
-  }
-
-  // Push the value of a parameter frame slot on top of the frame and
-  // invalidate the parameter slot.  The slot should be written to before
-  // trying to read from it again.
-  void TakeParameterAt(int index) {
-    TakeFrameSlotAt(param0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a parameter frame slot.
-  // The value is left in place on top of the frame.
-  void StoreToParameterAt(int index) {
-    StoreToFrameSlotAt(param0_index() + index);
-  }
-
-  // The receiver frame slot.
-  Operand Receiver() {
-    return ParameterAt(-1);
-  }
-
-  // Push a try-catch or try-finally handler on top of the virtual frame.
-  void PushTryHandler(HandlerType type);
-
-  // Call stub given the number of arguments it expects on (and
-  // removes from) the stack.
-  inline Result CallStub(CodeStub* stub, int arg_count);
-
-  // Call stub that takes a single argument passed in eax.  The
-  // argument is given as a result which does not have to be eax or
-  // even a register.  The argument is consumed by the call.
-  Result CallStub(CodeStub* stub, Result* arg);
-
-  // Call stub that takes a pair of arguments passed in edx (arg0) and
-  // eax (arg1).  The arguments are given as results which do not have
-  // to be in the proper registers or even in registers.  The
-  // arguments are consumed by the call.
-  Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
-  // Call JS function from top of the stack with arguments
-  // taken from the stack.
-  Result CallJSFunction(int arg_count);
-
-  // Call runtime given the number of arguments expected on (and
-  // removed from) the stack.
-  Result CallRuntime(const Runtime::Function* f, int arg_count);
-  Result CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  void DebugBreak();
-#endif
-
-  // Invoke builtin given the number of arguments it expects on (and
-  // removes from) the stack.
-  Result InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, int arg_count);
-
-  // Call load IC.  Name and receiver are found on top of the frame.
-  // Both are dropped.
-  Result CallLoadIC(RelocInfo::Mode mode);
-
-  // Call keyed load IC.  Key and receiver are found on top of the
-  // frame.  Both are dropped.
-  Result CallKeyedLoadIC(RelocInfo::Mode mode);
-
-  // Call store IC.  If the load is contextual, value is found on top of the
-  // frame.  If not, value and receiver are on the frame.  Both are dropped.
-  Result CallStoreIC(Handle<String> name, bool is_contextual,
-                     StrictModeFlag strict_mode);
-
-  // Call keyed store IC.  Value, key, and receiver are found on top
-  // of the frame.  All three are dropped.
-  Result CallKeyedStoreIC(StrictModeFlag strict_mode);
-
-  // Call call IC.  Function name, arguments, and receiver are found on top
-  // of the frame and dropped by the call.  The argument count does not
-  // include the receiver.
-  Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
-  // Call keyed call IC.  Same calling convention as CallCallIC.
-  Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
-  // Allocate and call JS function as constructor.  Arguments,
-  // receiver (global object), and function are found on top of the
-  // frame.  Function is not dropped.  The argument count does not
-  // include the receiver.
-  Result CallConstructor(int arg_count);
-
-  // Drop a number of elements from the top of the expression stack.  May
-  // emit code to affect the physical frame.  Does not clobber any registers
-  // except possibly the stack pointer.
-  void Drop(int count);
-
-  // Drop one element.
-  void Drop() {
-    Drop(1);
-  }
-
-  // Duplicate the top element of the frame.
-  void Dup() {
-    PushFrameSlotAt(element_count() - 1);
-  }
-
-  // Pop an element from the top of the expression stack.  Returns a
-  // Result, which may be a constant or a register.
-  Result Pop();
-
-  // Pop and save an element from the top of the expression stack and
-  // emit a corresponding pop instruction.
-  void EmitPop(Register reg);
-  void EmitPop(Operand operand);
-
-  // Push an element on top of the expression stack and emit a
-  // corresponding push instruction.
-  void EmitPush(Register reg,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(Operand operand,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(Immediate immediate,
-                TypeInfo info = TypeInfo::Unknown());
-
-  inline bool ConstantPoolOverflowed();
-
-  // Push an element on the virtual frame.
-  void Push(Handle<Object> value);
-  inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
-  inline void Push(Smi* value);
-
-  void PushUntaggedElement(Handle<Object> value);
-
-  // Pushing a result invalidates it (its contents become owned by the
-  // frame).
-  void Push(Result* result) {
-    // This assert will trigger if you try to push the same value twice.
-    ASSERT(result->is_valid());
-    if (result->is_register()) {
-      Push(result->reg(), result->type_info());
-    } else {
-      ASSERT(result->is_constant());
-      Push(result->handle());
-    }
-    if (cgen()->in_safe_int32_mode()) {
-      ASSERT(result->is_untagged_int32());
-      elements_[element_count() - 1].set_untagged_int32(true);
-    }
-    result->Unuse();
-  }
-
-  // Pushing an expression expects that the expression is trivial (according
-  // to Expression::IsTrivial).
-  void Push(Expression* expr);
-
-  // Nip removes zero or more elements from immediately below the top
-  // of the frame, leaving the previous top-of-frame value on top of
-  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
-  inline void Nip(int num_dropped);
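
Since the comment defines Nip(k) purely by the Pop/Drop/Push identity, here is a sketch of that identity on a plain std::vector standing in for the expression stack (illustration only, not the V8 implementation):

```cpp
#include <cassert>
#include <vector>

void Nip(std::vector<int>* stack, int num_dropped) {
  int top = stack->back();                     // x = Pop()
  stack->pop_back();
  stack->resize(stack->size() - num_dropped);  // Drop(k)
  stack->push_back(top);                       // Push(x)
}

int main() {
  std::vector<int> stack = {1, 2, 3, 4, 5};
  Nip(&stack, 2);  // removes 3 and 4, keeps 5 on top
  assert((stack == std::vector<int>{1, 2, 5}));
  return 0;
}
```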
-
-  // Check that the frame has no elements containing untagged int32 elements.
-  bool HasNoUntaggedInt32Elements() {
-    for (int i = 0; i < element_count(); ++i) {
-      if (elements_[i].is_untagged_int32()) return false;
-    }
-    return true;
-  }
-
-  // Update the type information of a variable frame element directly.
-  inline void SetTypeForLocalAt(int index, TypeInfo info);
-  inline void SetTypeForParamAt(int index, TypeInfo info);
-
- private:
-  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
-  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
-  static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
-  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
-  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
-
-  ZoneList<FrameElement> elements_;
-
-  // The index of the element that is at the processor's stack pointer
-  // (the esp register).
-  int stack_pointer_;
-
-  // The index of the register frame element using each register, or
-  // kIllegalIndex if a register is not on the frame.
-  int register_locations_[RegisterAllocator::kNumRegisters];
-
-  // The number of frame-allocated locals and parameters respectively.
-  inline int parameter_count();
-
-  inline int local_count();
-
-  // The index of the element that is at the processor's frame pointer
-  // (the ebp register).  The parameters, receiver, and return address
-  // are below the frame pointer.
-  int frame_pointer() {
-    return parameter_count() + 2;
-  }
-
-  // The index of the first parameter.  The receiver lies below the first
-  // parameter.
-  int param0_index() {
-    return 1;
-  }
-
-  // The index of the context slot in the frame.  It is immediately
-  // above the frame pointer.
-  int context_index() {
-    return frame_pointer() + 1;
-  }
-
-  // The index of the function slot in the frame.  It is above the frame
-  // pointer and the context slot.
-  int function_index() {
-    return frame_pointer() + 2;
-  }
-
-  // The index of the first local.  Between the frame pointer and the
-  // locals lie the context and the function.
-  int local0_index() {
-    return frame_pointer() + 3;
-  }
-
-  // The index of the base of the expression stack.
-  int expression_base_index() {
-    return local0_index() + local_count();
-  }
-
-  // Convert a frame index into a frame pointer relative offset into the
-  // actual stack.
-  int fp_relative(int index) {
-    ASSERT(index < element_count());
-    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
-    return (frame_pointer() - index) * kPointerSize;
-  }
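
To make fp_relative() concrete: with two parameters, frame_pointer() is 4 and local0_index() is 7, so the first local comes out at a negative ebp offset while parameters come out positive, as expected on ia32 where the stack grows down. A standalone sketch of the same arithmetic (names are stand-ins):

```cpp
#include <cassert>

const int kPointerSize = 4;  // ia32

// Indices grow away from the parameters, so elements above the frame
// pointer map to negative ebp-relative offsets.
int FpRelative(int frame_pointer_index, int index) {
  return (frame_pointer_index - index) * kPointerSize;
}

int main() {
  int parameter_count = 2;
  int frame_pointer = parameter_count + 2;  // 4, as in frame_pointer()
  int local0 = frame_pointer + 3;           // 7, as in local0_index()
  assert(FpRelative(frame_pointer, local0) == -12);  // first local
  assert(FpRelative(frame_pointer, 1) == 12);        // param0_index()
  return 0;
}
```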
-
-  // Record an occurrence of a register in the virtual frame.  This has the
-  // effect of incrementing the register's external reference count and
-  // of updating the index of the register's location in the frame.
-  void Use(Register reg, int index) {
-    ASSERT(!is_used(reg));
-    set_register_location(reg, index);
-    cgen()->allocator()->Use(reg);
-  }
-
-  // Record that a register reference has been dropped from the frame.  This
-  // decrements the register's external reference count and invalidates the
-  // index of the register's location in the frame.
-  void Unuse(Register reg) {
-    ASSERT(is_used(reg));
-    set_register_location(reg, kIllegalIndex);
-    cgen()->allocator()->Unuse(reg);
-  }
-
-  // Spill the element at a particular index---write it to memory if
-  // necessary, free any associated register, and forget its value if
-  // constant.
-  void SpillElementAt(int index);
-
-  // Sync the element at a particular index.  If it is a register or
-  // constant that disagrees with the value on the stack, write it to memory.
-  // Keep the element type as register or constant, and clear the dirty bit.
-  void SyncElementAt(int index);
-
-  // Sync a single unsynced element that lies beneath or at the stack pointer.
-  void SyncElementBelowStackPointer(int index);
-
-  // Sync a single unsynced element that lies just above the stack pointer.
-  void SyncElementByPushing(int index);
-
-  // Push a copy of a frame slot (typically a local or parameter) on top of
-  // the frame.
-  inline void PushFrameSlotAt(int index);
-
-  // Push a copy of a frame slot (typically a local or parameter) on top of
-  // the frame, as an untagged int32 value.  Bails out if the value is not
-  // an int32.
-  void UntaggedPushFrameSlotAt(int index);
-
-  // Push the value of a frame slot (typically a local or parameter) on
-  // top of the frame and invalidate the slot.
-  void TakeFrameSlotAt(int index);
-
-  // Store the value on top of the frame to a frame slot (typically a local
-  // or parameter).
-  void StoreToFrameSlotAt(int index);
-
-  // Spill all elements in registers. Spill the top spilled_args elements
-  // on the frame.  Sync all other frame elements.
-  // Then drop dropped_args elements from the virtual frame, to match
-  // the effect of an upcoming call that will drop them from the stack.
-  void PrepareForCall(int spilled_args, int dropped_args);
-
-  // Move frame elements currently in registers or constants, that
-  // should be in memory in the expected frame, to memory.
-  void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
-  // Make the register-to-register moves necessary to
-  // merge this frame with the expected frame.
-  // Register to memory moves must already have been made,
-  // and memory to register moves must follow this call.
-  // This is because some new memory-to-register moves are
-  // created in order to break cycles of register moves.
-  // Used in the implementation of MergeTo().
-  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
-  // Make the memory-to-register and constant-to-register moves
-  // needed to make this frame equal the expected frame.
-  // Called after all register-to-memory and register-to-register
-  // moves have been made.  After this function returns, the frames
-  // should be equal.
-  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
-
-  // Invalidates a frame slot (puts an invalid frame element in it).
-  // Copies on the frame are correctly handled, and if this slot was
-  // the backing store of copies, the index of the new backing store
-  // is returned.  Otherwise, returns kIllegalIndex.
-  // Register counts are correctly updated.
-  int InvalidateFrameSlotAt(int index);
-
-  // This function assumes that a and b are the only results that could be in
-  // the registers a_reg or b_reg.  Other results can be live, but must not
-  // be in the registers a_reg or b_reg.  The results a and b are invalidated.
-  void MoveResultsToRegisters(Result* a,
-                              Result* b,
-                              Register a_reg,
-                              Register b_reg);
-
-  // Call a code stub that has already been prepared for calling (via
-  // PrepareForCall).
-  Result RawCallStub(CodeStub* stub);
-
-  // Calls a code object which has already been prepared for calling
-  // (via PrepareForCall).
-  Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
-  inline bool Equals(VirtualFrame* other);
-
-  // Classes that need raw access to the elements_ array.
-  friend class FrameRegisterState;
-  friend class JumpTarget;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_IA32_VIRTUAL_FRAME_IA32_H_
diff --git a/src/ic.cc b/src/ic.cc
index 382b438..2299922 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -282,7 +282,6 @@
       return KeyedStoreIC::Clear(address, target);
     case Code::CALL_IC: return CallIC::Clear(address, target);
     case Code::KEYED_CALL_IC:  return KeyedCallIC::Clear(address, target);
-    case Code::BINARY_OP_IC:
     case Code::TYPE_RECORDING_BINARY_OP_IC:
     case Code::COMPARE_IC:
       // Clearing these is tricky and does not
@@ -305,54 +304,23 @@
 }
 
 
-void KeyedLoadIC::ClearInlinedVersion(Address address) {
-  // Insert null as the map to check for to make sure the map check fails
-  // sending control flow to the IC instead of the inlined version.
-  PatchInlinedLoad(address, HEAP->null_value());
-}
-
-
 void KeyedLoadIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
   // Make sure to also clear the map used in inline fast cases.  If we
   // do not clear these maps, cached code can keep objects alive
   // through the embedded maps.
-  ClearInlinedVersion(address);
   SetTargetAtAddress(address, initialize_stub());
 }
 
 
-void LoadIC::ClearInlinedVersion(Address address) {
-  // Reset the map check of the inlined inobject property load (if
-  // present) to guarantee failure by holding an invalid map (the null
-  // value).  The offset can be patched to anything.
-  Heap* heap = HEAP;
-  PatchInlinedLoad(address, heap->null_value(), 0);
-  PatchInlinedContextualLoad(address,
-                             heap->null_value(),
-                             heap->null_value(),
-                             true);
-}
-
-
 void LoadIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
-  ClearInlinedVersion(address);
   SetTargetAtAddress(address, initialize_stub());
 }
 
 
-void StoreIC::ClearInlinedVersion(Address address) {
-  // Reset the map check of the inlined inobject property store (if
-  // present) to guarantee failure by holding an invalid map (the null
-  // value).  The offset can be patched to anything.
-  PatchInlinedStore(address, HEAP->null_value(), 0);
-}
-
-
 void StoreIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
-  ClearInlinedVersion(address);
   SetTargetAtAddress(address,
       (target->extra_ic_state() == kStrictMode)
         ? initialize_stub_strict()
@@ -360,21 +328,6 @@
 }
 
 
-void KeyedStoreIC::ClearInlinedVersion(Address address) {
-  // Insert null as the elements map to check for.  This will make
-  // sure that the elements fast-case map check fails so that control
-  // flows to the IC instead of the inlined version.
-  PatchInlinedStore(address, HEAP->null_value());
-}
-
-
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {
-  // Restore the fast-case elements map check so that the inlined
-  // version can be used again.
-  PatchInlinedStore(address, HEAP->fixed_array_map());
-}
-
-
 void KeyedStoreIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
   SetTargetAtAddress(address,
@@ -874,9 +827,6 @@
 #endif
       if (state == PREMONOMORPHIC) {
         if (object->IsString()) {
-          Map* map = HeapObject::cast(*object)->map();
-          const int offset = String::kLengthOffset;
-          PatchInlinedLoad(address(), map, offset);
           set_target(isolate()->builtins()->builtin(
               Builtins::kLoadIC_StringLength));
         } else {
@@ -904,9 +854,6 @@
       if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
 #endif
       if (state == PREMONOMORPHIC) {
-        Map* map = HeapObject::cast(*object)->map();
-        const int offset = JSArray::kLengthOffset;
-        PatchInlinedLoad(address(), map, offset);
         set_target(isolate()->builtins()->builtin(
             Builtins::kLoadIC_ArrayLength));
       } else {
@@ -949,63 +896,6 @@
     LOG(isolate(), SuspectReadEvent(*name, *object));
   }
 
-  bool can_be_inlined_precheck =
-      FLAG_use_ic &&
-      lookup.IsProperty() &&
-      lookup.IsCacheable() &&
-      lookup.holder() == *object &&
-      !object->IsAccessCheckNeeded();
-
-  bool can_be_inlined =
-      can_be_inlined_precheck &&
-      state == PREMONOMORPHIC &&
-      lookup.type() == FIELD;
-
-  bool can_be_inlined_contextual =
-      can_be_inlined_precheck &&
-      state == UNINITIALIZED &&
-      lookup.holder()->IsGlobalObject() &&
-      lookup.type() == NORMAL;
-
-  if (can_be_inlined) {
-    Map* map = lookup.holder()->map();
-    // Property's index in the properties array.  If negative we have
-    // an inobject property.
-    int index = lookup.GetFieldIndex() - map->inobject_properties();
-    if (index < 0) {
-      // Index is an offset from the end of the object.
-      int offset = map->instance_size() + (index * kPointerSize);
-      if (PatchInlinedLoad(address(), map, offset)) {
-        set_target(megamorphic_stub());
-        TRACE_IC_NAMED("[LoadIC : inline patch %s]\n", name);
-        return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
-      } else {
-        TRACE_IC_NAMED("[LoadIC : no inline patch %s (patching failed)]\n",
-                       name);
-      }
-    } else {
-      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inobject)]\n", name);
-    }
-  } else if (can_be_inlined_contextual) {
-    Map* map = lookup.holder()->map();
-    JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
-        lookup.holder()->property_dictionary()->ValueAt(
-            lookup.GetDictionaryEntry()));
-    if (PatchInlinedContextualLoad(address(),
-                                   map,
-                                   cell,
-                                   lookup.IsDontDelete())) {
-      set_target(megamorphic_stub());
-      TRACE_IC_NAMED("[LoadIC : inline contextual patch %s]\n", name);
-      ASSERT(cell->value() != isolate()->heap()->the_hole_value());
-      return cell->value();
-    }
-  } else {
-    if (FLAG_use_ic && state == PREMONOMORPHIC) {
-      TRACE_IC_NAMED("[LoadIC : no inline patch %s (not inlinable)]\n", name);
-    }
-  }
-
   // Update inline cache and stub cache.
   if (FLAG_use_ic) {
     UpdateCaches(&lookup, state, object, name);
@@ -1143,6 +1033,16 @@
 MaybeObject* KeyedLoadIC::Load(State state,
                                Handle<Object> object,
                                Handle<Object> key) {
+  // Check for values that can be converted into a symbol.
+  // TODO(1295): Remove this code.
+  HandleScope scope(isolate());
+  if (key->IsHeapNumber() &&
+      isnan(HeapNumber::cast(*key)->value())) {
+    key = isolate()->factory()->nan_symbol();
+  } else if (key->IsUndefined()) {
+    key = isolate()->factory()->undefined_symbol();
+  }
+
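The inserted canonicalization maps two awkward keys onto fixed symbols before the symbol fast path runs. A standalone stand-in for the mapping (plain C++ strings rather than V8 Symbol/Factory types, purely illustrative):

```cpp
#include <cassert>
#include <cmath>
#include <string>

// A NaN numeric key becomes the fixed name "NaN" and an undefined key
// becomes "undefined", so both hit the symbol fast path.
std::string CanonicalizeKey(bool is_undefined, double number) {
  if (is_undefined) return "undefined";
  if (std::isnan(number)) return "NaN";
  return std::to_string(number);
}

int main() {
  assert(CanonicalizeKey(true, 0.0) == "undefined");
  assert(CanonicalizeKey(false, std::nan("")) == "NaN");
  return 0;
}
```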
   if (key->IsSymbol()) {
     Handle<String> name = Handle<String>::cast(key);
 
@@ -1285,18 +1185,6 @@
 #ifdef DEBUG
     TraceIC("KeyedLoadIC", key, state, target());
 #endif  // DEBUG
-
-    // For JSObjects with fast elements that are not value wrappers
-    // and that do not have indexed interceptors, we initialize the
-    // inlined fast case (if present) by patching the inlined map
-    // check.
-    if (object->IsJSObject() &&
-        !object->IsJSValue() &&
-        !JSObject::cast(*object)->HasIndexedInterceptor() &&
-        JSObject::cast(*object)->HasFastElements()) {
-      Map* map = JSObject::cast(*object)->map();
-      PatchInlinedLoad(address(), map);
-    }
   }
 
   // Get the property.
@@ -1462,57 +1350,7 @@
     LookupResult lookup;
 
     if (LookupForWrite(*receiver, *name, &lookup)) {
-      bool can_be_inlined =
-          state == UNINITIALIZED &&
-          lookup.IsProperty() &&
-          lookup.holder() == *receiver &&
-          lookup.type() == FIELD &&
-          !receiver->IsAccessCheckNeeded();
-
-      if (can_be_inlined) {
-        Map* map = lookup.holder()->map();
-        // Property's index in the properties array.  If negative we have
-        // an inobject property.
-        int index = lookup.GetFieldIndex() - map->inobject_properties();
-        if (index < 0) {
-          // Index is an offset from the end of the object.
-          int offset = map->instance_size() + (index * kPointerSize);
-          if (PatchInlinedStore(address(), map, offset)) {
-            set_target((strict_mode == kStrictMode)
-                         ? megamorphic_stub_strict()
-                         : megamorphic_stub());
-#ifdef DEBUG
-            if (FLAG_trace_ic) {
-              PrintF("[StoreIC : inline patch %s]\n", *name->ToCString());
-            }
-#endif
-            return receiver->SetProperty(*name, *value, NONE, strict_mode);
-#ifdef DEBUG
-
-          } else {
-            if (FLAG_trace_ic) {
-              PrintF("[StoreIC : no inline patch %s (patching failed)]\n",
-                     *name->ToCString());
-            }
-          }
-        } else {
-          if (FLAG_trace_ic) {
-            PrintF("[StoreIC : no inline patch %s (not inobject)]\n",
-                   *name->ToCString());
-          }
-        }
-      } else {
-        if (state == PREMONOMORPHIC) {
-          if (FLAG_trace_ic) {
-            PrintF("[StoreIC : no inline patch %s (not inlinable)]\n",
-                   *name->ToCString());
-#endif
-          }
-        }
-      }
-
-      // If no inlined store ic was patched, generate a stub for this
-      // store.
+      // Generate a stub for this store.
       UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
     } else {
       // Strict mode doesn't allow setting non-existent global property
@@ -1815,8 +1653,7 @@
 
 
 // Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* CallIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
   CallIC ic(isolate);
@@ -1846,8 +1683,7 @@
 
 
 // Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* KeyedCallIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
   KeyedCallIC ic(isolate);
@@ -1868,8 +1704,7 @@
 
 
 // Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* LoadIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
   LoadIC ic(isolate);
@@ -1879,8 +1714,7 @@
 
 
 // Used from ic-<arch>.cc
-MUST_USE_RESULT MaybeObject* KeyedLoadIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
   KeyedLoadIC ic(isolate);
@@ -1890,8 +1724,7 @@
 
 
 // Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* StoreIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 3);
   StoreIC ic(isolate);
@@ -1905,8 +1738,7 @@
 }
 
 
-MUST_USE_RESULT MaybeObject* StoreIC_ArrayLength(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
   NoHandleAllocation nha;
 
   ASSERT(args.length() == 2);
@@ -1927,9 +1759,7 @@
 // Extend storage is called in a store inline cache when
 // it is necessary to extend the properties array of a
 // JSObject.
-MUST_USE_RESULT MaybeObject* SharedStoreIC_ExtendStorage(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
   NoHandleAllocation na;
   ASSERT(args.length() == 3);
 
@@ -1963,8 +1793,7 @@
 
 
 // Used from ic-<arch>.cc.
-MUST_USE_RESULT MaybeObject* KeyedStoreIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 3);
   KeyedStoreIC ic(isolate);
@@ -1978,148 +1807,6 @@
 }
 
 
-void BinaryOpIC::patch(Code* code) {
-  set_target(code);
-}
-
-
-const char* BinaryOpIC::GetName(TypeInfo type_info) {
-  switch (type_info) {
-    case UNINIT_OR_SMI: return "UninitOrSmi";
-    case DEFAULT: return "Default";
-    case GENERIC: return "Generic";
-    case HEAP_NUMBERS: return "HeapNumbers";
-    case STRINGS: return "Strings";
-    default: return "Invalid";
-  }
-}
-
-
-BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
-  switch (type_info) {
-    case UNINIT_OR_SMI:
-      return UNINITIALIZED;
-    case DEFAULT:
-    case HEAP_NUMBERS:
-    case STRINGS:
-      return MONOMORPHIC;
-    case GENERIC:
-      return MEGAMORPHIC;
-  }
-  UNREACHABLE();
-  return UNINITIALIZED;
-}
-
-
-BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Object* left,
-                                             Object* right) {
-  if (left->IsSmi() && right->IsSmi()) {
-    // If we have two smi inputs we can reach here because
-    // of an overflow. Enter default state.
-    return DEFAULT;
-  }
-
-  if (left->IsNumber() && right->IsNumber()) {
-    return HEAP_NUMBERS;
-  }
-
-  if (left->IsString() || right->IsString()) {
-    // Patching for fast string ADD makes sense even if only one of the
-    // arguments is a string.
-    return STRINGS;
-  }
-
-  return GENERIC;
-}
-
-
-// defined in code-stubs-<arch>.cc
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info);
-
-
-MUST_USE_RESULT MaybeObject* BinaryOp_Patch(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
-  ASSERT(args.length() == 5);
-
-  HandleScope scope(isolate);
-  Handle<Object> left = args.at<Object>(0);
-  Handle<Object> right = args.at<Object>(1);
-  int key = Smi::cast(args[2])->value();
-  Token::Value op = static_cast<Token::Value>(Smi::cast(args[3])->value());
-  BinaryOpIC::TypeInfo previous_type =
-      static_cast<BinaryOpIC::TypeInfo>(Smi::cast(args[4])->value());
-
-  BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(*left, *right);
-  Handle<Code> code = GetBinaryOpStub(key, type);
-  if (!code.is_null()) {
-    BinaryOpIC ic(isolate);
-    ic.patch(*code);
-    if (FLAG_trace_ic) {
-      PrintF("[BinaryOpIC (%s->%s)#%s]\n",
-             BinaryOpIC::GetName(previous_type),
-             BinaryOpIC::GetName(type),
-             Token::Name(op));
-    }
-  }
-
-  Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
-      isolate->thread_local_top()->context_->builtins(), isolate);
-  Object* builtin = NULL;  // Initialization calms down the compiler.
-  switch (op) {
-    case Token::ADD:
-      builtin = builtins->javascript_builtin(Builtins::ADD);
-      break;
-    case Token::SUB:
-      builtin = builtins->javascript_builtin(Builtins::SUB);
-      break;
-    case Token::MUL:
-      builtin = builtins->javascript_builtin(Builtins::MUL);
-      break;
-    case Token::DIV:
-      builtin = builtins->javascript_builtin(Builtins::DIV);
-      break;
-    case Token::MOD:
-      builtin = builtins->javascript_builtin(Builtins::MOD);
-      break;
-    case Token::BIT_AND:
-      builtin = builtins->javascript_builtin(Builtins::BIT_AND);
-      break;
-    case Token::BIT_OR:
-      builtin = builtins->javascript_builtin(Builtins::BIT_OR);
-      break;
-    case Token::BIT_XOR:
-      builtin = builtins->javascript_builtin(Builtins::BIT_XOR);
-      break;
-    case Token::SHR:
-      builtin = builtins->javascript_builtin(Builtins::SHR);
-      break;
-    case Token::SAR:
-      builtin = builtins->javascript_builtin(Builtins::SAR);
-      break;
-    case Token::SHL:
-      builtin = builtins->javascript_builtin(Builtins::SHL);
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  Handle<JSFunction> builtin_function(JSFunction::cast(builtin),
-                                      isolate);
-
-  bool caught_exception;
-  Object** builtin_args[] = { right.location() };
-  Handle<Object> result = Execution::Call(builtin_function,
-                                          left,
-                                          ARRAY_SIZE(builtin_args),
-                                          builtin_args,
-                                          &caught_exception);
-  if (caught_exception) {
-    return Failure::Exception();
-  }
-  return *result;
-}
-
-
 void TRBinaryOpIC::patch(Code* code) {
   set_target(code);
 }
@@ -2132,6 +1819,7 @@
     case INT32: return "Int32s";
     case HEAP_NUMBER: return "HeapNumbers";
     case ODDBALL: return "Oddball";
+    case BOTH_STRING: return "BothStrings";
     case STRING: return "Strings";
     case GENERIC: return "Generic";
     default: return "Invalid";
@@ -2147,6 +1835,7 @@
     case INT32:
     case HEAP_NUMBER:
     case ODDBALL:
+    case BOTH_STRING:
     case STRING:
       return MONOMORPHIC;
     case GENERIC:
@@ -2161,12 +1850,17 @@
                                                TRBinaryOpIC::TypeInfo y) {
   if (x == UNINITIALIZED) return y;
   if (y == UNINITIALIZED) return x;
-  if (x == STRING && y == STRING) return STRING;
-  if (x == STRING || y == STRING) return GENERIC;
-  if (x >= y) return x;
+  if (x == y) return x;
+  if (x == BOTH_STRING && y == STRING) return STRING;
+  if (x == STRING && y == BOTH_STRING) return STRING;
+  if (x == STRING || x == BOTH_STRING || y == STRING || y == BOTH_STRING) {
+    return GENERIC;
+  }
+  if (x > y) return x;
   return y;
 }
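The new join keeps BOTH_STRING and STRING compatible but degrades any string/non-string mix to GENERIC; for the purely numeric states the join is still the maximum of the two. A sketch mirroring the lattice (enumerator order assumed to match the TRBinaryOpIC declaration below, not V8 code):

```cpp
#include <cassert>

enum TypeInfo { UNINITIALIZED, SMI, INT32, HEAP_NUMBER, ODDBALL,
                BOTH_STRING, STRING, GENERIC };

TypeInfo Join(TypeInfo x, TypeInfo y) {
  if (x == UNINITIALIZED) return y;
  if (y == UNINITIALIZED) return x;
  if (x == y) return x;
  if ((x == BOTH_STRING && y == STRING) ||
      (x == STRING && y == BOTH_STRING)) return STRING;
  if (x == STRING || x == BOTH_STRING || y == STRING || y == BOTH_STRING) {
    return GENERIC;
  }
  return x > y ? x : y;  // numeric states: take the maximum
}

int main() {
  assert(Join(SMI, INT32) == INT32);            // numeric: max wins
  assert(Join(BOTH_STRING, STRING) == STRING);  // strings stay strings
  assert(Join(STRING, SMI) == GENERIC);         // mixing bails to generic
  return 0;
}
```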
 
+
 TRBinaryOpIC::TypeInfo TRBinaryOpIC::GetTypeInfo(Handle<Object> left,
                                                  Handle<Object> right) {
   ::v8::internal::TypeInfo left_type =
@@ -2188,9 +1882,11 @@
     return HEAP_NUMBER;
   }
 
-  if (left_type.IsString() || right_type.IsString()) {
-    // Patching for fast string ADD makes sense even if only one of the
-    // arguments is a string.
+  // Patching for fast string ADD makes sense even if only one of the
+  // arguments is a string.
+  if (left_type.IsString())  {
+    return right_type.IsString() ? BOTH_STRING : STRING;
+  } else if (right_type.IsString()) {
     return STRING;
   }
 
@@ -2209,8 +1905,7 @@
                                           TRBinaryOpIC::TypeInfo result_type);
 
 
-MaybeObject* TypeRecordingBinaryOp_Patch(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, TypeRecordingBinaryOp_Patch) {
   ASSERT(args.length() == 5);
 
   HandleScope scope(isolate);
@@ -2224,11 +1919,11 @@
   TRBinaryOpIC::TypeInfo type = TRBinaryOpIC::GetTypeInfo(left, right);
   type = TRBinaryOpIC::JoinTypes(type, previous_type);
   TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED;
-  if (type == TRBinaryOpIC::STRING && op != Token::ADD) {
+  if ((type == TRBinaryOpIC::STRING || type == TRBinaryOpIC::BOTH_STRING) &&
+      op != Token::ADD) {
     type = TRBinaryOpIC::GENERIC;
   }
-  if (type == TRBinaryOpIC::SMI &&
-      previous_type == TRBinaryOpIC::SMI) {
+  if (type == TRBinaryOpIC::SMI && previous_type == TRBinaryOpIC::SMI) {
     if (op == Token::DIV || op == Token::MUL || kSmiValueSize == 32) {
       // Arithmetic on two Smi inputs has yielded a heap number.
       // That is the only way to get here from the Smi stub.
@@ -2240,8 +1935,7 @@
       result_type = TRBinaryOpIC::INT32;
     }
   }
-  if (type == TRBinaryOpIC::INT32 &&
-      previous_type == TRBinaryOpIC::INT32) {
+  if (type == TRBinaryOpIC::INT32 && previous_type == TRBinaryOpIC::INT32) {
     // We must be here because an operation on two INT32 types overflowed.
     result_type = TRBinaryOpIC::HEAP_NUMBER;
   }
@@ -2365,8 +2059,7 @@
 
 
 // Used from ic_<arch>.cc.
-Code* CompareIC_Miss(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
   NoHandleAllocation na;
   ASSERT(args.length() == 3);
   CompareIC ic(isolate, static_cast<Token::Value>(Smi::cast(args[2])->value()));
diff --git a/src/ic.h b/src/ic.h
index bb8a981..7b7ab43 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -53,7 +53,6 @@
   ICU(LoadPropertyWithInterceptorForCall)             \
   ICU(KeyedLoadPropertyWithInterceptor)               \
   ICU(StoreInterceptorProperty)                       \
-  ICU(BinaryOp_Patch)                                 \
   ICU(TypeRecordingBinaryOp_Patch)                    \
   ICU(CompareIC_Miss)
 //
@@ -297,14 +296,6 @@
                                    bool support_wrappers);
   static void GenerateFunctionPrototype(MacroAssembler* masm);
 
-  // Clear the use of the inlined version.
-  static void ClearInlinedVersion(Address address);
-
-  // The offset from the inlined patch site to the start of the
-  // inlined load instruction.  It is architecture-dependent, and not
-  // used on ARM.
-  static const int kOffsetToLoadInstruction;
-
  private:
   // Update the inline cache and the global stub cache based on the
   // lookup result.
@@ -329,13 +320,6 @@
 
   static void Clear(Address address, Code* target);
 
-  static bool PatchInlinedLoad(Address address, Object* map, int index);
-
-  static bool PatchInlinedContextualLoad(Address address,
-                                         Object* map,
-                                         Object* cell,
-                                         bool is_dont_delete);
-
   friend class IC;
 };
 
@@ -362,9 +346,6 @@
 
   static void GenerateIndexedInterceptor(MacroAssembler* masm);
 
-  // Clear the use of the inlined version.
-  static void ClearInlinedVersion(Address address);
-
   // Bit mask to be tested against bit field for the cases when
   // generic stub should go into slow case.
   // Access check is necessary explicitly since generic stub does not perform
@@ -408,10 +389,6 @@
 
   static void Clear(Address address, Code* target);
 
-  // Support for patching the map that is checked in an inlined
-  // version of keyed load.
-  static bool PatchInlinedLoad(Address address, Object* map);
-
   friend class IC;
 };
 
@@ -438,13 +415,6 @@
   static void GenerateGlobalProxy(MacroAssembler* masm,
                                   StrictModeFlag strict_mode);
 
-  // Clear the use of an inlined version.
-  static void ClearInlinedVersion(Address address);
-
-  // The offset from the inlined patch site to the start of the
-  // inlined store instruction.
-  static const int kOffsetToStoreInstruction;
-
  private:
   // Update the inline cache and the global stub cache based on the
   // lookup result.
@@ -490,10 +460,6 @@
 
   static void Clear(Address address, Code* target);
 
-  // Support for patching the index and the map that is checked in an
-  // inlined version of the named store.
-  static bool PatchInlinedStore(Address address, Object* map, int index);
-
   friend class IC;
 };
 
@@ -515,12 +481,6 @@
                                          StrictModeFlag strict_mode);
   static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
 
-  // Clear the inlined version so the IC is always hit.
-  static void ClearInlinedVersion(Address address);
-
-  // Restore the inlined version so the fast case can get hit.
-  static void RestoreInlinedVersion(Address address);
-
  private:
   // Update the inline cache.
   void UpdateCaches(LookupResult* lookup,
@@ -565,42 +525,10 @@
 
   static void Clear(Address address, Code* target);
 
-  // Support for patching the map that is checked in an inlined
-  // version of keyed store.
-  // The address is the patch point for the IC call
-  // (Assembler::kCallTargetAddressOffset before the end of
-  // the call/return address).
-  // The map is the new map that the inlined code should check against.
-  static bool PatchInlinedStore(Address address, Object* map);
-
   friend class IC;
 };
 
 
-class BinaryOpIC: public IC {
- public:
-
-  enum TypeInfo {
-    UNINIT_OR_SMI,
-    DEFAULT,  // Initial state. When first executed, patches to one
-              // of the following states depending on the operand types.
-    HEAP_NUMBERS,  // Both arguments are HeapNumbers.
-    STRINGS,  // At least one of the arguments is a String.
-    GENERIC   // Non-specialized case (processes any type combination).
-  };
-
-  explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { }
-
-  void patch(Code* code);
-
-  static const char* GetName(TypeInfo type_info);
-
-  static State ToState(TypeInfo type_info);
-
-  static TypeInfo GetTypeInfo(Object* left, Object* right);
-};
-
-
 // Type Recording BinaryOpIC, that records the types of the inputs and outputs.
 class TRBinaryOpIC: public IC {
  public:
@@ -611,6 +539,7 @@
     INT32,
     HEAP_NUMBER,
     ODDBALL,
+    BOTH_STRING,  // Only used for addition operation.
     STRING,  // Only used for addition operation.  At least one string operand.
     GENERIC
   };
diff --git a/src/isolate.cc b/src/isolate.cc
index a163532..e42d78e 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -54,6 +54,21 @@
 namespace v8 {
 namespace internal {
 
+Atomic32 ThreadId::highest_thread_id_ = 0;
+
+int ThreadId::AllocateThreadId() {
+  int new_id = NoBarrier_AtomicIncrement(&highest_thread_id_, 1);
+  return new_id;
+}
+
+int ThreadId::GetCurrentThreadId() {
+  int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
+  if (thread_id == 0) {
+    thread_id = AllocateThreadId();
+    Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
+  }
+  return thread_id;
+}
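GetCurrentThreadId() above lazily assigns an id the first time a thread asks, combining an atomic counter with a slot in thread-local storage. A sketch of the same pattern using standard C++11 primitives in place of V8's Thread helpers (an assumption; this code predates thread_local):

```cpp
#include <atomic>
#include <cassert>

std::atomic<int> highest_thread_id{0};

int CurrentThreadId() {
  thread_local int id = 0;     // stands in for GetThreadLocalInt()
  if (id == 0) {
    id = ++highest_thread_id;  // stands in for NoBarrier_AtomicIncrement
  }
  return id;
}

int main() {
  int a = CurrentThreadId();
  assert(a == CurrentThreadId());  // stable for the same thread
  return 0;
}
```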
 
 // Create a dummy thread that will wait forever on a semaphore. The only
 // purpose for this thread is to have some stack area to save essential data
@@ -245,7 +260,6 @@
 Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
 Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
 Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
-Isolate::ThreadId Isolate::highest_thread_id_ = 0;
 
 
 class IsolateInitializer {
@@ -265,20 +279,12 @@
 static IsolateInitializer* static_initializer = EnsureDefaultIsolateAllocated();
 
 
-Isolate::ThreadId Isolate::AllocateThreadId() {
-  ThreadId new_id;
-  {
-    ScopedLock lock(process_wide_mutex_);
-    new_id = ++highest_thread_id_;
-  }
-  return new_id;
-}
+
 
 
 Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
     ThreadId thread_id) {
-  ASSERT(thread_id != 0);
-  ASSERT(Thread::GetThreadLocalInt(thread_id_key_) == thread_id);
+  ASSERT(!thread_id.Equals(ThreadId::Invalid()));
   PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
   {
     ScopedLock lock(process_wide_mutex_);
@@ -292,11 +298,7 @@
 
 Isolate::PerIsolateThreadData*
     Isolate::FindOrAllocatePerThreadDataForThisThread() {
-  ThreadId thread_id = Thread::GetThreadLocalInt(thread_id_key_);
-  if (thread_id == 0) {
-    thread_id = AllocateThreadId();
-    Thread::SetThreadLocalInt(thread_id_key_, thread_id);
-  }
+  ThreadId thread_id = ThreadId::Current();
   PerIsolateThreadData* per_thread = NULL;
   {
     ScopedLock lock(process_wide_mutex_);
@@ -361,7 +363,8 @@
 
 
 Isolate::PerIsolateThreadData*
-    Isolate::ThreadDataTable::Lookup(Isolate* isolate, ThreadId thread_id) {
+    Isolate::ThreadDataTable::Lookup(Isolate* isolate,
+                                     ThreadId thread_id) {
   for (PerIsolateThreadData* data = list_; data != NULL; data = data->next_) {
     if (data->Matches(isolate, thread_id)) return data;
   }
@@ -383,7 +386,8 @@
 }
 
 
-void Isolate::ThreadDataTable::Remove(Isolate* isolate, ThreadId thread_id) {
+void Isolate::ThreadDataTable::Remove(Isolate* isolate,
+                                      ThreadId thread_id) {
   PerIsolateThreadData* data = Lookup(isolate, thread_id);
   if (data != NULL) {
     Remove(data);
@@ -414,7 +418,6 @@
       runtime_profiler_(NULL),
       compilation_cache_(NULL),
       counters_(new Counters()),
-      cpu_features_(NULL),
       code_range_(NULL),
       break_access_(OS::CreateMutex()),
       logger_(new Logger()),
@@ -430,7 +433,7 @@
       context_slot_cache_(NULL),
       descriptor_lookup_cache_(NULL),
       handle_scope_implementer_(NULL),
-      scanner_constants_(NULL),
+      unicode_cache_(NULL),
       in_use_list_(0),
       free_list_(0),
       preallocated_storage_preallocated_(false),
@@ -565,8 +568,8 @@
   producer_heap_profile_ = NULL;
 #endif
 
-  delete scanner_constants_;
-  scanner_constants_ = NULL;
+  delete unicode_cache_;
+  unicode_cache_ = NULL;
 
   delete regexp_stack_;
   regexp_stack_ = NULL;
@@ -593,8 +596,6 @@
 
   delete counters_;
   counters_ = NULL;
-  delete cpu_features_;
-  cpu_features_ = NULL;
 
   delete handle_scope_implementer_;
   handle_scope_implementer_ = NULL;
@@ -675,12 +676,11 @@
   keyed_lookup_cache_ = new KeyedLookupCache();
   context_slot_cache_ = new ContextSlotCache();
   descriptor_lookup_cache_ = new DescriptorLookupCache();
-  scanner_constants_ = new ScannerConstants();
+  unicode_cache_ = new UnicodeCache();
   pc_to_code_cache_ = new PcToCodeCache(this);
   write_input_buffer_ = new StringInputBuffer();
   global_handles_ = new GlobalHandles(this);
   bootstrapper_ = new Bootstrapper();
-  cpu_features_ = new CpuFeatures();
   handle_scope_implementer_ = new HandleScopeImplementer();
   stub_cache_ = new StubCache(this);
   ast_sentinels_ = new AstSentinels();
@@ -705,6 +705,33 @@
 }
 
 
+void Isolate::PropagatePendingExceptionToExternalTryCatch() {
+  ASSERT(has_pending_exception());
+
+  bool external_caught = IsExternallyCaught();
+  thread_local_top_.external_caught_exception_ = external_caught;
+
+  if (!external_caught) return;
+
+  if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
+    // Do not propagate OOM exception: we should kill VM asap.
+  } else if (thread_local_top_.pending_exception_ ==
+             heap()->termination_exception()) {
+    try_catch_handler()->can_continue_ = false;
+    try_catch_handler()->exception_ = heap()->null_value();
+  } else {
+    // At this point all non-object (failure) exceptions have
+    // been dealt with so this shouldn't fail.
+    ASSERT(!pending_exception()->IsFailure());
+    try_catch_handler()->can_continue_ = true;
+    try_catch_handler()->exception_ = pending_exception();
+    if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
+      try_catch_handler()->message_ = thread_local_top_.pending_message_obj_;
+    }
+  }
+}
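
Editor's note: the interesting part of this helper is the three-way split on the kind of pending exception. A standalone sketch of that decision, with illustrative names (none of these types are V8 API):

```cpp
// OOM is swallowed (the VM is about to be killed), termination is
// reported but marked non-continuable, everything else is handed to the
// external v8::TryCatch verbatim. Sketch only.
enum class PendingKind { kOutOfMemory, kTermination, kOrdinary };

struct TryCatchState {
  bool has_exception = false;
  bool can_continue = false;
};

TryCatchState Propagate(PendingKind kind) {
  TryCatchState s;
  switch (kind) {
    case PendingKind::kOutOfMemory:
      break;  // do not propagate: the process should die ASAP
    case PendingKind::kTermination:
      s.has_exception = true;   // surfaced as null in the real code
      s.can_continue = false;   // the isolate must unwind, not resume
      break;
    case PendingKind::kOrdinary:
      s.has_exception = true;
      s.can_continue = true;    // script execution may continue
      break;
  }
  return s;
}
```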
+
+
 bool Isolate::Init(Deserializer* des) {
   ASSERT(state_ != INITIALIZED);
 
@@ -725,9 +752,6 @@
   CpuProfiler::Setup();
   HeapProfiler::Setup();
 
-  // Setup the platform OS support.
-  OS::Setup();
-
   // Initialize other runtime facilities
 #if defined(USE_SIMULATOR)
 #if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
@@ -786,11 +810,6 @@
   // stack guard.
   heap_.SetStackLimits();
 
-  // Setup the CPU support. Must be done after heap setup and after
-  // any deserialization because we have to have the initial heap
-  // objects in place for creating the code object used for probing.
-  CPU::Setup();
-
   deoptimizer_data_ = new DeoptimizerData;
   runtime_profiler_ = new RuntimeProfiler(this);
   runtime_profiler_->Setup();
@@ -818,8 +837,8 @@
       ASSERT(Current() == this);
       ASSERT(entry_stack_ != NULL);
       ASSERT(entry_stack_->previous_thread_data == NULL ||
-             entry_stack_->previous_thread_data->thread_id() ==
-                 Thread::GetThreadLocalInt(thread_id_key_));
+             entry_stack_->previous_thread_data->thread_id().Equals(
+                 ThreadId::Current()));
       // Same thread re-enters the isolate, no need to re-init anything.
       entry_stack_->entry_count++;
       return;
@@ -857,8 +876,8 @@
 void Isolate::Exit() {
   ASSERT(entry_stack_ != NULL);
   ASSERT(entry_stack_->previous_thread_data == NULL ||
-         entry_stack_->previous_thread_data->thread_id() ==
-             Thread::GetThreadLocalInt(thread_id_key_));
+         entry_stack_->previous_thread_data->thread_id().Equals(
+             ThreadId::Current()));
 
   if (--entry_stack_->entry_count > 0) return;
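
Editor's note: Enter/Exit pair up via an entry counter on the top of the per-isolate entry stack, so the same thread can re-enter an isolate it is already inside without re-initialization, and only the matching final Exit restores the previous state. A toy model of that counting, under the assumption that the surrounding save/restore work is elided:

```cpp
#include <cassert>

struct EntryFrame {
  int entry_count = 0;
};

void Enter(EntryFrame* top) {
  if (top->entry_count > 0) {  // same thread re-enters: nothing to re-init
    top->entry_count++;
    return;
  }
  // ... first entry: save previous isolate/thread data, set up TLS ...
  top->entry_count = 1;
}

void Exit(EntryFrame* top) {
  assert(top->entry_count > 0);
  if (--top->entry_count > 0) return;  // still inside a nested Enter
  // ... last exit: restore the previous isolate/thread data ...
}
```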
 
diff --git a/src/isolate.h b/src/isolate.h
index 03a4866..35ffcb4 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -72,7 +72,7 @@
 class ProducerHeapProfile;
 class RegExpStack;
 class SaveContext;
-class ScannerConstants;
+class UnicodeCache;
 class StringInputBuffer;
 class StringTracker;
 class StubCache;
@@ -136,8 +136,59 @@
 #endif
 
 
+// Platform-independent, reliable thread identifier.
+class ThreadId {
+ public:
+  // Creates an invalid ThreadId.
+  ThreadId() : id_(kInvalidId) {}
+
+  // Returns ThreadId for current thread.
+  static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }
+
+  // Returns invalid ThreadId (guaranteed not to be equal to any thread).
+  static ThreadId Invalid() { return ThreadId(kInvalidId); }
+
+  // Compares ThreadIds for equality.
+  INLINE(bool Equals(const ThreadId& other) const) {
+    return id_ == other.id_;
+  }
+
+  // Checks whether this ThreadId refers to any thread.
+  INLINE(bool IsValid() const) {
+    return id_ != kInvalidId;
+  }
+
+  // Converts ThreadId to an integer representation
+  // (required for public API: V8::V8::GetCurrentThreadId).
+  int ToInteger() const { return id_; }
+
+  // Converts ThreadId to an integer representation
+  // (required for public API: V8::V8::TerminateExecution).
+  static ThreadId FromInteger(int id) { return ThreadId(id); }
+
+ private:
+  static const int kInvalidId = -1;
+
+  explicit ThreadId(int id) : id_(id) {}
+
+  static int AllocateThreadId();
+
+  static int GetCurrentThreadId();
+
+  int id_;
+
+  static Atomic32 highest_thread_id_;
+
+  friend class Isolate;
+};
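
Editor's note: replacing the old `typedef int ThreadId` (removed below) with a value class means an uninitialized id can no longer silently compare equal to a real one, and every int conversion at the public-API boundary is now explicit. A standalone analogue of the idiom, trimmed to the essentials:

```cpp
#include <cassert>

// Sketch of the wrapper idiom; the real class above adds Current() and
// the atomic allocator.
class Id {
 public:
  Id() : id_(kInvalid) {}  // default-constructed ids are invalid
  static Id FromInteger(int id) { return Id(id); }
  bool Equals(const Id& other) const { return id_ == other.id_; }
  bool IsValid() const { return id_ != kInvalid; }
  int ToInteger() const { return id_; }
 private:
  static const int kInvalid = -1;
  explicit Id(int id) : id_(id) {}
  int id_;
};

int main() {
  Id none;                       // invalid by construction
  Id a = Id::FromInteger(1);
  assert(!none.IsValid());
  assert(!none.Equals(a));       // an invalid id never matches a real one
  assert(a.Equals(Id::FromInteger(1)));
  return 0;
}
```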
+
+
 class ThreadLocalTop BASE_EMBEDDED {
  public:
+  // Does early low-level initialization that does not depend on the
+  // isolate being present.
+  ThreadLocalTop();
+
   // Initialize the thread data.
   void Initialize();
 
@@ -176,10 +227,9 @@
   // The context where the current execution method is created and for variable
   // lookups.
   Context* context_;
-  int thread_id_;
+  ThreadId thread_id_;
   MaybeObject* pending_exception_;
   bool has_pending_message_;
-  const char* pending_message_;
   Object* pending_message_obj_;
   Script* pending_message_script_;
   int pending_message_start_pos_;
@@ -218,6 +268,8 @@
   v8::FailedAccessCheckCallback failed_access_check_callback_;
 
  private:
+  void InitializeInternal();
+
   Address try_catch_handler_address_;
 };
 
@@ -242,6 +294,7 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 #define ISOLATE_DEBUGGER_INIT_LIST(V)                                          \
+  V(uint64_t, enabled_cpu_features, 0)                                         \
   V(v8::Debug::EventCallback, debug_event_callback, NULL)                      \
   V(DebuggerAgent*, debugger_agent_instance, NULL)
 #else
@@ -315,6 +368,8 @@
   /* AstNode state. */                                                         \
   V(unsigned, ast_node_id, 0)                                                  \
   V(unsigned, ast_node_count, 0)                                               \
+  /* SafeStackFrameIterator activations count. */                              \
+  V(int, safe_stack_iterator_counter, 0)                                       \
   ISOLATE_PLATFORM_INIT_LIST(V)                                                \
   ISOLATE_LOGGING_INIT_LIST(V)                                                 \
   ISOLATE_DEBUGGER_INIT_LIST(V)
@@ -327,8 +382,6 @@
  public:
   ~Isolate();
 
-  typedef int ThreadId;
-
   // A thread has a PerIsolateThreadData instance for each isolate that it has
   // entered. That instance is allocated when the isolate is initially entered
   // and reused on subsequent entries.
@@ -361,7 +414,7 @@
 #endif
 
     bool Matches(Isolate* isolate, ThreadId thread_id) const {
-      return isolate_ == isolate && thread_id_ == thread_id;
+      return isolate_ == isolate && thread_id_.Equals(thread_id);
     }
 
    private:
@@ -453,9 +506,6 @@
     return thread_id_key_;
   }
 
-  // Atomically allocates a new thread ID.
-  static ThreadId AllocateThreadId();
-
   // If a client attempts to create a Locker without specifying an isolate,
   // we assume that the client is using legacy behavior. Set up the current
   // thread to be inside the implicit isolate (or fail a check if we have
@@ -481,8 +531,8 @@
   }
 
   // Access to current thread id.
-  int thread_id() { return thread_local_top_.thread_id_; }
-  void set_thread_id(int id) { thread_local_top_.thread_id_ = id; }
+  ThreadId thread_id() { return thread_local_top_.thread_id_; }
+  void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
 
   // Interface to pending exception.
   MaybeObject* pending_exception() {
@@ -492,6 +542,9 @@
   bool external_caught_exception() {
     return thread_local_top_.external_caught_exception_;
   }
+  void set_external_caught_exception(bool value) {
+    thread_local_top_.external_caught_exception_ = value;
+  }
   void set_pending_exception(MaybeObject* exception) {
     thread_local_top_.pending_exception_ = exception;
   }
@@ -506,7 +559,6 @@
   }
   void clear_pending_message() {
     thread_local_top_.has_pending_message_ = false;
-    thread_local_top_.pending_message_ = NULL;
     thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
     thread_local_top_.pending_message_script_ = NULL;
   }
@@ -519,6 +571,12 @@
   bool* external_caught_exception_address() {
     return &thread_local_top_.external_caught_exception_;
   }
+  v8::TryCatch* catcher() {
+    return thread_local_top_.catcher_;
+  }
+  void set_catcher(v8::TryCatch* catcher) {
+    thread_local_top_.catcher_ = catcher;
+  }
 
   MaybeObject** scheduled_exception_address() {
     return &thread_local_top_.scheduled_exception_;
@@ -589,6 +647,27 @@
   // JavaScript code.  If an exception is scheduled true is returned.
   bool OptionalRescheduleException(bool is_bottom_call);
 
+  class ExceptionScope {
+   public:
+    explicit ExceptionScope(Isolate* isolate) :
+      // Scope currently can only be used for regular exceptions, not
+      // failures like OOM or termination exception.
+      isolate_(isolate),
+      pending_exception_(isolate_->pending_exception()->ToObjectUnchecked()),
+      catcher_(isolate_->catcher())
+    { }
+
+    ~ExceptionScope() {
+      isolate_->set_catcher(catcher_);
+      isolate_->set_pending_exception(*pending_exception_);
+    }
+
+   private:
+    Isolate* isolate_;
+    Handle<Object> pending_exception_;
+    v8::TryCatch* catcher_;
+  };
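
Editor's note: ExceptionScope is a classic RAII save/restore guard. It snapshots the pending exception and the current catcher on construction and puts them back on destruction, so intervening work (message formatting, for instance) can clobber that state safely. The real class holds a `Handle<Object>` so the snapshot survives GC; this generic sketch ignores that detail:

```cpp
// Generic save/restore guard in the spirit of ExceptionScope: snapshot a
// slot on entry, restore it on scope exit no matter what runs in between.
template <typename T>
class SaveAndRestore {
 public:
  explicit SaveAndRestore(T* slot) : slot_(slot), saved_(*slot) {}
  ~SaveAndRestore() { *slot_ = saved_; }
 private:
  T* slot_;
  T saved_;
};

// Hypothetical usage (names illustrative):
//   SaveAndRestore<Exception*> scope(&pending_exception_slot);
//   FormatMessage();  // may set or clear the pending exception
//   // destructor restores the original exception here
```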
+
   void SetCaptureStackTraceForUncaughtExceptions(
       bool capture,
       int frame_limit,
@@ -633,9 +712,7 @@
 
   // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
   Failure* PromoteScheduledException();
-  void DoThrow(MaybeObject* exception,
-               MessageLocation* location,
-               const char* message);
+  void DoThrow(MaybeObject* exception, MessageLocation* location);
   // Checks if exception should be reported and finds out if it's
   // caught externally.
   bool ShouldReportException(bool* can_be_caught_externally,
@@ -708,10 +785,6 @@
 
   Bootstrapper* bootstrapper() { return bootstrapper_; }
   Counters* counters() { return counters_; }
-  // TODO(isolates): Having CPU features per isolate is probably too
-  // flexible. We only really need to have the set of currently
-  // enabled features for asserts in DEBUG builds.
-  CpuFeatures* cpu_features() { return cpu_features_; }
   CodeRange* code_range() { return code_range_; }
   RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
   CompilationCache* compilation_cache() { return compilation_cache_; }
@@ -752,8 +825,8 @@
   }
   Zone* zone() { return &zone_; }
 
-  ScannerConstants* scanner_constants() {
-    return scanner_constants_;
+  UnicodeCache* unicode_cache() {
+    return unicode_cache_;
   }
 
   PcToCodeCache* pc_to_code_cache() { return pc_to_code_cache_; }
@@ -898,13 +971,19 @@
 
   void SetCurrentVMState(StateTag state) {
     if (RuntimeProfiler::IsEnabled()) {
-      if (state == JS) {
-        // JS or non-JS -> JS transition.
+      StateTag current_state = thread_local_top_.current_vm_state_;
+      if (current_state != JS && state == JS) {
+        // Non-JS -> JS transition.
         RuntimeProfiler::IsolateEnteredJS(this);
-      } else if (thread_local_top_.current_vm_state_ == JS) {
+      } else if (current_state == JS && state != JS) {
         // JS -> non-JS transition.
         ASSERT(RuntimeProfiler::IsSomeIsolateInJS());
         RuntimeProfiler::IsolateExitedJS(this);
+      } else {
+        // Other types of state transitions are not interesting to the
+        // runtime profiler, because they don't affect whether we're
+        // in JS or not.
+        ASSERT((current_state == JS) == (state == JS));
       }
     }
     thread_local_top_.current_vm_state_ = state;
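
Editor's note: the old code notified the profiler on every store of a JS state, so a JS-to-JS store was over-counted as a fresh entry. The rewrite is edge-triggered: callbacks fire only when execution actually crosses the JS boundary in either direction. A minimal sketch of that pattern, with illustrative names:

```cpp
enum StateTag { OTHER, GC, JS };

struct Profiler {
  int isolates_in_js = 0;
  void EnteredJS() { ++isolates_in_js; }
  void ExitedJS() { --isolates_in_js; }
};

void SetVMState(StateTag* current, StateTag next, Profiler* profiler) {
  if (*current != JS && next == JS) {
    profiler->EnteredJS();   // non-JS -> JS: exactly one entry per crossing
  } else if (*current == JS && next != JS) {
    profiler->ExitedJS();    // JS -> non-JS: the balancing exit
  }                          // JS->JS and non-JS->non-JS: no callback
  *current = next;
}
```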
@@ -965,7 +1044,6 @@
   static Thread::LocalStorageKey thread_id_key_;
   static Isolate* default_isolate_;
   static ThreadDataTable* thread_data_table_;
-  static ThreadId highest_thread_id_;
 
   bool PreInit();
 
@@ -1018,6 +1096,8 @@
 
   void FillCache();
 
+  void PropagatePendingExceptionToExternalTryCatch();
+
   int stack_trace_nesting_level_;
   StringStream* incomplete_message_;
   // The preallocated memory thread singleton.
@@ -1029,7 +1109,6 @@
   RuntimeProfiler* runtime_profiler_;
   CompilationCache* compilation_cache_;
   Counters* counters_;
-  CpuFeatures* cpu_features_;
   CodeRange* code_range_;
   Mutex* break_access_;
   Heap heap_;
@@ -1049,7 +1128,7 @@
   DescriptorLookupCache* descriptor_lookup_cache_;
   v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
   HandleScopeImplementer* handle_scope_implementer_;
-  ScannerConstants* scanner_constants_;
+  UnicodeCache* unicode_cache_;
   Zone zone_;
   PreallocatedStorage in_use_list_;
   PreallocatedStorage free_list_;
@@ -1124,6 +1203,7 @@
 
   friend class ExecutionAccess;
   friend class IsolateInitializer;
+  friend class ThreadId;
   friend class v8::Isolate;
   friend class v8::Locker;
 
@@ -1146,7 +1226,7 @@
     isolate->set_save_context(this);
 
     // If there is no JS frame under the current C frame, use the value 0.
-    JavaScriptFrameIterator it;
+    JavaScriptFrameIterator it(isolate);
     js_sp_ = it.done() ? 0 : it.frame()->sp();
   }
 
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 3ed5a7e..b9b2f60 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -1447,7 +1447,7 @@
 
 class OffsetsVector {
  public:
-  inline OffsetsVector(int num_registers)
+  explicit inline OffsetsVector(int num_registers)
       : offsets_vector_length_(num_registers) {
     if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
       vector_ = NewArray<int>(offsets_vector_length_);
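
Editor's note: adding `explicit` to this single-argument constructor stops a bare `int` from silently converting into an `OffsetsVector` at call sites. A quick sketch of the failure mode being closed off:

```cpp
// Why 'explicit' matters on a one-argument constructor.
struct Vec {
  explicit Vec(int n) : n_(n) {}
  int n_;
};

void Consume(const Vec&) {}

void Caller() {
  Consume(Vec(128));  // OK: the conversion is visible at the call site
  // Consume(128);    // no longer compiles; before 'explicit' this
}                     // implicit int -> Vec conversion was legal
```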
diff --git a/src/jump-target-heavy-inl.h b/src/jump-target-heavy-inl.h
deleted file mode 100644
index 0a2a569..0000000
--- a/src/jump-target-heavy-inl.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_HEAVY_INL_H_
-#define V8_JUMP_TARGET_HEAVY_INL_H_
-
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
-  FrameElement* element = &entry_frame_->elements_[index];
-  element->clear_copied();
-  if (target->is_register()) {
-    entry_frame_->set_register_location(target->reg(), index);
-  } else if (target->is_copy()) {
-    entry_frame_->elements_[target->index()].set_copied();
-  }
-  if (direction_ == BIDIRECTIONAL && !target->is_copy()) {
-    element->set_type_info(TypeInfo::Unknown());
-  }
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_JUMP_TARGET_HEAVY_INL_H_
diff --git a/src/jump-target-heavy.cc b/src/jump-target-heavy.cc
deleted file mode 100644
index f73e027..0000000
--- a/src/jump-target-heavy.cc
+++ /dev/null
@@ -1,427 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-void JumpTarget::Jump(Result* arg) {
-  ASSERT(cgen()->has_valid_frame());
-
-  cgen()->frame()->Push(arg);
-  DoJump();
-}
-
-
-void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
-  ASSERT(cgen()->has_valid_frame());
-
-  // We want to check that non-frame registers at the call site stay in
-  // the same registers on the fall-through branch.
-#ifdef DEBUG
-  Result::Type arg_type = arg->type();
-  Register arg_reg = arg->is_register() ? arg->reg() : no_reg;
-#endif
-
-  cgen()->frame()->Push(arg);
-  DoBranch(cc, hint);
-  *arg = cgen()->frame()->Pop();
-
-  ASSERT(arg->type() == arg_type);
-  ASSERT(!arg->is_register() || arg->reg().is(arg_reg));
-}
-
-
-void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
-  ASSERT(cgen()->has_valid_frame());
-
-  // We want to check that non-frame registers at the call site stay in
-  // the same registers on the fall-through branch.
-#ifdef DEBUG
-  Result::Type arg0_type = arg0->type();
-  Register arg0_reg = arg0->is_register() ? arg0->reg() : no_reg;
-  Result::Type arg1_type = arg1->type();
-  Register arg1_reg = arg1->is_register() ? arg1->reg() : no_reg;
-#endif
-
-  cgen()->frame()->Push(arg0);
-  cgen()->frame()->Push(arg1);
-  DoBranch(cc, hint);
-  *arg1 = cgen()->frame()->Pop();
-  *arg0 = cgen()->frame()->Pop();
-
-  ASSERT(arg0->type() == arg0_type);
-  ASSERT(!arg0->is_register() || arg0->reg().is(arg0_reg));
-  ASSERT(arg1->type() == arg1_type);
-  ASSERT(!arg1->is_register() || arg1->reg().is(arg1_reg));
-}
-
-
-void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
-  ASSERT(cgen()->has_valid_frame());
-
-  int count = cgen()->frame()->height() - expected_height_;
-  if (count > 0) {
-    // We negate and branch here rather than using DoBranch's negate
-    // and branch.  This gives us a hook to remove statement state
-    // from the frame.
-    JumpTarget fall_through;
-    // Branch to fall through will not negate, because it is a
-    // forward-only target.
-    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
-    Jump(arg);  // May emit merge code here.
-    fall_through.Bind();
-  } else {
-#ifdef DEBUG
-    Result::Type arg_type = arg->type();
-    Register arg_reg = arg->is_register() ? arg->reg() : no_reg;
-#endif
-    cgen()->frame()->Push(arg);
-    DoBranch(cc, hint);
-    *arg = cgen()->frame()->Pop();
-    ASSERT(arg->type() == arg_type);
-    ASSERT(!arg->is_register() || arg->reg().is(arg_reg));
-  }
-}
-
-
-void JumpTarget::Bind(Result* arg) {
-  if (cgen()->has_valid_frame()) {
-    cgen()->frame()->Push(arg);
-  }
-  DoBind();
-  *arg = cgen()->frame()->Pop();
-}
-
-
-void JumpTarget::Bind(Result* arg0, Result* arg1) {
-  if (cgen()->has_valid_frame()) {
-    cgen()->frame()->Push(arg0);
-    cgen()->frame()->Push(arg1);
-  }
-  DoBind();
-  *arg1 = cgen()->frame()->Pop();
-  *arg0 = cgen()->frame()->Pop();
-}
-
-
-void JumpTarget::ComputeEntryFrame() {
-  // Given: a collection of frames reaching by forward CFG edges and
-  // the directionality of the block.  Compute: an entry frame for the
-  // block.
-
-  Isolate::Current()->counters()->compute_entry_frame()->Increment();
-#ifdef DEBUG
-  if (Isolate::Current()->jump_target_compiling_deferred_code()) {
-    ASSERT(reaching_frames_.length() > 1);
-    VirtualFrame* frame = reaching_frames_[0];
-    bool all_identical = true;
-    for (int i = 1; i < reaching_frames_.length(); i++) {
-      if (!frame->Equals(reaching_frames_[i])) {
-        all_identical = false;
-        break;
-      }
-    }
-    ASSERT(!all_identical || all_identical);
-  }
-#endif
-
-  // Choose an initial frame.
-  VirtualFrame* initial_frame = reaching_frames_[0];
-
-  // A list of pointers to frame elements in the entry frame.  NULL
-  // indicates that the element has not yet been determined.
-  int length = initial_frame->element_count();
-  ZoneList<FrameElement*> elements(length);
-
-  // Initially populate the list of elements based on the initial
-  // frame.
-  for (int i = 0; i < length; i++) {
-    FrameElement element = initial_frame->elements_[i];
-    // We do not allow copies or constants in bidirectional frames.
-    if (direction_ == BIDIRECTIONAL) {
-      if (element.is_constant() || element.is_copy()) {
-        elements.Add(NULL);
-        continue;
-      }
-    }
-    elements.Add(&initial_frame->elements_[i]);
-  }
-
-  // Compute elements based on the other reaching frames.
-  if (reaching_frames_.length() > 1) {
-    for (int i = 0; i < length; i++) {
-      FrameElement* element = elements[i];
-      for (int j = 1; j < reaching_frames_.length(); j++) {
-        // Element computation is monotonic: new information will not
-        // change our decision about undetermined or invalid elements.
-        if (element == NULL || !element->is_valid()) break;
-
-        FrameElement* other = &reaching_frames_[j]->elements_[i];
-        element = element->Combine(other);
-        if (element != NULL && !element->is_copy()) {
-          ASSERT(other != NULL);
-          // We overwrite the number information of one of the incoming frames.
-          // This is safe because we only use the frame for emitting merge code.
-          // The number information of incoming frames is not used anymore.
-          element->set_type_info(TypeInfo::Combine(element->type_info(),
-                                                   other->type_info()));
-        }
-      }
-      elements[i] = element;
-    }
-  }
-
-  // Build the new frame.  A freshly allocated frame has memory elements
-  // for the parameters and some platform-dependent elements (e.g.,
-  // return address).  Replace those first.
-  entry_frame_ = new VirtualFrame();
-  int index = 0;
-  for (; index < entry_frame_->element_count(); index++) {
-    FrameElement* target = elements[index];
-    // If the element is determined, set it now.  Count registers.  Mark
-    // elements as copied exactly when they have a copy.  Undetermined
-    // elements are initially recorded as if in memory.
-    if (target != NULL) {
-      entry_frame_->elements_[index] = *target;
-      InitializeEntryElement(index, target);
-    }
-  }
-  // Then fill in the rest of the frame with new elements.
-  for (; index < length; index++) {
-    FrameElement* target = elements[index];
-    if (target == NULL) {
-      entry_frame_->elements_.Add(
-          FrameElement::MemoryElement(TypeInfo::Uninitialized()));
-    } else {
-      entry_frame_->elements_.Add(*target);
-      InitializeEntryElement(index, target);
-    }
-  }
-
-  // Allocate any still-undetermined frame elements to registers or
-  // memory, from the top down.
-  for (int i = length - 1; i >= 0; i--) {
-    if (elements[i] == NULL) {
-      // Loop over all the reaching frames to check whether the element
-      // is synced on all frames and to count the registers it occupies.
-      bool is_synced = true;
-      RegisterFile candidate_registers;
-      int best_count = kMinInt;
-      int best_reg_num = RegisterAllocator::kInvalidRegister;
-      TypeInfo info = TypeInfo::Uninitialized();
-
-      for (int j = 0; j < reaching_frames_.length(); j++) {
-        FrameElement element = reaching_frames_[j]->elements_[i];
-        if (direction_ == BIDIRECTIONAL) {
-          info = TypeInfo::Unknown();
-        } else if (!element.is_copy()) {
-          info = TypeInfo::Combine(info, element.type_info());
-        } else {
-          // New elements will not be copies, so get number information from
-          // backing element in the reaching frame.
-          info = TypeInfo::Combine(info,
-            reaching_frames_[j]->elements_[element.index()].type_info());
-        }
-        is_synced = is_synced && element.is_synced();
-        if (element.is_register() && !entry_frame_->is_used(element.reg())) {
-          // Count the register occurrence and remember it if better
-          // than the previous best.
-          int num = RegisterAllocator::ToNumber(element.reg());
-          candidate_registers.Use(num);
-          if (candidate_registers.count(num) > best_count) {
-            best_count = candidate_registers.count(num);
-            best_reg_num = num;
-          }
-        }
-      }
-
-      // We must have a number type information now (not for copied elements).
-      ASSERT(entry_frame_->elements_[i].is_copy()
-             || !info.IsUninitialized());
-
-      // If the value is synced on all frames, put it in memory.  This
-      // costs nothing at the merge code but will incur a
-      // memory-to-register move when the value is needed later.
-      if (is_synced) {
-        // Already recorded as a memory element.
-        // Set combined number info.
-        entry_frame_->elements_[i].set_type_info(info);
-        continue;
-      }
-
-      // Try to put it in a register.  If there was no best choice
-      // consider any free register.
-      if (best_reg_num == RegisterAllocator::kInvalidRegister) {
-        for (int j = 0; j < RegisterAllocator::kNumRegisters; j++) {
-          if (!entry_frame_->is_used(j)) {
-            best_reg_num = j;
-            break;
-          }
-        }
-      }
-
-      if (best_reg_num != RegisterAllocator::kInvalidRegister) {
-        // If there was a register choice, use it.  Preserve the copied
-        // flag on the element.
-        bool is_copied = entry_frame_->elements_[i].is_copied();
-        Register reg = RegisterAllocator::ToRegister(best_reg_num);
-        entry_frame_->elements_[i] =
-            FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED,
-                                          TypeInfo::Uninitialized());
-        if (is_copied) entry_frame_->elements_[i].set_copied();
-        entry_frame_->set_register_location(reg, i);
-      }
-      // Set combined number info.
-      entry_frame_->elements_[i].set_type_info(info);
-    }
-  }
-
-  // If we have incoming backward edges assert we forget all number information.
-#ifdef DEBUG
-  if (direction_ == BIDIRECTIONAL) {
-    for (int i = 0; i < length; ++i) {
-      if (!entry_frame_->elements_[i].is_copy()) {
-        ASSERT(entry_frame_->elements_[i].type_info().IsUnknown());
-      }
-    }
-  }
-#endif
-
-  // The stack pointer is at the highest synced element or the base of
-  // the expression stack.
-  int stack_pointer = length - 1;
-  while (stack_pointer >= entry_frame_->expression_base_index() &&
-         !entry_frame_->elements_[stack_pointer].is_synced()) {
-    stack_pointer--;
-  }
-  entry_frame_->stack_pointer_ = stack_pointer;
-}
-
-
-FrameRegisterState::FrameRegisterState(VirtualFrame* frame) {
-  // Copy the register locations from the code generator's frame.
-  // These are the registers that will be spilled on entry to the
-  // deferred code and restored on exit.
-  int sp_offset = frame->fp_relative(frame->stack_pointer_);
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int loc = frame->register_location(i);
-    if (loc == VirtualFrame::kIllegalIndex) {
-      registers_[i] = kIgnore;
-    } else if (frame->elements_[loc].is_synced()) {
-      // Needs to be restored on exit but not saved on entry.
-      registers_[i] = frame->fp_relative(loc) | kSyncedFlag;
-    } else {
-      int offset = frame->fp_relative(loc);
-      registers_[i] = (offset < sp_offset) ? kPush : offset;
-    }
-  }
-}
-
-
-void JumpTarget::Unuse() {
-  reaching_frames_.Clear();
-  merge_labels_.Clear();
-  entry_frame_ = NULL;
-  entry_label_.Unuse();
-}
-
-
-void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
-  ASSERT(reaching_frames_.length() == merge_labels_.length());
-  ASSERT(entry_frame_ == NULL);
-  Label fresh;
-  merge_labels_.Add(fresh);
-  reaching_frames_.Add(frame);
-}
-
-
-// -------------------------------------------------------------------------
-// BreakTarget implementation.
-
-void BreakTarget::set_direction(Directionality direction) {
-  JumpTarget::set_direction(direction);
-  ASSERT(cgen()->has_valid_frame());
-  expected_height_ = cgen()->frame()->height();
-}
-
-
-void BreakTarget::CopyTo(BreakTarget* destination) {
-  ASSERT(destination != NULL);
-  destination->direction_ = direction_;
-  destination->reaching_frames_.Rewind(0);
-  destination->reaching_frames_.AddAll(reaching_frames_);
-  destination->merge_labels_.Rewind(0);
-  destination->merge_labels_.AddAll(merge_labels_);
-  destination->entry_frame_ = entry_frame_;
-  destination->entry_label_ = entry_label_;
-  destination->expected_height_ = expected_height_;
-}
-
-
-void BreakTarget::Branch(Condition cc, Hint hint) {
-  ASSERT(cgen()->has_valid_frame());
-
-  int count = cgen()->frame()->height() - expected_height_;
-  if (count > 0) {
-    // We negate and branch here rather than using DoBranch's negate
-    // and branch.  This gives us a hook to remove statement state
-    // from the frame.
-    JumpTarget fall_through;
-    // Branch to fall through will not negate, because it is a
-    // forward-only target.
-    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
-    Jump();  // May emit merge code here.
-    fall_through.Bind();
-  } else {
-    DoBranch(cc, hint);
-  }
-}
-
-
-DeferredCode::DeferredCode()
-    : masm_(CodeGeneratorScope::Current(Isolate::Current())->masm()),
-      statement_position_(masm_->positions_recorder()->
-                          current_statement_position()),
-      position_(masm_->positions_recorder()->current_position()),
-      frame_state_(CodeGeneratorScope::Current(Isolate::Current())->frame()) {
-  ASSERT(statement_position_ != RelocInfo::kNoPosition);
-  ASSERT(position_ != RelocInfo::kNoPosition);
-
-  CodeGeneratorScope::Current(Isolate::Current())->AddDeferred(this);
-#ifdef DEBUG
-  comment_ = "";
-#endif
-}
-
-} }  // namespace v8::internal
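
Editor's note: the most intricate piece of the file deleted above is `ComputeEntryFrame`, which, for each frame slot, picked the register backing that slot in the most reaching frames so the merge code moves as few values as possible. A standalone sketch of that per-slot voting step, with illustrative data types:

```cpp
#include <vector>

const int kNoRegister = -1;

// regs_per_frame[j] is the register backing this slot in reaching frame j,
// or kNoRegister if the slot lives in memory there. Returns the register
// most frames already agree on; the caller falls back to any free register.
int PickMergeRegister(const std::vector<int>& regs_per_frame,
                      int num_registers) {
  std::vector<int> votes(num_registers, 0);
  int best = kNoRegister;
  int best_count = 0;
  for (int reg : regs_per_frame) {
    if (reg == kNoRegister) continue;
    if (++votes[reg] > best_count) {
      best_count = votes[reg];
      best = reg;
    }
  }
  return best;
}
```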
diff --git a/src/jump-target-heavy.h b/src/jump-target-heavy.h
deleted file mode 100644
index bf97756..0000000
--- a/src/jump-target-heavy.h
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_HEAVY_H_
-#define V8_JUMP_TARGET_HEAVY_H_
-
-#include "macro-assembler.h"
-#include "zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class FrameElement;
-class Result;
-class VirtualFrame;
-
-// -------------------------------------------------------------------------
-// Jump targets
-//
-// A jump target is an abstraction of a basic-block entry in generated
-// code.  It collects all the virtual frames reaching the block by
-// forward jumps and pairs them with labels for the merge code along
-// all forward-reaching paths.  When bound, an expected frame for the
-// block is determined and code is generated to merge to the expected
-// frame.  For backward jumps, the merge code is generated at the edge
-// leaving the predecessor block.
-//
-// A jump target must have been reached via control flow (either by
-// jumping, branching, or falling through) at the time it is bound.
-// In particular, this means that at least one of the control-flow
-// graph edges reaching the target must be a forward edge.
-
-class JumpTarget : public ZoneObject {  // Shadows are dynamically allocated.
- public:
-  // Forward-only jump targets can only be reached by forward CFG edges.
-  enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
-
-  // Construct a jump target used to generate code and to provide
-  // access to a current frame.
-  explicit JumpTarget(Directionality direction)
-      : direction_(direction),
-        reaching_frames_(0),
-        merge_labels_(0),
-        entry_frame_(NULL) {
-  }
-
-  // Construct a jump target.
-  JumpTarget()
-      : direction_(FORWARD_ONLY),
-        reaching_frames_(0),
-        merge_labels_(0),
-        entry_frame_(NULL) {
-  }
-
-  virtual ~JumpTarget() {}
-
-  // Set the direction of the jump target.
-  virtual void set_direction(Directionality direction) {
-    direction_ = direction;
-  }
-
-  // Treat the jump target as a fresh one.  The state is reset.
-  void Unuse();
-
-  inline CodeGenerator* cgen();
-
-  Label* entry_label() { return &entry_label_; }
-
-  VirtualFrame* entry_frame() const { return entry_frame_; }
-  void set_entry_frame(VirtualFrame* frame) {
-    entry_frame_ = frame;
-  }
-
-  // Predicates testing the state of the encapsulated label.
-  bool is_bound() const { return entry_label_.is_bound(); }
-  bool is_linked() const {
-    return !is_bound() && !reaching_frames_.is_empty();
-  }
-  bool is_unused() const {
-    // This is !is_bound() && !is_linked().
-    return !is_bound() && reaching_frames_.is_empty();
-  }
-
-  // Emit a jump to the target.  There must be a current frame at the
-  // jump and there will be no current frame after the jump.
-  virtual void Jump();
-  virtual void Jump(Result* arg);
-
-  // Emit a conditional branch to the target.  There must be a current
-  // frame at the branch.  The current frame will fall through to the
-  // code after the branch.  The arg is a result that is live both at
-  // the target and the fall-through.
-  virtual void Branch(Condition cc, Hint hint = no_hint);
-  virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
-  void Branch(Condition cc,
-              Result* arg0,
-              Result* arg1,
-              Hint hint = no_hint);
-
-  // Bind a jump target.  If there is no current frame at the binding
-  // site, there must be at least one frame reaching via a forward
-  // jump.
-  virtual void Bind();
-  virtual void Bind(Result* arg);
-  void Bind(Result* arg0, Result* arg1);
-
-  // Emit a call to a jump target.  There must be a current frame at
-  // the call.  The frame at the target is the same as the current
-  // frame except for an extra return address on top of it.  The frame
-  // after the call is the same as the frame before the call.
-  void Call();
-
- protected:
-  // Directionality flag set at initialization time.
-  Directionality direction_;
-
-  // A list of frames reaching this block via forward jumps.
-  ZoneList<VirtualFrame*> reaching_frames_;
-
-  // A parallel list of labels for merge code.
-  ZoneList<Label> merge_labels_;
-
-  // The frame used on entry to the block and expected at backward
-  // jumps to the block.  Set when the jump target is bound, but may
-  // or may not be set for forward-only blocks.
-  VirtualFrame* entry_frame_;
-
-  // The actual entry label of the block.
-  Label entry_label_;
-
-  // Implementations of Jump, Branch, and Bind with all arguments and
-  // return values using the virtual frame.
-  void DoJump();
-  void DoBranch(Condition cc, Hint hint);
-  void DoBind();
-
- private:
-  // Add a virtual frame reaching this labeled block via a forward jump,
-  // and a corresponding merge code label.
-  void AddReachingFrame(VirtualFrame* frame);
-
-  // Perform initialization required during entry frame computation
-  // after setting the virtual frame element at index in frame to be
-  // target.
-  inline void InitializeEntryElement(int index, FrameElement* target);
-
-  // Compute a frame to use for entry to this block.
-  void ComputeEntryFrame();
-
-  DISALLOW_COPY_AND_ASSIGN(JumpTarget);
-};
-
-
-// -------------------------------------------------------------------------
-// Break targets
-//
-// A break target is a jump target that can be used to break out of a
-// statement that keeps extra state on the stack (eg, for/in or
-// try/finally).  They know the expected stack height at the target
-// and will drop state from nested statements as part of merging.
-//
-// Break targets are used for return, break, and continue targets.
-
-class BreakTarget : public JumpTarget {
- public:
-  // Construct a break target.
-  BreakTarget() {}
-  explicit BreakTarget(JumpTarget::Directionality direction)
-    : JumpTarget(direction) { }
-
-  virtual ~BreakTarget() {}
-
-  // Set the direction of the break target.
-  virtual void set_direction(Directionality direction);
-
-  // Copy the state of this break target to the destination.  The
-  // lists of forward-reaching frames and merge-point labels are
-  // copied.  All virtual frame pointers are copied, not the
-  // pointed-to frames.  The previous state of the destination is
-  // overwritten, without deallocating pointed-to virtual frames.
-  void CopyTo(BreakTarget* destination);
-
-  // Emit a jump to the target.  There must be a current frame at the
-  // jump and there will be no current frame after the jump.
-  virtual void Jump();
-  virtual void Jump(Result* arg);
-
-  // Emit a conditional branch to the target.  There must be a current
-  // frame at the branch.  The current frame will fall through to the
-  // code after the branch.
-  virtual void Branch(Condition cc, Hint hint = no_hint);
-  virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
-
-  // Bind a break target.  If there is no current frame at the binding
-  // site, there must be at least one frame reaching via a forward
-  // jump.
-  virtual void Bind();
-  virtual void Bind(Result* arg);
-
-  // Setter for expected height.
-  void set_expected_height(int expected) { expected_height_ = expected; }
-
- private:
-  // The expected height of the expression stack where the target will
-  // be bound, statically known at initialization time.
-  int expected_height_;
-
-  DISALLOW_COPY_AND_ASSIGN(BreakTarget);
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_JUMP_TARGET_HEAVY_H_
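
Editor's note: both the heavy and light variants being deleted implement the same contract spelled out in the header comments above: forward edges record reaching frames against a target, and `Bind()` later reconciles them into one entry frame at the merge point. A toy standalone model of that contract, with all types illustrative:

```cpp
#include <cassert>
#include <vector>

struct Frame { int height; };

class Target {
 public:
  void Branch(const Frame& f) {   // forward edge: record the reaching frame
    assert(!bound_);
    reaching_.push_back(f);
  }
  void Bind() {                   // merge point: all forward edges known
    assert(!reaching_.empty());   // must be reached by at least one edge
    entry_ = reaching_.front();   // real code computes a merged entry frame
    bound_ = true;
  }
  bool is_bound() const { return bound_; }
 private:
  std::vector<Frame> reaching_;
  Frame entry_{0};
  bool bound_ = false;
};
```

BreakTarget layered an expected stack height on top of this so break/continue could drop nested statement state (for/in or try/finally bookkeeping) before jumping.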
diff --git a/src/jump-target-inl.h b/src/jump-target-inl.h
deleted file mode 100644
index 545328c..0000000
--- a/src/jump-target-inl.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_INL_H_
-#define V8_JUMP_TARGET_INL_H_
-
-#include "virtual-frame-inl.h"
-
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-#include "jump-target-heavy-inl.h"
-#else
-#include "jump-target-light-inl.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-CodeGenerator* JumpTarget::cgen() {
-  return CodeGeneratorScope::Current(Isolate::Current());
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_JUMP_TARGET_INL_H_
diff --git a/src/jump-target-light-inl.h b/src/jump-target-light-inl.h
deleted file mode 100644
index e8f1a5f..0000000
--- a/src/jump-target-light-inl.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_LIGHT_INL_H_
-#define V8_JUMP_TARGET_LIGHT_INL_H_
-
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// Construct a jump target.
-JumpTarget::JumpTarget(Directionality direction)
-    : entry_frame_set_(false),
-      direction_(direction),
-      entry_frame_(kInvalidVirtualFrameInitializer) {
-}
-
-JumpTarget::JumpTarget()
-    : entry_frame_set_(false),
-      direction_(FORWARD_ONLY),
-      entry_frame_(kInvalidVirtualFrameInitializer) {
-}
-
-
-BreakTarget::BreakTarget() { }
-BreakTarget::BreakTarget(JumpTarget::Directionality direction)
-  : JumpTarget(direction) { }
-
-} }  // namespace v8::internal
-
-#endif  // V8_JUMP_TARGET_LIGHT_INL_H_
diff --git a/src/jump-target-light.cc b/src/jump-target-light.cc
deleted file mode 100644
index 1d89474..0000000
--- a/src/jump-target-light.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-DeferredCode::DeferredCode()
-    : masm_(CodeGeneratorScope::Current(Isolate::Current())->masm()),
-      statement_position_(masm_->positions_recorder()->
-                          current_statement_position()),
-      position_(masm_->positions_recorder()->current_position()),
-      frame_state_(*CodeGeneratorScope::Current(Isolate::Current())->frame()) {
-  ASSERT(statement_position_ != RelocInfo::kNoPosition);
-  ASSERT(position_ != RelocInfo::kNoPosition);
-
-  CodeGeneratorScope::Current(Isolate::Current())->AddDeferred(this);
-
-#ifdef DEBUG
-  comment_ = "";
-#endif
-}
-
-
-// -------------------------------------------------------------------------
-// BreakTarget implementation.
-
-
-void BreakTarget::SetExpectedHeight() {
-  expected_height_ = cgen()->frame()->height();
-}
-
-
-void BreakTarget::Jump() {
-  ASSERT(cgen()->has_valid_frame());
-
-  int count = cgen()->frame()->height() - expected_height_;
-  if (count > 0) {
-    cgen()->frame()->Drop(count);
-  }
-  DoJump();
-}
-
-
-void BreakTarget::Branch(Condition cc, Hint hint) {
-  if (cc == al) {
-    Jump();
-    return;
-  }
-
-  ASSERT(cgen()->has_valid_frame());
-
-  int count = cgen()->frame()->height() - expected_height_;
-  if (count > 0) {
-    // We negate and branch here rather than using DoBranch's negate
-    // and branch.  This gives us a hook to remove statement state
-    // from the frame.
-    JumpTarget fall_through;
-    // Branch to fall through will not negate, because it is a
-    // forward-only target.
-    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
-    // Emit merge code.
-    cgen()->frame()->Drop(count);
-    DoJump();
-    fall_through.Bind();
-  } else {
-    DoBranch(cc, hint);
-  }
-}
-
-
-void BreakTarget::Bind() {
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    if (count > 0) {
-      cgen()->frame()->Drop(count);
-    }
-  }
-  DoBind();
-}
-
-} }  // namespace v8::internal
diff --git a/src/jump-target-light.h b/src/jump-target-light.h
deleted file mode 100644
index 0d65306..0000000
--- a/src/jump-target-light.h
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_LIGHT_H_
-#define V8_JUMP_TARGET_LIGHT_H_
-
-#include "macro-assembler.h"
-#include "zone-inl.h"
-#include "virtual-frame.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class FrameElement;
-class Result;
-
-// -------------------------------------------------------------------------
-// Jump targets
-//
-// A jump target is an abstraction of a basic-block entry in generated
-// code.  It collects all the virtual frames reaching the block by
-// forward jumps and pairs them with labels for the merge code along
-// all forward-reaching paths.  When bound, an expected frame for the
-// block is determined and code is generated to merge to the expected
-// frame.  For backward jumps, the merge code is generated at the edge
-// leaving the predecessor block.
-//
-// A jump target must have been reached via control flow (either by
-// jumping, branching, or falling through) at the time it is bound.
-// In particular, this means that at least one of the control-flow
-// graph edges reaching the target must be a forward edge.
-
-class JumpTarget : public ZoneObject {  // Shadows are dynamically allocated.
- public:
-  // Forward-only jump targets can only be reached by forward CFG edges.
-  enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
-
-  // Construct a jump target.
-  explicit inline JumpTarget(Directionality direction);
-
-  inline JumpTarget();
-
-  virtual ~JumpTarget() {}
-
-  void Unuse() {
-    entry_frame_set_ = false;
-    entry_label_.Unuse();
-  }
-
-  inline CodeGenerator* cgen();
-
-  Label* entry_label() { return &entry_label_; }
-
-  const VirtualFrame* entry_frame() const {
-    return entry_frame_set_ ? &entry_frame_ : NULL;
-  }
-
-  void set_entry_frame(VirtualFrame* frame) {
-    entry_frame_ = *frame;
-    entry_frame_set_ = true;
-  }
-
-  // Predicates testing the state of the encapsulated label.
-  bool is_bound() const { return entry_label_.is_bound(); }
-  bool is_linked() const { return entry_label_.is_linked(); }
-  bool is_unused() const { return entry_label_.is_unused(); }
-
-  // Copy the state of this jump target to the destination.
-  inline void CopyTo(JumpTarget* destination) {
-    *destination = *this;
-  }
-
-  // Emit a jump to the target.  There must be a current frame at the
-  // jump and there will be no current frame after the jump.
-  virtual void Jump();
-
-  // Emit a conditional branch to the target.  There must be a current
-  // frame at the branch.  The current frame will fall through to the
-  // code after the branch.
-  virtual void Branch(Condition cc, Hint hint = no_hint);
-
-  // Bind a jump target.  If there is no current frame at the binding
-  // site, there must be at least one frame reaching via a forward
-  // jump.
-  virtual void Bind();
-
-  // Emit a call to a jump target.  There must be a current frame at
-  // the call.  The frame at the target is the same as the current
-  // frame except for an extra return address on top of it.  The frame
-  // after the call is the same as the frame before the call.
-  void Call();
-
- protected:
-  // Has an entry frame been found?
-  bool entry_frame_set_;
-
-  // Can we branch backwards to this label?
-  Directionality direction_;
-
-  // The frame used on entry to the block and expected at backward
-  // jumps to the block.  Set the first time something branches to this
-  // jump target.
-  VirtualFrame entry_frame_;
-
-  // The actual entry label of the block.
-  Label entry_label_;
-
-  // Implementations of Jump, Branch, and Bind with all arguments and
-  // return values using the virtual frame.
-  void DoJump();
-  void DoBranch(Condition cc, Hint hint);
-  void DoBind();
-};
-
-
-// -------------------------------------------------------------------------
-// Break targets
-//
-// A break target is a jump target that can be used to break out of a
-// statement that keeps extra state on the stack (e.g., for/in or
-// try/finally).  They know the expected stack height at the target
-// and will drop state from nested statements as part of merging.
-//
-// Break targets are used for return, break, and continue targets.
-
-class BreakTarget : public JumpTarget {
- public:
-  // Construct a break target.
-  inline BreakTarget();
-
-  explicit inline BreakTarget(JumpTarget::Directionality direction);
-
-  virtual ~BreakTarget() {}
-
-  // Copy the state of this jump target to the destination.
-  inline void CopyTo(BreakTarget* destination) {
-    *destination = *this;
-  }
-
-  // Emit a jump to the target.  There must be a current frame at the
-  // jump and there will be no current frame after the jump.
-  virtual void Jump();
-
-  // Emit a conditional branch to the target.  There must be a current
-  // frame at the branch.  The current frame will fall through to the
-  // code after the branch.
-  virtual void Branch(Condition cc, Hint hint = no_hint);
-
-  // Bind a break target.  If there is no current frame at the binding
-  // site, there must be at least one frame reaching via a forward
-  // jump.
-  virtual void Bind();
-
-  // Setter for expected height.
-  void set_expected_height(int expected) { expected_height_ = expected; }
-
-  // Uses the current frame to set the expected height.
-  void SetExpectedHeight();
-
- private:
-  // The expected height of the expression stack where the target will
-  // be bound, statically known at initialization time.
-  int expected_height_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_JUMP_TARGET_LIGHT_H_
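The header removed above describes jump targets as merge points: forward jumps are recorded until Bind() fixes the block entry. As a minimal standalone sketch of that bookkeeping (plain C++, not V8 code; the Assembler is a toy and the sketch deliberately omits the virtual-frame merging the real class performs):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Assembler {
      std::vector<int> code;  // each slot: resolved target index, or -1
    };

    struct JumpTarget {
      std::vector<std::size_t> pending;  // forward-jump sites awaiting a target
      int pos = -1;                      // block entry, known once Bind() runs

      void Jump(Assembler* masm) {
        if (pos >= 0) {
          masm->code.push_back(pos);             // backward jump: already resolved
        } else {
          pending.push_back(masm->code.size());  // forward jump: record the site
          masm->code.push_back(-1);
        }
      }

      void Bind(Assembler* masm) {
        pos = static_cast<int>(masm->code.size());
        for (std::size_t site : pending) masm->code[site] = pos;  // patch sites
        pending.clear();
      }
    };

    int main() {
      Assembler masm;
      JumpTarget done;
      done.Jump(&masm);         // forward jump, target not yet known
      masm.code.push_back(42);  // an intervening "instruction"
      done.Bind(&masm);         // resolve every recorded forward jump to here
      std::printf("forward jump patched to index %d\n", masm.code[0]);  // 2
    }
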
diff --git a/src/jump-target.cc b/src/jump-target.cc
deleted file mode 100644
index 72aada8..0000000
--- a/src/jump-target.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-void JumpTarget::Jump() {
-  DoJump();
-}
-
-
-void JumpTarget::Branch(Condition cc, Hint hint) {
-  DoBranch(cc, hint);
-}
-
-
-void JumpTarget::Bind() {
-  DoBind();
-}
-
-
-// -------------------------------------------------------------------------
-// ShadowTarget implementation.
-
-ShadowTarget::ShadowTarget(BreakTarget* shadowed) {
-  ASSERT(shadowed != NULL);
-  other_target_ = shadowed;
-
-#ifdef DEBUG
-  is_shadowing_ = true;
-#endif
-  // While shadowing, this shadow target saves the state of the original.
-  shadowed->CopyTo(this);
-
-  // The original's state is reset.
-  shadowed->Unuse();
-  ASSERT(cgen()->has_valid_frame());
-  shadowed->set_expected_height(cgen()->frame()->height());
-}
-
-
-void ShadowTarget::StopShadowing() {
-  ASSERT(is_shadowing_);
-
-  // The states of this target, which was shadowed, and the original
-  // target, which was shadowing, are swapped.
-  BreakTarget temp;
-  other_target_->CopyTo(&temp);
-  CopyTo(other_target_);
-  temp.CopyTo(this);
-  temp.Unuse();
-
-#ifdef DEBUG
-  is_shadowing_ = false;
-#endif
-}
-
-} }  // namespace v8::internal
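The break-target machinery removed in these files keys on an expected stack height: a jump out of a statement that pushed extra state (for/in, try/finally) drops back down to that height before merging. A tiny standalone illustration of that contract, with an explicit value stack standing in for the virtual frame (illustrative only, not V8 types):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct BreakTarget {
      std::size_t expected_height;  // stack height at the binding site
      explicit BreakTarget(std::size_t height) : expected_height(height) {}

      void Jump(std::vector<int>* stack) {
        assert(stack->size() >= expected_height);
        stack->resize(expected_height);  // drop state from nested statements
      }
    };

    int main() {
      std::vector<int> stack = {1, 2};  // two values live at the loop entry
      BreakTarget break_target(stack.size());
      stack.push_back(7);               // e.g. for/in pushes enumeration state
      stack.push_back(8);
      break_target.Jump(&stack);        // 'break' drops the extra state
      assert(stack.size() == 2);
    }
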
diff --git a/src/jump-target.h b/src/jump-target.h
deleted file mode 100644
index a0d2686..0000000
--- a/src/jump-target.h
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JUMP_TARGET_H_
-#define V8_JUMP_TARGET_H_
-
-#if V8_TARGET_ARCH_IA32
-#include "jump-target-heavy.h"
-#elif V8_TARGET_ARCH_X64
-#include "jump-target-heavy.h"
-#elif V8_TARGET_ARCH_ARM
-#include "jump-target-light.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "jump-target-light.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Shadow break targets
-//
-// A shadow break target represents a break target that is temporarily
-// shadowed by another one (which the original object represents while
-// shadowing is active).  Shadow targets are used to catch jumps to
-// labels in certain contexts, e.g. try blocks.  After shadowing ends,
-// the original object once again represents the formerly shadowed
-// target, and the ShadowTarget can be used as a jump target in its own
-// right, representing the formerly shadowing target.
-
-class ShadowTarget : public BreakTarget {
- public:
-  // Construct a shadow jump target.  After construction the shadow
-  // target object holds the state of the original target, and the
-  // original target is actually a fresh one that intercepts control
-  // flow intended for the shadowed one.
-  explicit ShadowTarget(BreakTarget* shadowed);
-
-  virtual ~ShadowTarget() {}
-
-  // End shadowing.  After shadowing ends, the original jump target
-  // again gives access to the formerly shadowed target and the shadow
-  // target object gives access to the formerly shadowing target.
-  void StopShadowing();
-
-  // During shadowing, the currently shadowing target.  After
-  // shadowing, the target that was shadowed.
-  BreakTarget* other_target() const { return other_target_; }
-
- private:
-  // During shadowing, the currently shadowing target.  After
-  // shadowing, the target that was shadowed.
-  BreakTarget* other_target_;
-
-#ifdef DEBUG
-  bool is_shadowing_;
-#endif
-
-  DISALLOW_COPY_AND_ASSIGN(ShadowTarget);
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_JUMP_TARGET_H_
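Taken together with the ShadowTarget constructor and StopShadowing() deleted from jump-target.cc, the comment above amounts to a save/reset/swap protocol on target state. A standalone sketch of just that protocol (the Target struct and its single linked flag are illustrative stand-ins, not V8 types):

    #include <cassert>
    #include <utility>

    struct Target {
      bool linked = false;  // stands in for the entry label's linked state
    };

    struct Shadow : Target {
      explicit Shadow(Target* shadowed) : other_(shadowed) {
        *static_cast<Target*>(this) = *shadowed;  // save the original's state
        *shadowed = Target();                     // reset it to intercept jumps
      }
      // Swap back: the original regains its saved state; the shadow keeps
      // whatever jumps were intercepted while shadowing was active.
      void StopShadowing() { std::swap(*static_cast<Target*>(this), *other_); }
      Target* other_;
    };

    int main() {
      Target brk;            // break target, nothing has jumped to it yet
      Shadow shadow(&brk);   // brk now intercepts control flow
      brk.linked = true;     // a 'break' emitted inside the shadowed region
      shadow.StopShadowing();
      assert(!brk.linked);   // original restored to its pre-shadow state
      assert(shadow.linked); // shadow holds the intercepted jump
    }
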
diff --git a/src/liveedit.cc b/src/liveedit.cc
index dbcf5ef..1466766 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -1013,8 +1013,8 @@
   Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
 
   if (IsJSFunctionCode(shared_info->code())) {
-    ReplaceCodeObject(shared_info->code(),
-                      *(compile_info_wrapper.GetFunctionCode()));
+    Handle<Code> code = compile_info_wrapper.GetFunctionCode();
+    ReplaceCodeObject(shared_info->code(), *code);
     Handle<Object> code_scope_info =  compile_info_wrapper.GetCodeScopeInfo();
     if (code_scope_info->IsFixedArray()) {
       shared_info->set_scope_info(SerializedScopeInfo::cast(*code_scope_info));
@@ -1028,8 +1028,10 @@
     debug_info->set_original_code(*new_original_code);
   }
 
-  shared_info->set_start_position(compile_info_wrapper.GetStartPosition());
-  shared_info->set_end_position(compile_info_wrapper.GetEndPosition());
+  int start_position = compile_info_wrapper.GetStartPosition();
+  int end_position = compile_info_wrapper.GetEndPosition();
+  shared_info->set_start_position(start_position);
+  shared_info->set_end_position(end_position);
 
   shared_info->set_construct_stub(
       Isolate::Current()->builtins()->builtin(
@@ -1233,13 +1235,14 @@
   int old_function_start = info->start_position();
   int new_function_start = TranslatePosition(old_function_start,
                                              position_change_array);
-  info->set_start_position(new_function_start);
-  info->set_end_position(TranslatePosition(info->end_position(),
-                                           position_change_array));
+  int new_function_end = TranslatePosition(info->end_position(),
+                                           position_change_array);
+  int new_function_token_pos =
+      TranslatePosition(info->function_token_position(), position_change_array);
 
-  info->set_function_token_position(
-      TranslatePosition(info->function_token_position(),
-      position_change_array));
+  info->set_start_position(new_function_start);
+  info->set_end_position(new_function_end);
+  info->set_function_token_position(new_function_token_pos);
 
   if (IsJSFunctionCode(info->code())) {
     // Patch relocation info section of the code.
@@ -1393,17 +1396,18 @@
   ASSERT(bottom_js_frame->is_java_script());
 
   // Check the nature of the top frame.
-  Code* pre_top_frame_code = pre_top_frame->LookupCode(Isolate::Current());
+  Isolate* isolate = Isolate::Current();
+  Code* pre_top_frame_code = pre_top_frame->LookupCode();
   if (pre_top_frame_code->is_inline_cache_stub() &&
       pre_top_frame_code->ic_state() == DEBUG_BREAK) {
     // OK, we can drop inline cache calls.
     *mode = Debug::FRAME_DROPPED_IN_IC_CALL;
   } else if (pre_top_frame_code ==
-             Isolate::Current()->debug()->debug_break_slot()) {
+             isolate->debug()->debug_break_slot()) {
     // OK, we can drop debug break slot.
     *mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
   } else if (pre_top_frame_code ==
-      Isolate::Current()->builtins()->builtin(
+      isolate->builtins()->builtin(
           Builtins::kFrameDropper_LiveEdit)) {
     // OK, we can drop our own code.
     *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
@@ -1567,8 +1571,8 @@
       : shared_info_array_(shared_info_array), result_(result),
         has_blocked_functions_(false) {
   }
-  void VisitThread(ThreadLocalTop* top) {
-    for (StackFrameIterator it(top); !it.done(); it.Advance()) {
+  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+    for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
       has_blocked_functions_ |= CheckActivation(
           shared_info_array_, result_, it.frame(),
           LiveEdit::FUNCTION_BLOCKED_ON_OTHER_STACK);
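The first two liveedit.cc hunks share one shape: an expression that can allocate (GetFunctionCode(), TranslatePosition(...)) is hoisted out of an argument list into a named local before any raw heap access is evaluated. Presumably this guards against C++'s unspecified argument evaluation order combined with a moving GC; a standalone sketch of the hazard and the fix, with a toy relocating "heap" in place of V8's handle machinery (illustrative only, not V8 API):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct Heap {
      std::vector<int> store;
      Heap() {
        store.reserve(1);     // small capacity: the next allocation likely
        store.push_back(10);  // relocates storage, like a moving GC
      }
      int* raw(std::size_t handle) { return &store[handle]; }  // raw pointer
      std::size_t allocate(int value) {  // may move every existing object
        store.push_back(value);
        return store.size() - 1;
      }
    };

    void ReplaceCode(int* old_code, int* new_code) {
      std::printf("replacing %d with %d\n", *old_code, *new_code);
    }

    int main() {
      Heap heap;
      std::size_t old_h = 0;  // handle to the existing "code object"

      // Risky shape the patch moves away from: in
      //   ReplaceCode(heap.raw(old_h), heap.raw(heap.allocate(20)));
      // the two argument expressions may run in either order, so raw(old_h)
      // can be evaluated before allocate() relocates the store, leaving a
      // dangling pointer by the time ReplaceCode runs.

      // Safe shape (what the hunks do): allocate first, dereference last.
      std::size_t new_h = heap.allocate(20);
      ReplaceCode(heap.raw(old_h), heap.raw(new_h));  // replacing 10 with 20
    }
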
diff --git a/src/log.cc b/src/log.cc
index 6a601c6..6d95094 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -150,6 +150,7 @@
 
   sample->tos = NULL;
   sample->frames_count = 0;
+  sample->has_external_callback = false;
 
   // Avoid collecting traces while doing GC.
   if (sample->state == GC) return;
@@ -190,7 +191,7 @@
 //
 class Ticker: public Sampler {
  public:
-  explicit Ticker(Isolate* isolate, int interval):
+  Ticker(Isolate* isolate, int interval):
       Sampler(isolate, interval),
       window_(NULL),
       profiler_(NULL) {}
@@ -1315,7 +1316,6 @@
       case Code::FUNCTION:
       case Code::OPTIMIZED_FUNCTION:
         return;  // We log this later using LogCompiledFunctions.
-      case Code::BINARY_OP_IC:  // fall through
       case Code::TYPE_RECORDING_BINARY_OP_IC:   // fall through
       case Code::COMPARE_IC:  // fall through
       case Code::STUB:
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 1f73388..bd36459 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -86,15 +86,15 @@
     GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
     EncodeForwardingAddresses();
 
-    heap_->MarkMapPointersAsEncoded(true);
+    heap()->MarkMapPointersAsEncoded(true);
     UpdatePointers();
-    heap_->MarkMapPointersAsEncoded(false);
-    heap_->isolate()->pc_to_code_cache()->Flush();
+    heap()->MarkMapPointersAsEncoded(false);
+    heap()->isolate()->pc_to_code_cache()->Flush();
 
     RelocateObjects();
   } else {
     SweepSpaces();
-    heap_->isolate()->pc_to_code_cache()->Flush();
+    heap()->isolate()->pc_to_code_cache()->Flush();
   }
 
   Finish();
@@ -123,7 +123,7 @@
   compact_on_next_gc_ = false;
 
   if (FLAG_never_compact) compacting_collection_ = false;
-  if (!HEAP->map_space()->MapPointersEncodable())
+  if (!heap()->map_space()->MapPointersEncodable())
       compacting_collection_ = false;
   if (FLAG_collect_maps) CreateBackPointers();
 #ifdef ENABLE_GDB_JIT_INTERFACE
@@ -161,9 +161,9 @@
   // force lazy re-initialization of it. This must be done after the
   // GC, because it relies on the new address of certain old space
   // objects (empty string, illegal builtin).
-  Isolate::Current()->stub_cache()->Clear();
+  heap()->isolate()->stub_cache()->Clear();
 
-  heap_->external_string_table_.CleanUp();
+  heap()->external_string_table_.CleanUp();
 
   // If we've just compacted old space there's no reason to check the
   // fragmentation limit. Just return.
@@ -456,7 +456,7 @@
     for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
   }
 
-  static inline void VisitCodeTarget(RelocInfo* rinfo) {
+  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
     Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
     if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
@@ -464,48 +464,50 @@
       // Please note targets for cleared inline caches do not have to be
       // marked since they are contained in HEAP->non_monomorphic_cache().
     } else {
-      HEAP->mark_compact_collector()->MarkObject(code);
+      heap->mark_compact_collector()->MarkObject(code);
     }
   }
 
-  static void VisitGlobalPropertyCell(RelocInfo* rinfo) {
+  static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
     ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
     Object* cell = rinfo->target_cell();
     Object* old_cell = cell;
-    VisitPointer(HEAP, &cell);
+    VisitPointer(heap, &cell);
     if (cell != old_cell) {
       rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
     }
   }
 
-  static inline void VisitDebugTarget(RelocInfo* rinfo) {
+  static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
     ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
             rinfo->IsPatchedReturnSequence()) ||
            (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
             rinfo->IsPatchedDebugBreakSlotSequence()));
     HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
-    HEAP->mark_compact_collector()->MarkObject(code);
+    heap->mark_compact_collector()->MarkObject(code);
   }
 
   // Mark object pointed to by p.
   INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
     if (!(*p)->IsHeapObject()) return;
     HeapObject* object = ShortCircuitConsString(p);
-    heap->mark_compact_collector()->MarkObject(object);
+    if (!object->IsMarked()) {
+      heap->mark_compact_collector()->MarkUnmarkedObject(object);
+    }
   }
 
 
   // Visit an unmarked object.
-  static inline void VisitUnmarkedObject(HeapObject* obj) {
+  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
+                                         HeapObject* obj)) {
 #ifdef DEBUG
-    ASSERT(HEAP->Contains(obj));
+    ASSERT(Isolate::Current()->heap()->Contains(obj));
     ASSERT(!obj->IsMarked());
 #endif
     Map* map = obj->map();
-    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
     collector->SetMark(obj);
     // Mark the map pointer and the body.
-    collector->MarkObject(map);
+    if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
     IterateBody(map, obj);
   }
 
@@ -518,12 +520,13 @@
     StackLimitCheck check(heap->isolate());
     if (check.HasOverflowed()) return false;
 
+    MarkCompactCollector* collector = heap->mark_compact_collector();
     // Visit the unmarked objects.
     for (Object** p = start; p < end; p++) {
       if (!(*p)->IsHeapObject()) continue;
       HeapObject* obj = HeapObject::cast(*p);
       if (obj->IsMarked()) continue;
-      VisitUnmarkedObject(obj);
+      VisitUnmarkedObject(collector, obj);
     }
     return true;
   }
@@ -561,8 +564,8 @@
   // flushed.
   static const int kCodeAgeThreshold = 5;
 
-  inline static bool HasSourceCode(SharedFunctionInfo* info) {
-    Object* undefined = HEAP->raw_unchecked_undefined_value();
+  inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
+    Object* undefined = heap->raw_unchecked_undefined_value();
     return (info->script() != undefined) &&
         (reinterpret_cast<Script*>(info->script())->source() != undefined);
   }
@@ -570,15 +573,15 @@
 
   inline static bool IsCompiled(JSFunction* function) {
     return function->unchecked_code() !=
-        Isolate::Current()->builtins()->builtin(Builtins::kLazyCompile);
+        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
   }
 
   inline static bool IsCompiled(SharedFunctionInfo* function) {
     return function->unchecked_code() !=
-        Isolate::Current()->builtins()->builtin(Builtins::kLazyCompile);
+        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
   }
 
-  inline static bool IsFlushable(JSFunction* function) {
+  inline static bool IsFlushable(Heap* heap, JSFunction* function) {
     SharedFunctionInfo* shared_info = function->unchecked_shared();
 
     // Code is either on stack, in compilation cache or referenced
@@ -593,10 +596,10 @@
       return false;
     }
 
-    return IsFlushable(shared_info);
+    return IsFlushable(heap, shared_info);
   }
 
-  inline static bool IsFlushable(SharedFunctionInfo* shared_info) {
+  inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
     // Code is either on stack, in compilation cache or referenced
     // by optimized version of function.
     if (shared_info->unchecked_code()->IsMarked()) {
@@ -606,7 +609,7 @@
 
     // The function must be compiled and have the source code available,
     // to be able to recompile it in case we need the function again.
-    if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) {
+    if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
       return false;
     }
 
@@ -638,7 +641,7 @@
 
 
   static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
-    if (!IsFlushable(function)) return false;
+    if (!IsFlushable(heap, function)) return false;
 
     // This function's code looks flushable. But we have to postpone the
     // decision until we see all functions that point to the same
@@ -715,7 +718,7 @@
     if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
 
     if (!known_flush_code_candidate) {
-      known_flush_code_candidate = IsFlushable(shared);
+      known_flush_code_candidate = IsFlushable(heap, shared);
       if (known_flush_code_candidate) {
         heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
       }
@@ -865,16 +868,16 @@
     StaticMarkingVisitor::VisitPointers(heap_, start, end);
   }
 
-  void VisitCodeTarget(RelocInfo* rinfo) {
-    StaticMarkingVisitor::VisitCodeTarget(rinfo);
+  void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
+    StaticMarkingVisitor::VisitCodeTarget(heap, rinfo);
   }
 
-  void VisitGlobalPropertyCell(RelocInfo* rinfo) {
-    StaticMarkingVisitor::VisitGlobalPropertyCell(rinfo);
+  void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
+    StaticMarkingVisitor::VisitGlobalPropertyCell(heap, rinfo);
   }
 
-  void VisitDebugTarget(RelocInfo* rinfo) {
-    StaticMarkingVisitor::VisitDebugTarget(rinfo);
+  void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
+    StaticMarkingVisitor::VisitDebugTarget(heap, rinfo);
   }
 
  private:
@@ -887,8 +890,8 @@
   explicit CodeMarkingVisitor(MarkCompactCollector* collector)
       : collector_(collector) {}
 
-  void VisitThread(ThreadLocalTop* top) {
-    for (StackFrameIterator it(top); !it.done(); it.Advance()) {
+  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+    for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
       collector_->MarkObject(it.frame()->unchecked_code());
     }
   }
@@ -922,7 +925,7 @@
 
 
 void MarkCompactCollector::PrepareForCodeFlushing() {
-  ASSERT(heap_ == Isolate::Current()->heap());
+  ASSERT(heap() == Isolate::Current()->heap());
 
   if (!FLAG_flush_code) {
     EnableCodeFlushing(false);
@@ -930,8 +933,8 @@
   }
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  if (heap_->isolate()->debug()->IsLoaded() ||
-      heap_->isolate()->debug()->has_break_points()) {
+  if (heap()->isolate()->debug()->IsLoaded() ||
+      heap()->isolate()->debug()->has_break_points()) {
     EnableCodeFlushing(false);
     return;
   }
@@ -940,10 +943,10 @@
 
   // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
   // relies on it being marked before any other descriptor array.
-  MarkObject(heap_->raw_unchecked_empty_descriptor_array());
+  MarkObject(heap()->raw_unchecked_empty_descriptor_array());
 
   // Make sure we are not referencing the code from the stack.
-  ASSERT(this == heap_->mark_compact_collector());
+  ASSERT(this == heap()->mark_compact_collector());
   for (StackFrameIterator it; !it.done(); it.Advance()) {
     MarkObject(it.frame()->unchecked_code());
   }
@@ -951,12 +954,12 @@
   // Iterate the archived stacks in all threads to check if
   // the code is referenced.
   CodeMarkingVisitor code_marking_visitor(this);
-  heap_->isolate()->thread_manager()->IterateArchivedThreads(
+  heap()->isolate()->thread_manager()->IterateArchivedThreads(
       &code_marking_visitor);
 
   SharedFunctionInfoMarkingVisitor visitor(this);
-  heap_->isolate()->compilation_cache()->IterateFunctions(&visitor);
-  heap_->isolate()->handle_scope_implementer()->Iterate(&visitor);
+  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
+  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
 
   ProcessMarkingStack();
 }
@@ -1004,7 +1007,8 @@
 // Helper class for pruning the symbol table.
 class SymbolTableCleaner : public ObjectVisitor {
  public:
-  SymbolTableCleaner() : pointers_removed_(0) { }
+  explicit SymbolTableCleaner(Heap* heap)
+    : heap_(heap), pointers_removed_(0) { }
 
   virtual void VisitPointers(Object** start, Object** end) {
     // Visit all HeapObject pointers in [start, end).
@@ -1016,10 +1020,10 @@
         // Since no objects have yet been moved we can safely access the map of
         // the object.
         if ((*p)->IsExternalString()) {
-          HEAP->FinalizeExternalString(String::cast(*p));
+          heap_->FinalizeExternalString(String::cast(*p));
         }
         // Set the entry to null_value (as deleted).
-        *p = HEAP->raw_unchecked_null_value();
+        *p = heap_->raw_unchecked_null_value();
         pointers_removed_++;
       }
     }
@@ -1029,6 +1033,7 @@
     return pointers_removed_;
   }
  private:
+  Heap* heap_;
   int pointers_removed_;
 };
 
@@ -1054,7 +1059,7 @@
   if (object->IsMap()) {
     Map* map = Map::cast(object);
     if (FLAG_cleanup_caches_in_maps_at_gc) {
-      map->ClearCodeCache(heap_);
+      map->ClearCodeCache(heap());
     }
     SetMark(map);
     if (FLAG_collect_maps &&
@@ -1125,7 +1130,7 @@
 
 
 void MarkCompactCollector::CreateBackPointers() {
-  HeapObjectIterator iterator(HEAP->map_space());
+  HeapObjectIterator iterator(heap()->map_space());
   for (HeapObject* next_object = iterator.next();
        next_object != NULL; next_object = iterator.next()) {
     if (next_object->IsMap()) {  // Could also be ByteArray on free list.
@@ -1134,7 +1139,7 @@
           map->instance_type() <= JS_FUNCTION_TYPE) {
         map->CreateBackPointers();
       } else {
-        ASSERT(map->instance_descriptors() == HEAP->empty_descriptor_array());
+        ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
       }
     }
   }
@@ -1182,11 +1187,11 @@
 
 
 void MarkCompactCollector::MarkSymbolTable() {
-  SymbolTable* symbol_table = heap_->raw_unchecked_symbol_table();
+  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
   // Mark the symbol table itself.
   SetMark(symbol_table);
   // Explicitly mark the prefix.
-  MarkingVisitor marker(heap_);
+  MarkingVisitor marker(heap());
   symbol_table->IteratePrefix(&marker);
   ProcessMarkingStack();
 }
@@ -1195,7 +1200,7 @@
 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
   // Mark the heap roots including global variables, stack variables,
   // etc., and all objects reachable from them.
-  HEAP->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
+  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
 
   // Handle the symbol table specially.
   MarkSymbolTable();
@@ -1210,15 +1215,16 @@
 
 void MarkCompactCollector::MarkObjectGroups() {
   List<ObjectGroup*>* object_groups =
-      heap_->isolate()->global_handles()->object_groups();
+      heap()->isolate()->global_handles()->object_groups();
 
+  int last = 0;
   for (int i = 0; i < object_groups->length(); i++) {
     ObjectGroup* entry = object_groups->at(i);
-    if (entry == NULL) continue;
+    ASSERT(entry != NULL);
 
-    List<Object**>& objects = entry->objects_;
+    Object*** objects = entry->objects_;
     bool group_marked = false;
-    for (int j = 0; j < objects.length(); j++) {
+    for (size_t j = 0; j < entry->length_; j++) {
       Object* object = *objects[j];
       if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
         group_marked = true;
@@ -1226,48 +1232,54 @@
       }
     }
 
-    if (!group_marked) continue;
+    if (!group_marked) {
+      (*object_groups)[last++] = entry;
+      continue;
+    }
 
-    // An object in the group is marked, so mark as gray all white heap
-    // objects in the group.
-    for (int j = 0; j < objects.length(); ++j) {
+    // An object in the group is marked, so mark all heap objects in
+    // the group.
+    for (size_t j = 0; j < entry->length_; ++j) {
       if ((*objects[j])->IsHeapObject()) {
         MarkObject(HeapObject::cast(*objects[j]));
       }
     }
 
-    // Once the entire group has been colored gray, set the object group
-    // to NULL so it won't be processed again.
-    delete entry;
-    object_groups->at(i) = NULL;
+    // Once the entire group has been marked, dispose it because it's
+    // not needed anymore.
+    entry->Dispose();
   }
+  object_groups->Rewind(last);
 }
 
 
 void MarkCompactCollector::MarkImplicitRefGroups() {
   List<ImplicitRefGroup*>* ref_groups =
-      heap_->isolate()->global_handles()->implicit_ref_groups();
+      heap()->isolate()->global_handles()->implicit_ref_groups();
 
+  int last = 0;
   for (int i = 0; i < ref_groups->length(); i++) {
     ImplicitRefGroup* entry = ref_groups->at(i);
-    if (entry == NULL) continue;
+    ASSERT(entry != NULL);
 
-    if (!entry->parent_->IsMarked()) continue;
+    if (!(*entry->parent_)->IsMarked()) {
+      (*ref_groups)[last++] = entry;
+      continue;
+    }
 
-    List<Object**>& children = entry->children_;
-    // A parent object is marked, so mark as gray all child white heap
-    // objects.
-    for (int j = 0; j < children.length(); ++j) {
+    Object*** children = entry->children_;
+    // A parent object is marked, so mark all child heap objects.
+    for (size_t j = 0; j < entry->length_; ++j) {
       if ((*children[j])->IsHeapObject()) {
         MarkObject(HeapObject::cast(*children[j]));
       }
     }
 
-    // Once the entire group has been colored gray, set the  group
-    // to NULL so it won't be processed again.
-    delete entry;
-    ref_groups->at(i) = NULL;
+    // Once the entire group has been marked, dispose it because it's
+    // not needed anymore.
+    entry->Dispose();
   }
+  ref_groups->Rewind(last);
 }
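
Both group-marking loops above switch from NULL-ing out processed entries (and re-skipping the NULLs on later passes) to compacting the survivors in place with a last cursor and a final Rewind. The same pattern over std::vector, standing in for V8's List type (illustrative only):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> groups = {1, 2, 3, 4, 5, 6};
      auto processed = [](int g) { return g % 2 == 0; };  // stand-in predicate

      std::size_t last = 0;
      for (std::size_t i = 0; i < groups.size(); i++) {
        if (processed(groups[i])) continue;  // "dispose" the handled entry
        groups[last++] = groups[i];          // compact survivors to the front
      }
      groups.resize(last);                   // the List::Rewind(last) step

      for (int g : groups) std::printf("%d ", g);  // prints: 1 3 5
    }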
 
 
@@ -1279,7 +1291,7 @@
   while (!marking_stack_.is_empty()) {
     HeapObject* object = marking_stack_.Pop();
     ASSERT(object->IsHeapObject());
-    ASSERT(heap_->Contains(object));
+    ASSERT(heap()->Contains(object));
     ASSERT(object->IsMarked());
     ASSERT(!object->IsOverflowed());
 
@@ -1303,32 +1315,32 @@
 void MarkCompactCollector::RefillMarkingStack() {
   ASSERT(marking_stack_.overflowed());
 
-  SemiSpaceIterator new_it(HEAP->new_space(), &OverflowObjectSize);
+  SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
   if (marking_stack_.is_full()) return;
 
-  HeapObjectIterator old_pointer_it(HEAP->old_pointer_space(),
+  HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
                                     &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
   if (marking_stack_.is_full()) return;
 
-  HeapObjectIterator old_data_it(HEAP->old_data_space(), &OverflowObjectSize);
+  HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
   if (marking_stack_.is_full()) return;
 
-  HeapObjectIterator code_it(HEAP->code_space(), &OverflowObjectSize);
+  HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
   if (marking_stack_.is_full()) return;
 
-  HeapObjectIterator map_it(HEAP->map_space(), &OverflowObjectSize);
+  HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
   if (marking_stack_.is_full()) return;
 
-  HeapObjectIterator cell_it(HEAP->cell_space(), &OverflowObjectSize);
+  HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
   if (marking_stack_.is_full()) return;
 
-  LargeObjectIterator lo_it(HEAP->lo_space(), &OverflowObjectSize);
+  LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
   OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
   if (marking_stack_.is_full()) return;
 
@@ -1366,7 +1378,7 @@
   // The recursive GC marker detects when it is nearing stack overflow,
   // and switches to a different marking system.  JS interrupts interfere
   // with the C stack limit check.
-  PostponeInterruptsScope postpone(heap_->isolate());
+  PostponeInterruptsScope postpone(heap()->isolate());
 
 #ifdef DEBUG
   ASSERT(state_ == PREPARE_GC);
@@ -1374,14 +1386,14 @@
 #endif
   // The to space contains live objects, the from space is used as a marking
   // stack.
-  marking_stack_.Initialize(heap_->new_space()->FromSpaceLow(),
-                            heap_->new_space()->FromSpaceHigh());
+  marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
+                            heap()->new_space()->FromSpaceHigh());
 
   ASSERT(!marking_stack_.overflowed());
 
   PrepareForCodeFlushing();
 
-  RootMarkingVisitor root_visitor(heap_);
+  RootMarkingVisitor root_visitor(heap());
   MarkRoots(&root_visitor);
 
   // The objects reachable from the roots are marked, yet unreachable
@@ -1395,10 +1407,10 @@
   //
   // First we identify nonlive weak handles and mark them as pending
   // destruction.
-  heap_->isolate()->global_handles()->IdentifyWeakHandles(
+  heap()->isolate()->global_handles()->IdentifyWeakHandles(
       &IsUnmarkedHeapObject);
   // Then we mark the objects and process the transitive closure.
-  heap_->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
+  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
   while (marking_stack_.overflowed()) {
     RefillMarkingStack();
     EmptyMarkingStack();
@@ -1411,20 +1423,20 @@
   // Prune the symbol table removing all symbols only pointed to by the
   // symbol table.  Cannot use symbol_table() here because the symbol
   // table is marked.
-  SymbolTable* symbol_table = heap_->raw_unchecked_symbol_table();
-  SymbolTableCleaner v;
+  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
+  SymbolTableCleaner v(heap());
   symbol_table->IterateElements(&v);
   symbol_table->ElementsRemoved(v.PointersRemoved());
-  heap_->external_string_table_.Iterate(&v);
-  heap_->external_string_table_.CleanUp();
+  heap()->external_string_table_.Iterate(&v);
+  heap()->external_string_table_.CleanUp();
 
   // Process the weak references.
   MarkCompactWeakObjectRetainer mark_compact_object_retainer;
-  heap_->ProcessWeakReferences(&mark_compact_object_retainer);
+  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
 
   // Remove object groups after marking phase.
-  heap_->isolate()->global_handles()->RemoveObjectGroups();
-  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
+  heap()->isolate()->global_handles()->RemoveObjectGroups();
+  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
 
   // Flush code from collected candidates.
   if (is_code_flushing_enabled()) {
@@ -1432,28 +1444,28 @@
   }
 
   // Clean up dead objects from the runtime profiler.
-  heap_->isolate()->runtime_profiler()->RemoveDeadSamples();
+  heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
 }
 
 
 #ifdef DEBUG
 void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
   live_bytes_ += obj->Size();
-  if (HEAP->new_space()->Contains(obj)) {
+  if (heap()->new_space()->Contains(obj)) {
     live_young_objects_size_ += obj->Size();
-  } else if (HEAP->map_space()->Contains(obj)) {
+  } else if (heap()->map_space()->Contains(obj)) {
     ASSERT(obj->IsMap());
     live_map_objects_size_ += obj->Size();
-  } else if (HEAP->cell_space()->Contains(obj)) {
+  } else if (heap()->cell_space()->Contains(obj)) {
     ASSERT(obj->IsJSGlobalPropertyCell());
     live_cell_objects_size_ += obj->Size();
-  } else if (HEAP->old_pointer_space()->Contains(obj)) {
+  } else if (heap()->old_pointer_space()->Contains(obj)) {
     live_old_pointer_objects_size_ += obj->Size();
-  } else if (HEAP->old_data_space()->Contains(obj)) {
+  } else if (heap()->old_data_space()->Contains(obj)) {
     live_old_data_objects_size_ += obj->Size();
-  } else if (HEAP->code_space()->Contains(obj)) {
+  } else if (heap()->code_space()->Contains(obj)) {
     live_code_objects_size_ += obj->Size();
-  } else if (HEAP->lo_space()->Contains(obj)) {
+  } else if (heap()->lo_space()->Contains(obj)) {
     live_lo_objects_size_ += obj->Size();
   } else {
     UNREACHABLE();
@@ -1469,7 +1481,7 @@
       compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
 #endif
   // Deallocate unmarked objects and clear marked bits for marked objects.
-  HEAP->lo_space()->FreeUnmarkedObjects();
+  heap()->lo_space()->FreeUnmarkedObjects();
 }
 
 
@@ -1482,7 +1494,7 @@
 
 
 void MarkCompactCollector::ClearNonLiveTransitions() {
-  HeapObjectIterator map_iterator(HEAP->map_space(), &SizeOfMarkedObject);
+  HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
   // Iterate over the map space, setting map transitions that go from
   // a marked map to an unmarked map to null transitions.  At the same time,
   // set all the prototype fields of maps back to their original value,
@@ -1532,7 +1544,7 @@
       // This test will always be false on the first iteration.
       if (on_dead_path && current->IsMarked()) {
         on_dead_path = false;
-        current->ClearNonLiveTransitions(heap_, real_prototype);
+        current->ClearNonLiveTransitions(heap(), real_prototype);
       }
       *HeapObject::RawField(current, Map::kPrototypeOffset) =
           real_prototype;
@@ -1690,7 +1702,7 @@
 
 
 // Most non-live objects are ignored.
-inline void IgnoreNonLiveObject(HeapObject* object) {}
+inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
 
 
 // Function template that, given a range of addresses (e.g., a semispace or a
@@ -1744,7 +1756,7 @@
       }
     } else {  // Non-live object.
       object_size = object->Size();
-      ProcessNonLive(object);
+      ProcessNonLive(object, collector->heap()->isolate());
       if (is_prev_alive) {  // Transition from live to non-live.
         free_start = current;
         is_prev_alive = false;
@@ -1767,8 +1779,8 @@
                                    EncodeForwardingAddressInNewSpace,
                                    IgnoreNonLiveObject>(
       this,
-      heap_->new_space()->bottom(),
-      heap_->new_space()->top(),
+      heap()->new_space()->bottom(),
+      heap()->new_space()->top(),
       &ignored);
 }
 
@@ -2089,7 +2101,8 @@
           is_previous_alive = true;
         }
       } else {
-        heap->mark_compact_collector()->ReportDeleteIfNeeded(object);
+        heap->mark_compact_collector()->ReportDeleteIfNeeded(
+            object, heap->isolate());
         if (is_previous_alive) {  // Transition from live to free.
           free_start = current;
           is_previous_alive = false;
@@ -2189,24 +2202,24 @@
   // Objects in the active semispace of the young generation may be
   // relocated to the inactive semispace (if not promoted).  Set the
   // relocation info to the beginning of the inactive semispace.
-  heap_->new_space()->MCResetRelocationInfo();
+  heap()->new_space()->MCResetRelocationInfo();
 
   // Compute the forwarding pointers in each space.
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
                                         ReportDeleteIfNeeded>(
-      heap_->old_pointer_space());
+      heap()->old_pointer_space());
 
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
                                         IgnoreNonLiveObject>(
-      heap_->old_data_space());
+      heap()->old_data_space());
 
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
                                         ReportDeleteIfNeeded>(
-      heap_->code_space());
+      heap()->code_space());
 
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
                                         IgnoreNonLiveObject>(
-      heap_->cell_space());
+      heap()->cell_space());
 
 
   // Compute new space next to last after the old and code spaces have been
@@ -2218,25 +2231,26 @@
   // non-live map pointers to get the sizes of non-live objects.
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
                                         IgnoreNonLiveObject>(
-      heap_->map_space());
+      heap()->map_space());
 
   // Write relocation info to the top page, so we can use it later.  This is
   // done after promoting objects from the new space so we get the correct
   // allocation top.
-  heap_->old_pointer_space()->MCWriteRelocationInfoToPage();
-  heap_->old_data_space()->MCWriteRelocationInfoToPage();
-  heap_->code_space()->MCWriteRelocationInfoToPage();
-  heap_->map_space()->MCWriteRelocationInfoToPage();
-  heap_->cell_space()->MCWriteRelocationInfoToPage();
+  heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
+  heap()->old_data_space()->MCWriteRelocationInfoToPage();
+  heap()->code_space()->MCWriteRelocationInfoToPage();
+  heap()->map_space()->MCWriteRelocationInfoToPage();
+  heap()->cell_space()->MCWriteRelocationInfoToPage();
 }
 
 
 class MapIterator : public HeapObjectIterator {
  public:
-  MapIterator() : HeapObjectIterator(HEAP->map_space(), &SizeCallback) { }
+  explicit MapIterator(Heap* heap)
+    : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
 
-  explicit MapIterator(Address start)
-      : HeapObjectIterator(HEAP->map_space(), start, &SizeCallback) { }
+  MapIterator(Heap* heap, Address start)
+      : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
 
  private:
   static int SizeCallback(HeapObject* unused) {
@@ -2252,7 +2266,8 @@
     : heap_(heap),
       live_maps_(live_maps),
       to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
-      map_to_evacuate_it_(to_evacuate_start_),
+      vacant_map_it_(heap),
+      map_to_evacuate_it_(heap, to_evacuate_start_),
       first_map_to_evacuate_(
           reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
   }
@@ -2273,36 +2288,41 @@
 
   void UpdateMapPointersInRoots() {
     MapUpdatingVisitor map_updating_visitor;
-    heap_->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
-    heap_->isolate()->global_handles()->IterateWeakRoots(&map_updating_visitor);
+    heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
+    heap()->isolate()->global_handles()->IterateWeakRoots(
+        &map_updating_visitor);
     LiveObjectList::IterateElements(&map_updating_visitor);
   }
 
   void UpdateMapPointersInPagedSpace(PagedSpace* space) {
-    ASSERT(space != heap_->map_space());
+    ASSERT(space != heap()->map_space());
 
     PageIterator it(space, PageIterator::PAGES_IN_USE);
     while (it.has_next()) {
       Page* p = it.next();
-      UpdateMapPointersInRange(heap_, p->ObjectAreaStart(), p->AllocationTop());
+      UpdateMapPointersInRange(heap(),
+                               p->ObjectAreaStart(),
+                               p->AllocationTop());
     }
   }
 
   void UpdateMapPointersInNewSpace() {
-    NewSpace* space = heap_->new_space();
-    UpdateMapPointersInRange(heap_, space->bottom(), space->top());
+    NewSpace* space = heap()->new_space();
+    UpdateMapPointersInRange(heap(), space->bottom(), space->top());
   }
 
   void UpdateMapPointersInLargeObjectSpace() {
-    LargeObjectIterator it(heap_->lo_space());
+    LargeObjectIterator it(heap()->lo_space());
     for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
-      UpdateMapPointersInObject(heap_, obj);
+      UpdateMapPointersInObject(heap(), obj);
   }
 
   void Finish() {
-    heap_->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
+    heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
   }
 
+  inline Heap* heap() const { return heap_; }
+
  private:
   Heap* heap_;
   int live_maps_;
@@ -2452,26 +2472,26 @@
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
-  SweepSpace(heap_, heap_->old_pointer_space());
-  SweepSpace(heap_, heap_->old_data_space());
-  SweepSpace(heap_, heap_->code_space());
-  SweepSpace(heap_, heap_->cell_space());
+  SweepSpace(heap(), heap()->old_pointer_space());
+  SweepSpace(heap(), heap()->old_data_space());
+  SweepSpace(heap(), heap()->code_space());
+  SweepSpace(heap(), heap()->cell_space());
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
-    SweepNewSpace(heap_, heap_->new_space());
+    SweepNewSpace(heap(), heap()->new_space());
   }
-  SweepSpace(heap_, heap_->map_space());
+  SweepSpace(heap(), heap()->map_space());
 
-  heap_->IterateDirtyRegions(heap_->map_space(),
-                             &heap_->IteratePointersInDirtyMapsRegion,
+  heap()->IterateDirtyRegions(heap()->map_space(),
+                             &heap()->IteratePointersInDirtyMapsRegion,
                              &UpdatePointerToNewGen,
-                             heap_->WATERMARK_SHOULD_BE_VALID);
+                             heap()->WATERMARK_SHOULD_BE_VALID);
 
-  intptr_t live_maps_size = heap_->map_space()->Size();
+  intptr_t live_maps_size = heap()->map_space()->Size();
   int live_maps = static_cast<int>(live_maps_size / Map::kSize);
   ASSERT(live_map_objects_size_ == live_maps_size);
 
-  if (heap_->map_space()->NeedsCompaction(live_maps)) {
-    MapCompact map_compact(heap_, live_maps);
+  if (heap()->map_space()->NeedsCompaction(live_maps)) {
+    MapCompact map_compact(heap(), live_maps);
 
     map_compact.CompactMaps();
     map_compact.UpdateMapPointersInRoots();
@@ -2479,7 +2499,7 @@
     PagedSpaces spaces;
     for (PagedSpace* space = spaces.next();
          space != NULL; space = spaces.next()) {
-      if (space == heap_->map_space()) continue;
+      if (space == heap()->map_space()) continue;
       map_compact.UpdateMapPointersInPagedSpace(space);
     }
     map_compact.UpdateMapPointersInNewSpace();
@@ -2575,6 +2595,8 @@
         reinterpret_cast<Code*>(target)->instruction_start());
   }
 
+  inline Heap* heap() const { return heap_; }
+
  private:
   void UpdatePointer(Object** p) {
     if (!(*p)->IsHeapObject()) return;
@@ -2582,27 +2604,27 @@
     HeapObject* obj = HeapObject::cast(*p);
     Address old_addr = obj->address();
     Address new_addr;
-    ASSERT(!heap_->InFromSpace(obj));
+    ASSERT(!heap()->InFromSpace(obj));
 
-    if (heap_->new_space()->Contains(obj)) {
+    if (heap()->new_space()->Contains(obj)) {
       Address forwarding_pointer_addr =
-          heap_->new_space()->FromSpaceLow() +
-          heap_->new_space()->ToSpaceOffsetForAddress(old_addr);
+          heap()->new_space()->FromSpaceLow() +
+          heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
       new_addr = Memory::Address_at(forwarding_pointer_addr);
 
 #ifdef DEBUG
-      ASSERT(heap_->old_pointer_space()->Contains(new_addr) ||
-             heap_->old_data_space()->Contains(new_addr) ||
-             heap_->new_space()->FromSpaceContains(new_addr) ||
-             heap_->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
+      ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
+             heap()->old_data_space()->Contains(new_addr) ||
+             heap()->new_space()->FromSpaceContains(new_addr) ||
+             heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
 
-      if (heap_->new_space()->FromSpaceContains(new_addr)) {
-        ASSERT(heap_->new_space()->FromSpaceOffsetForAddress(new_addr) <=
-               heap_->new_space()->ToSpaceOffsetForAddress(old_addr));
+      if (heap()->new_space()->FromSpaceContains(new_addr)) {
+        ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+               heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
       }
 #endif
 
-    } else if (heap_->lo_space()->Contains(obj)) {
+    } else if (heap()->lo_space()->Contains(obj)) {
       // Don't move objects in the large object space.
       return;
 
@@ -2641,34 +2663,34 @@
   ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
   state_ = UPDATE_POINTERS;
 #endif
-  UpdatingVisitor updating_visitor(heap_);
-  heap_->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
+  UpdatingVisitor updating_visitor(heap());
+  heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
       &updating_visitor);
-  heap_->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
-  heap_->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
+  heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
+  heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
 
   // Update the pointer to the head of the weak list of global contexts.
-  updating_visitor.VisitPointer(&heap_->global_contexts_list_);
+  updating_visitor.VisitPointer(&heap()->global_contexts_list_);
 
   LiveObjectList::IterateElements(&updating_visitor);
 
   int live_maps_size = IterateLiveObjects(
-      heap_->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+      heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
   int live_pointer_olds_size = IterateLiveObjects(
-      heap_->old_pointer_space(),
+      heap()->old_pointer_space(),
       &MarkCompactCollector::UpdatePointersInOldObject);
   int live_data_olds_size = IterateLiveObjects(
-      heap_->old_data_space(),
+      heap()->old_data_space(),
       &MarkCompactCollector::UpdatePointersInOldObject);
   int live_codes_size = IterateLiveObjects(
-      heap_->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+      heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
   int live_cells_size = IterateLiveObjects(
-      heap_->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+      heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
   int live_news_size = IterateLiveObjects(
-      heap_->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
+      heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
 
   // Large objects do not move, the map word can be updated directly.
-  LargeObjectIterator it(heap_->lo_space());
+  LargeObjectIterator it(heap()->lo_space());
   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
     UpdatePointersInNewObject(obj);
   }
@@ -2695,8 +2717,8 @@
 
   Address forwarded = GetForwardingAddressInOldSpace(old_map);
 
-  ASSERT(heap_->map_space()->Contains(old_map));
-  ASSERT(heap_->map_space()->Contains(forwarded));
+  ASSERT(heap()->map_space()->Contains(old_map));
+  ASSERT(heap()->map_space()->Contains(forwarded));
 #ifdef DEBUG
   if (FLAG_gc_verbose) {
     PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
@@ -2711,7 +2733,7 @@
   int obj_size = obj->SizeFromMap(old_map);
 
   // Update pointers in the object body.
-  UpdatingVisitor updating_visitor(heap_);
+  UpdatingVisitor updating_visitor(heap());
   obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
   return obj_size;
 }
@@ -2720,8 +2742,8 @@
 int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
   // Decode the map pointer.
   MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
-  ASSERT(heap_->map_space()->Contains(HeapObject::FromAddress(map_addr)));
+  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
 
   // At this point, the first word of map_addr is also encoded, cannot
   // cast it to Map* using Map::cast.
@@ -2742,7 +2764,7 @@
 #endif
 
   // Update pointers in the object body.
-  UpdatingVisitor updating_visitor(heap_);
+  UpdatingVisitor updating_visitor(heap());
   obj->IterateBody(type, obj_size, &updating_visitor);
   return obj_size;
 }
@@ -2799,18 +2821,18 @@
   // Relocates objects, always relocate map objects first. Relocating
   // objects in other space relies on map objects to get object size.
   int live_maps_size = IterateLiveObjects(
-      heap_->map_space(), &MarkCompactCollector::RelocateMapObject);
+      heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
   int live_pointer_olds_size = IterateLiveObjects(
-      heap_->old_pointer_space(),
+      heap()->old_pointer_space(),
       &MarkCompactCollector::RelocateOldPointerObject);
   int live_data_olds_size = IterateLiveObjects(
-      heap_->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
+      heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
   int live_codes_size = IterateLiveObjects(
-      heap_->code_space(), &MarkCompactCollector::RelocateCodeObject);
+      heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
   int live_cells_size = IterateLiveObjects(
-      heap_->cell_space(), &MarkCompactCollector::RelocateCellObject);
+      heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
   int live_news_size = IterateLiveObjects(
-      heap_->new_space(), &MarkCompactCollector::RelocateNewObject);
+      heap()->new_space(), &MarkCompactCollector::RelocateNewObject);
 
   USE(live_maps_size);
   USE(live_pointer_olds_size);
@@ -2826,28 +2848,28 @@
   ASSERT(live_news_size == live_young_objects_size_);
 
   // Flip from and to spaces
-  heap_->new_space()->Flip();
+  heap()->new_space()->Flip();
 
-  heap_->new_space()->MCCommitRelocationInfo();
+  heap()->new_space()->MCCommitRelocationInfo();
 
   // Set age_mark to bottom in to space
-  Address mark = heap_->new_space()->bottom();
-  heap_->new_space()->set_age_mark(mark);
+  Address mark = heap()->new_space()->bottom();
+  heap()->new_space()->set_age_mark(mark);
 
   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
     space->MCCommitRelocationInfo();
 
-  heap_->CheckNewSpaceExpansionCriteria();
-  heap_->IncrementYoungSurvivorsCounter(live_news_size);
+  heap()->CheckNewSpaceExpansionCriteria();
+  heap()->IncrementYoungSurvivorsCounter(live_news_size);
 }
 
 
 int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
   // Recover map pointer.
   MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
-  ASSERT(heap_->map_space()->Contains(HeapObject::FromAddress(map_addr)));
+  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
 
   // Get forwarding address before resetting map pointer
   Address new_addr = GetForwardingAddressInOldSpace(obj);
@@ -2860,7 +2882,7 @@
 
   if (new_addr != old_addr) {
     // Move contents.
-    heap_->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+    heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    Map::kSize);
   }
@@ -2906,8 +2928,8 @@
                                                    PagedSpace* space) {
   // Recover map pointer.
   MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
-  ASSERT(heap_->map_space()->Contains(map_addr));
+  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+  ASSERT(heap()->map_space()->Contains(map_addr));
 
   // Get forwarding address before resetting map pointer.
   Address new_addr = GetForwardingAddressInOldSpace(obj);
@@ -2919,10 +2941,10 @@
 
   if (new_addr != old_addr) {
     // Move contents.
-    if (space == heap_->old_data_space()) {
-      heap_->MoveBlock(new_addr, old_addr, obj_size);
+    if (space == heap()->old_data_space()) {
+      heap()->MoveBlock(new_addr, old_addr, obj_size);
     } else {
-      heap_->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+      heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                      old_addr,
                                                      obj_size);
     }
@@ -2932,47 +2954,47 @@
 
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsSharedFunctionInfo()) {
-    PROFILE(heap_->isolate(),
+    PROFILE(heap()->isolate(),
             SharedFunctionInfoMoveEvent(old_addr, new_addr));
   }
-  HEAP_PROFILE(heap_, ObjectMoveEvent(old_addr, new_addr));
+  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
 
   return obj_size;
 }
 
 
 int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap_->old_pointer_space());
+  return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
 }
 
 
 int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap_->old_data_space());
+  return RelocateOldNonCodeObject(obj, heap()->old_data_space());
 }
 
 
 int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap_->cell_space());
+  return RelocateOldNonCodeObject(obj, heap()->cell_space());
 }
 
 
 int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
   // Recover map pointer.
   MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
-  ASSERT(heap_->map_space()->Contains(HeapObject::FromAddress(map_addr)));
+  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
 
   // Get forwarding address before resetting map pointer
   Address new_addr = GetForwardingAddressInOldSpace(obj);
 
   // Reset the map pointer.
-  int obj_size = RestoreMap(obj, heap_->code_space(), new_addr, map_addr);
+  int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);
 
   Address old_addr = obj->address();
 
   if (new_addr != old_addr) {
     // Move contents.
-    heap_->MoveBlock(new_addr, old_addr, obj_size);
+    heap()->MoveBlock(new_addr, old_addr, obj_size);
   }
 
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
@@ -2980,9 +3002,9 @@
     // May also update inline cache target.
     Code::cast(copied_to)->Relocate(new_addr - old_addr);
     // Notify the logger that compiled code has moved.
-    PROFILE(heap_->isolate(), CodeMoveEvent(old_addr, new_addr));
+    PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
   }
-  HEAP_PROFILE(heap_, ObjectMoveEvent(old_addr, new_addr));
+  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
 
   return obj_size;
 }
@@ -2993,26 +3015,26 @@
 
   // Get forwarding address
   Address old_addr = obj->address();
-  int offset = heap_->new_space()->ToSpaceOffsetForAddress(old_addr);
+  int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
 
   Address new_addr =
-    Memory::Address_at(heap_->new_space()->FromSpaceLow() + offset);
+    Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);
 
 #ifdef DEBUG
-  if (heap_->new_space()->FromSpaceContains(new_addr)) {
-    ASSERT(heap_->new_space()->FromSpaceOffsetForAddress(new_addr) <=
-           heap_->new_space()->ToSpaceOffsetForAddress(old_addr));
+  if (heap()->new_space()->FromSpaceContains(new_addr)) {
+    ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+           heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
   } else {
-    ASSERT(heap_->TargetSpace(obj) == heap_->old_pointer_space() ||
-           heap_->TargetSpace(obj) == heap_->old_data_space());
+    ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
+           heap()->TargetSpace(obj) == heap()->old_data_space());
   }
 #endif
 
   // New and old addresses cannot overlap.
-  if (heap_->InNewSpace(HeapObject::FromAddress(new_addr))) {
-    heap_->CopyBlock(new_addr, old_addr, obj_size);
+  if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
+    heap()->CopyBlock(new_addr, old_addr, obj_size);
   } else {
-    heap_->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+    heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    obj_size);
   }
@@ -3025,10 +3047,10 @@
 
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsSharedFunctionInfo()) {
-    PROFILE(heap_->isolate(),
+    PROFILE(heap()->isolate(),
             SharedFunctionInfoMoveEvent(old_addr, new_addr));
   }
-  HEAP_PROFILE(heap_, ObjectMoveEvent(old_addr, new_addr));
+  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
 
   return obj_size;
 }
@@ -3037,7 +3059,7 @@
 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
   if (enable) {
     if (code_flusher_ != NULL) return;
-    code_flusher_ = new CodeFlusher(heap_->isolate());
+    code_flusher_ = new CodeFlusher(heap()->isolate());
   } else {
     if (code_flusher_ == NULL) return;
     delete code_flusher_;
@@ -3046,7 +3068,8 @@
 }
 
 
-void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
+void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
+                                                Isolate* isolate) {
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (obj->IsCode()) {
     GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
@@ -3054,7 +3077,7 @@
 #endif
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (obj->IsCode()) {
-    PROFILE(ISOLATE, CodeDeleteEvent(obj->address()));
+    PROFILE(isolate, CodeDeleteEvent(obj->address()));
   }
 #endif
 }
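
For context, the mark-compact hunks above mechanically convert the collector from reading its heap_ field directly to going through an accessor. A minimal sketch of that pattern, with member names assumed from context rather than copied from the real header:

    class MarkCompactCollector {
     public:
      // Call sites use heap() instead of touching the field, which keeps
      // them uniform and leaves one place to add assertions later.
      inline Heap* heap() const { return heap_; }

     private:
      Heap* heap_;  // The heap this collector operates on.
    };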
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 3c9d28b..04d0ff6 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -98,8 +98,6 @@
 
 // -------------------------------------------------------------------------
 // Mark-Compact collector
-//
-// All methods are static.
 
 class OverflowedObjectsScanner;
 
@@ -129,7 +127,7 @@
                                    int* offset);
 
   // Type of functions to process non-live objects.
-  typedef void (*ProcessNonLiveFunction)(HeapObject* object);
+  typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);
 
   // Pointer to member function, used in IterateLiveObjects.
   typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
@@ -179,7 +177,7 @@
 #endif
 
   // Determine type of object and emit deletion log event.
-  static void ReportDeleteIfNeeded(HeapObject* obj);
+  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
 
   // Returns size of a possibly marked object.
   static int SizeOfMarkedObject(HeapObject* obj);
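
The ProcessNonLiveFunction typedef and ReportDeleteIfNeeded now receive the Isolate as a parameter instead of resolving it through the thread-local ISOLATE macro, consistent with the multi-isolate direction of this merge. A conforming callback would look roughly like this (helper name hypothetical):

    // Hypothetical callback matching the new typedef; the isolate arrives
    // as an argument rather than via a thread-local lookup.
    static void ProcessNonLive(HeapObject* object, Isolate* isolate) {
      MarkCompactCollector::ReportDeleteIfNeeded(object, isolate);
    }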
diff --git a/src/messages.cc b/src/messages.cc
index cab982c..abc2537 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -56,11 +56,6 @@
 }
 
 
-void MessageHandler::ReportMessage(const char* msg) {
-  PrintF("%s\n", msg);
-}
-
-
 Handle<JSMessageObject> MessageHandler::MakeMessageObject(
     const char* type,
     MessageLocation* loc,
@@ -106,14 +101,25 @@
 }
 
 
-void MessageHandler::ReportMessage(MessageLocation* loc,
+void MessageHandler::ReportMessage(Isolate* isolate,
+                                   MessageLocation* loc,
                                    Handle<Object> message) {
+  // We are calling into embedder code that can throw exceptions. Thus we
+  // need to save the current exception state, reset it to a clean one, and
+  // ignore any scheduled exceptions the callbacks may throw.
+  Isolate::ExceptionScope exception_scope(isolate);
+  isolate->clear_pending_exception();
+  isolate->set_external_caught_exception(false);
+
   v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
 
   v8::NeanderArray global_listeners(FACTORY->message_listeners());
   int global_length = global_listeners.length();
   if (global_length == 0) {
     DefaultMessageReport(loc, message);
+    if (isolate->has_scheduled_exception()) {
+      isolate->clear_scheduled_exception();
+    }
   } else {
     for (int i = 0; i < global_length; i++) {
       HandleScope scope;
@@ -123,7 +129,14 @@
       v8::MessageCallback callback =
           FUNCTION_CAST<v8::MessageCallback>(callback_obj->proxy());
       Handle<Object> callback_data(listener.get(1));
-      callback(api_message_obj, v8::Utils::ToLocal(callback_data));
+      {
+        // Do not allow exceptions to propagate.
+        v8::TryCatch tryCatch;
+        callback(api_message_obj, v8::Utils::ToLocal(callback_data));
+      }
+      if (isolate->has_scheduled_exception()) {
+        isolate->clear_scheduled_exception();
+      }
     }
   }
 }
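
ReportMessage now isolates each embedder callback behind three layers: an ExceptionScope that restores the previous pending-exception state on exit, a TryCatch that absorbs anything the callback throws, and an explicit clear of scheduled exceptions afterwards. Condensed into a single hypothetical helper (the hunk above inlines the same steps):

    // Sketch only: invoke one listener without letting its exceptions leak.
    static void InvokeListenerSafely(Isolate* isolate,
                                     v8::MessageCallback callback,
                                     v8::Local<v8::Message> message,
                                     v8::Local<v8::Value> data) {
      // Preserve the caller's exception state; restored when scope exits.
      Isolate::ExceptionScope exception_scope(isolate);
      isolate->clear_pending_exception();
      isolate->set_external_caught_exception(false);
      {
        v8::TryCatch try_catch;  // absorbs anything the embedder throws
        callback(message, data);
      }
      // Callbacks may still schedule an exception; drop it deliberately.
      if (isolate->has_scheduled_exception()) {
        isolate->clear_scheduled_exception();
      }
    }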
diff --git a/src/messages.h b/src/messages.h
index 48f3244..fc2162d 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -89,9 +89,6 @@
 // of message listeners registered in an environment
 class MessageHandler {
  public:
-  // Report a message (w/o JS heap allocation).
-  static void ReportMessage(const char* msg);
-
   // Returns a message object for the API to use.
   static Handle<JSMessageObject> MakeMessageObject(
       const char* type,
@@ -101,7 +98,9 @@
       Handle<JSArray> stack_frames);
 
   // Report a formatted message (needs JS allocation).
-  static void ReportMessage(MessageLocation* loc, Handle<Object> message);
+  static void ReportMessage(Isolate* isolate,
+                            MessageLocation* loc,
+                            Handle<Object> message);
 
   static void DefaultMessageReport(const MessageLocation* loc,
                                    Handle<Object> message_obj);
diff --git a/src/messages.js b/src/messages.js
index 3eb056f..e657fc0 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -190,6 +190,7 @@
       property_desc_object:         ["Property description must be an object: ", "%0"],
       redefine_disallowed:          ["Cannot redefine property: ", "%0"],
       define_disallowed:            ["Cannot define property, object is not extensible: ", "%0"],
+      non_extensible_proto:         ["%0", " is not extensible"],
       // RangeError
       invalid_array_length:         ["Invalid array length"],
       stack_overflow:               ["Maximum call stack size exceeded"],
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index 6441470..f507590 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -147,7 +147,7 @@
  public:
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
-  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kLastParameterOffset = +2 * kPointerSize;
   static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
 
   // Caller SP-relative.
diff --git a/src/mips/virtual-frame-mips.h b/src/mips/virtual-frame-mips.h
index be8b74e..cf30b09 100644
--- a/src/mips/virtual-frame-mips.h
+++ b/src/mips/virtual-frame-mips.h
@@ -106,7 +106,7 @@
   inline VirtualFrame();
 
   // Construct an invalid virtual frame, used by JumpTargets.
-  inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
+  explicit inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
 
   // Construct a virtual frame as a clone of an existing one.
   explicit inline VirtualFrame(VirtualFrame* original);
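
Adding `explicit` to the one-argument constructor only forbids implicit conversions; behavior is otherwise unchanged. Illustrated on stand-in declarations:

    struct InvalidVirtualFrameInitializer;
    struct VirtualFrame {
      explicit VirtualFrame(InvalidVirtualFrameInitializer* dummy);
    };
    // VirtualFrame f = ptr;   // implicit conversion: no longer compiles
    // VirtualFrame f(ptr);    // direct initialization: still fine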
diff --git a/src/natives.h b/src/natives.h
index 639a2d3..92f0d90 100644
--- a/src/natives.h
+++ b/src/natives.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,7 +36,7 @@
                                      int index);
 
 enum NativeType {
-  CORE, D8
+  CORE, EXPERIMENTAL, D8, I18N
 };
 
 template <NativeType type>
@@ -57,6 +57,7 @@
 };
 
 typedef NativesCollection<CORE> Natives;
+typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
 
 } }  // namespace v8::internal
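
The EXPERIMENTAL member of NativeType pairs with the EXPERIMENTAL_LIBRARY_FILES list added to the SConscript, so experimental JS libraries (proxy.js) are bundled as their own collection. Usage is symmetric with the CORE collection; a hedged sketch, assuming the usual static members of NativesCollection:

    // Assumed interface: each instantiation exposes the same statics.
    int n = ExperimentalNatives::GetBuiltinsCount();
    for (int i = 0; i < n; i++) {
      Vector<const char> source = ExperimentalNatives::GetScriptSource(i);
      // ... compile/bootstrap the experimental library ...
      USE(source);
    }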
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 5395bbb..823b2da 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -1774,7 +1774,7 @@
 void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
   desc->Init(GetKey(descriptor_number),
              GetValue(descriptor_number),
-             GetDetails(descriptor_number));
+             PropertyDetails(GetDetails(descriptor_number)));
 }
 
 
@@ -2573,7 +2573,6 @@
 
 int Code::major_key() {
   ASSERT(kind() == STUB ||
-         kind() == BINARY_OP_IC ||
          kind() == TYPE_RECORDING_BINARY_OP_IC ||
          kind() == COMPARE_IC);
   return READ_BYTE_FIELD(this, kStubMajorKeyOffset);
@@ -2582,7 +2581,6 @@
 
 void Code::set_major_key(int major) {
   ASSERT(kind() == STUB ||
-         kind() == BINARY_OP_IC ||
          kind() == TYPE_RECORDING_BINARY_OP_IC ||
          kind() == COMPARE_IC);
   ASSERT(0 <= major && major < 256);
@@ -2691,18 +2689,6 @@
 }
 
 
-byte Code::binary_op_type() {
-  ASSERT(is_binary_op_stub());
-  return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
-}
-
-
-void Code::set_binary_op_type(byte value) {
-  ASSERT(is_binary_op_stub());
-  WRITE_BYTE_FIELD(this, kBinaryOpTypeOffset, value);
-}
-
-
 byte Code::type_recording_binary_op_type() {
   ASSERT(is_type_recording_binary_op_stub());
   return READ_BYTE_FIELD(this, kBinaryOpTypeOffset);
@@ -2862,6 +2848,34 @@
 }
 
 
+Heap* Code::heap() {
+  // NOTE: The address() helper is not used, to save one instruction.
+  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+  ASSERT(heap != NULL);
+  ASSERT(heap->isolate() == Isolate::Current());
+  return heap;
+}
+
+
+Isolate* Code::isolate() {
+  return heap()->isolate();
+}
+
+
+Heap* JSGlobalPropertyCell::heap() {
+  // NOTE: The address() helper is not used, to save one instruction.
+  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+  ASSERT(heap != NULL);
+  ASSERT(heap->isolate() == Isolate::Current());
+  return heap;
+}
+
+
+Isolate* JSGlobalPropertyCell::isolate() {
+  return heap()->isolate();
+}
+
+
 Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
   return HeapObject::
       FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
@@ -3028,10 +3042,6 @@
             kHasOnlySimpleThisPropertyAssignments)
 BOOL_ACCESSORS(SharedFunctionInfo,
                compiler_hints,
-               try_full_codegen,
-               kTryFullCodegen)
-BOOL_ACCESSORS(SharedFunctionInfo,
-               compiler_hints,
                allows_lazy_compilation,
                kAllowLazyCompilation)
 
@@ -3299,6 +3309,11 @@
 }
 
 
+bool JSFunction::IsOptimizable() {
+  return code()->kind() == Code::FUNCTION && code()->optimizable();
+}
+
+
 bool JSFunction::IsMarkedForLazyRecompilation() {
   return code() == GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile);
 }
@@ -3929,6 +3944,15 @@
   set_flag(Smi::FromInt(rest_value | AttributesField::encode(attributes)));
 }
 
+
+template<typename Shape, typename Key>
+void Dictionary<Shape, Key>::SetEntry(int entry,
+                                      Object* key,
+                                      Object* value) {
+  SetEntry(entry, key, value, PropertyDetails(Smi::FromInt(0)));
+}
+
+
 template<typename Shape, typename Key>
 void Dictionary<Shape, Key>::SetEntry(int entry,
                                       Object* key,
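
Code::heap() and JSGlobalPropertyCell::heap() above recover the owning heap from the object's page instead of from thread-local state. This works because pages are power-of-two-aligned allocations whose header stores a heap_ back-pointer; a rough sketch of the address arithmetic (constant name assumed from spaces.h):

    // Any interior object address can be masked down to its page header.
    inline Page* PageOf(Address addr) {
      return reinterpret_cast<Page*>(
          reinterpret_cast<uintptr_t>(addr) &
          ~static_cast<uintptr_t>(Page::kPageAlignmentMask));
    }
    // PageOf(object_address)->heap_ is then the owning Heap*.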
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 42f9060..da955da 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -141,13 +141,22 @@
 template<typename Callback>
 class VisitorDispatchTable {
  public:
+  void CopyFrom(VisitorDispatchTable* other) {
+    // We are not using memcpy to guarantee that during the update every
+    // element of the callbacks_ array remains a valid pointer (memcpy
+    // might be implemented as a byte-copying loop).
+    for (int i = 0; i < StaticVisitorBase::kVisitorIdCount; i++) {
+      NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
+    }
+  }
+
   inline Callback GetVisitor(Map* map) {
-    return callbacks_[map->visitor_id()];
+    return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
   }
 
   void Register(StaticVisitorBase::VisitorId id, Callback callback) {
     ASSERT(id < StaticVisitorBase::kVisitorIdCount);  // id is unsigned.
-    callbacks_[id] = callback;
+    callbacks_[id] = reinterpret_cast<AtomicWord>(callback);
   }
 
   template<typename Visitor,
@@ -179,7 +188,7 @@
   }
 
  private:
-  Callback callbacks_[StaticVisitorBase::kVisitorIdCount];
+  AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
 };
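
VisitorDispatchTable now stores AtomicWord entries and copies them one word at a time with NoBarrier_Store, so a concurrent GetVisitor() always reads a whole pointer: either the old callback or the new one, never a torn mix of bytes, which a byte-at-a-time memcpy could produce. Restated as a sketch over the atomicops primitives the hunk relies on:

    // Why not memcpy: a byte-copying implementation could let a racing
    // reader observe a pointer whose low bytes are new and high bytes old.
    // A single aligned word-sized store per slot rules that out.
    void CopySlots(AtomicWord* dst, const AtomicWord* src, int count) {
      for (int i = 0; i < count; i++) {
        NoBarrier_Store(&dst[i], src[i]);  // one indivisible word store
      }
    }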
 
 
diff --git a/src/objects.cc b/src/objects.cc
index 8cb36e9..6ce4c44 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -113,39 +113,47 @@
   if (IsSmi()) {
     return Isolate::Current()->heap()->ToBoolean(Smi::cast(this)->value() != 0);
   }
-  if (IsUndefined() || IsNull()) {
-    return HeapObject::cast(this)->GetHeap()->false_value();
+  HeapObject* heap_object = HeapObject::cast(this);
+  if (heap_object->IsUndefined() || heap_object->IsNull()) {
+    return heap_object->GetHeap()->false_value();
   }
   // An undetectable object is false.
-  if (IsUndetectableObject()) {
-    return HeapObject::cast(this)->GetHeap()->false_value();
+  if (heap_object->IsUndetectableObject()) {
+    return heap_object->GetHeap()->false_value();
   }
-  if (IsString()) {
-    return HeapObject::cast(this)->GetHeap()->ToBoolean(
+  if (heap_object->IsString()) {
+    return heap_object->GetHeap()->ToBoolean(
         String::cast(this)->length() != 0);
   }
-  if (IsHeapNumber()) {
+  if (heap_object->IsHeapNumber()) {
     return HeapNumber::cast(this)->HeapNumberToBoolean();
   }
-  return Isolate::Current()->heap()->true_value();
+  return heap_object->GetHeap()->true_value();
 }
 
 
 void Object::Lookup(String* name, LookupResult* result) {
-  if (IsJSObject()) return JSObject::cast(this)->Lookup(name, result);
   Object* holder = NULL;
-  if (IsString()) {
-    Heap* heap = HeapObject::cast(this)->GetHeap();
-    Context* global_context = heap->isolate()->context()->global_context();
-    holder = global_context->string_function()->instance_prototype();
-  } else if (IsNumber()) {
+  if (IsSmi()) {
     Heap* heap = Isolate::Current()->heap();
     Context* global_context = heap->isolate()->context()->global_context();
     holder = global_context->number_function()->instance_prototype();
-  } else if (IsBoolean()) {
-    Heap* heap = HeapObject::cast(this)->GetHeap();
-    Context* global_context = heap->isolate()->context()->global_context();
-    holder = global_context->boolean_function()->instance_prototype();
+  } else {
+    HeapObject* heap_object = HeapObject::cast(this);
+    if (heap_object->IsJSObject()) {
+      return JSObject::cast(this)->Lookup(name, result);
+    }
+    Heap* heap = heap_object->GetHeap();
+    if (heap_object->IsString()) {
+      Context* global_context = heap->isolate()->context()->global_context();
+      holder = global_context->string_function()->instance_prototype();
+    } else if (heap_object->IsHeapNumber()) {
+      Context* global_context = heap->isolate()->context()->global_context();
+      holder = global_context->number_function()->instance_prototype();
+    } else if (heap_object->IsBoolean()) {
+      Context* global_context = heap->isolate()->context()->global_context();
+      holder = global_context->boolean_function()->instance_prototype();
+    }
   }
   ASSERT(holder != NULL);  // Cannot handle null or undefined.
   JSObject::cast(holder)->Lookup(name, result);
@@ -247,7 +255,6 @@
     LookupResult* result,
     String* name,
     PropertyAttributes* attributes) {
-  Heap* heap = name->GetHeap();
   if (result->IsProperty()) {
     switch (result->type()) {
       case CALLBACKS: {
@@ -299,6 +306,7 @@
 
   // No accessible property found.
   *attributes = ABSENT;
+  Heap* heap = name->GetHeap();
   heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
   return heap->undefined_value();
 }
@@ -309,7 +317,6 @@
     LookupResult* result,
     String* name,
     bool continue_search) {
-  Heap* heap = name->GetHeap();
   if (result->IsProperty()) {
     switch (result->type()) {
       case CALLBACKS: {
@@ -363,7 +370,7 @@
     }
   }
 
-  heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+  GetHeap()->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
   return ABSENT;
 }
 
@@ -397,11 +404,11 @@
                                              Object* value,
                                              PropertyDetails details) {
   ASSERT(!HasFastProperties());
-  Heap* heap = name->GetHeap();
   int entry = property_dictionary()->FindEntry(name);
   if (entry == StringDictionary::kNotFound) {
     Object* store_value = value;
     if (IsGlobalObject()) {
+      Heap* heap = name->GetHeap();
       MaybeObject* maybe_store_value =
           heap->AllocateJSGlobalPropertyCell(value);
       if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
@@ -433,7 +440,6 @@
 
 MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) {
   ASSERT(!HasFastProperties());
-  Heap* heap = GetHeap();
   StringDictionary* dictionary = property_dictionary();
   int entry = dictionary->FindEntry(name);
   if (entry != StringDictionary::kNotFound) {
@@ -441,7 +447,7 @@
     if (IsGlobalObject()) {
       PropertyDetails details = dictionary->DetailsAt(entry);
       if (details.IsDontDelete()) {
-        if (mode != FORCE_DELETION) return heap->false_value();
+        if (mode != FORCE_DELETION) return GetHeap()->false_value();
         // When forced to delete global properties, we have to make a
         // map change to invalidate any ICs that think they can load
         // from the DontDelete cell without checking if it contains
@@ -454,13 +460,13 @@
       }
       JSGlobalPropertyCell* cell =
           JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
-      cell->set_value(heap->the_hole_value());
+      cell->set_value(cell->heap()->the_hole_value());
       dictionary->DetailsAtPut(entry, details.AsDeleted());
     } else {
       return dictionary->DeleteProperty(entry, mode);
     }
   }
-  return heap->true_value();
+  return GetHeap()->true_value();
 }
 
 
@@ -550,22 +556,31 @@
 
 
 MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
-  if (IsJSObject()) {
-    return JSObject::cast(this)->GetElementWithReceiver(receiver, index);
-  }
-
   Object* holder = NULL;
-  Context* global_context = Isolate::Current()->context()->global_context();
-  if (IsString()) {
-    holder = global_context->string_function()->instance_prototype();
-  } else if (IsNumber()) {
+  if (IsSmi()) {
+    Context* global_context = Isolate::Current()->context()->global_context();
     holder = global_context->number_function()->instance_prototype();
-  } else if (IsBoolean()) {
-    holder = global_context->boolean_function()->instance_prototype();
   } else {
-    // Undefined and null have no indexed properties.
-    ASSERT(IsUndefined() || IsNull());
-    return HEAP->undefined_value();
+    HeapObject* heap_object = HeapObject::cast(this);
+
+    if (heap_object->IsJSObject()) {
+      return JSObject::cast(this)->GetElementWithReceiver(receiver, index);
+    }
+    Heap* heap = heap_object->GetHeap();
+    Isolate* isolate = heap->isolate();
+
+    Context* global_context = isolate->context()->global_context();
+    if (heap_object->IsString()) {
+      holder = global_context->string_function()->instance_prototype();
+    } else if (heap_object->IsHeapNumber()) {
+      holder = global_context->number_function()->instance_prototype();
+    } else if (heap_object->IsBoolean()) {
+      holder = global_context->boolean_function()->instance_prototype();
+    } else {
+      // Undefined and null have no indexed properties.
+      ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
+      return heap->undefined_value();
+    }
   }
 
   return JSObject::cast(holder)->GetElementWithReceiver(receiver, index);
@@ -573,14 +588,28 @@
 
 
 Object* Object::GetPrototype() {
+  if (IsSmi()) {
+    Heap* heap = Isolate::Current()->heap();
+    Context* context = heap->isolate()->context()->global_context();
+    return context->number_function()->instance_prototype();
+  }
+
+  HeapObject* heap_object = HeapObject::cast(this);
+
   // The object is either a number, a string, a boolean, or a real JS object.
-  if (IsJSObject()) return JSObject::cast(this)->map()->prototype();
-  Heap* heap = Isolate::Current()->heap();
+  if (heap_object->IsJSObject()) {
+    return JSObject::cast(this)->map()->prototype();
+  }
+  Heap* heap = heap_object->GetHeap();
   Context* context = heap->isolate()->context()->global_context();
 
-  if (IsNumber()) return context->number_function()->instance_prototype();
-  if (IsString()) return context->string_function()->instance_prototype();
-  if (IsBoolean()) {
+  if (heap_object->IsHeapNumber()) {
+    return context->number_function()->instance_prototype();
+  }
+  if (heap_object->IsString()) {
+    return context->string_function()->instance_prototype();
+  }
+  if (heap_object->IsBoolean()) {
     return context->boolean_function()->instance_prototype();
   } else {
     return heap->null_value();
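
The pattern running through these objects.cc hunks: check IsSmi() before anything else, because a Smi is a tagged immediate with no backing HeapObject, so GetHeap() can only be asked of real heap objects and Smis must fall back to the current isolate. A condensed sketch (helper name hypothetical):

    // Hypothetical helper capturing the dispatch used above.
    static Heap* HeapOf(Object* obj) {
      if (obj->IsSmi()) {
        return Isolate::Current()->heap();      // immediates have no page
      }
      return HeapObject::cast(obj)->GetHeap();  // recovered via its page
    }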
@@ -908,8 +937,9 @@
     // All other JSObjects are rather similar to each other (JSObject,
     // JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
     default: {
-      Heap* heap = GetHeap();
-      Object* constructor = map()->constructor();
+      Map* map_of_this = map();
+      Heap* heap = map_of_this->heap();
+      Object* constructor = map_of_this->constructor();
       bool printed = false;
       if (constructor->IsHeapObject() &&
           !heap->Contains(HeapObject::cast(constructor))) {
@@ -1249,6 +1279,22 @@
 }
 
 
+static bool IsIdentifier(UnicodeCache* cache,
+                         unibrow::CharacterStream* buffer) {
+  // Checks whether the buffer contains an identifier (no escape).
+  if (!buffer->has_more()) return false;
+  if (!cache->IsIdentifierStart(buffer->GetNext())) {
+    return false;
+  }
+  while (buffer->has_more()) {
+    if (!cache->IsIdentifierPart(buffer->GetNext())) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
 MaybeObject* JSObject::AddFastProperty(String* name,
                                        Object* value,
                                        PropertyAttributes attributes) {
@@ -1258,7 +1304,7 @@
   // hidden symbols) and is not a real identifier.
   Isolate* isolate = GetHeap()->isolate();
   StringInputBuffer buffer(name);
-  if (!isolate->scanner_constants()->IsIdentifier(&buffer)
+  if (!IsIdentifier(isolate->unicode_cache(), &buffer)
       && name != isolate->heap()->hidden_symbol()) {
     Object* obj;
     { MaybeObject* maybe_obj =
@@ -1350,8 +1396,7 @@
     String* name,
     JSFunction* function,
     PropertyAttributes attributes) {
-  Heap* heap = GetHeap();
-  ASSERT(!heap->InNewSpace(function));
+  ASSERT(!GetHeap()->InNewSpace(function));
 
   // Allocate new instance descriptors with (name, function) added
   ConstantFunctionDescriptor d(name, function, attributes);
@@ -1376,6 +1421,7 @@
 
   // If the old map is the global object map (from new Object()),
   // then transitions are not added to it, so we are done.
+  Heap* heap = old_map->heap();
   if (old_map == heap->isolate()->context()->global_context()->
       object_function()->map()) {
     return function;
@@ -1412,7 +1458,6 @@
                                        Object* value,
                                        PropertyAttributes attributes) {
   ASSERT(!HasFastProperties());
-  Heap* heap = GetHeap();
   StringDictionary* dict = property_dictionary();
   Object* store_value = value;
   if (IsGlobalObject()) {
@@ -1429,6 +1474,7 @@
       dict->SetEntry(entry, name, store_value, details);
       return value;
     }
+    Heap* heap = GetHeap();
     { MaybeObject* maybe_store_value =
           heap->AllocateJSGlobalPropertyCell(value);
       if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
@@ -1450,8 +1496,9 @@
                                    PropertyAttributes attributes,
                                    StrictModeFlag strict_mode) {
   ASSERT(!IsJSGlobalProxy());
-  Heap* heap = GetHeap();
-  if (!map()->is_extensible()) {
+  Map* map_of_this = map();
+  Heap* heap = map_of_this->heap();
+  if (!map_of_this->is_extensible()) {
     if (strict_mode == kNonStrictMode) {
       return heap->undefined_value();
     } else {
@@ -1463,7 +1510,7 @@
   }
   if (HasFastProperties()) {
     // Ensure the descriptor array does not get too big.
-    if (map()->instance_descriptors()->number_of_descriptors() <
+    if (map_of_this->instance_descriptors()->number_of_descriptors() <
         DescriptorArray::kMaxNumberOfDescriptors) {
       if (value->IsJSFunction() && !heap->InNewSpace(value)) {
         return AddConstantFunctionProperty(name,
@@ -1537,7 +1584,7 @@
     return result;
   }
   // Do not add transitions to the map of "new Object()".
-  if (map() == GetHeap()->isolate()->context()->global_context()->
+  if (map() == old_map->heap()->isolate()->context()->global_context()->
       object_function()->map()) {
     return result;
   }
@@ -1836,8 +1883,9 @@
 
 MaybeObject* Map::GetExternalArrayElementsMap(ExternalArrayType array_type,
                                               bool safe_to_add_transition) {
+  Heap* current_heap = heap();
   DescriptorArray* descriptors = instance_descriptors();
-  String* external_array_sentinel_name = GetIsolate()->heap()->empty_symbol();
+  String* external_array_sentinel_name = current_heap->empty_symbol();
 
   if (safe_to_add_transition) {
     // It's only safe to manipulate the descriptor array if it would be
@@ -1845,7 +1893,8 @@
 
     ASSERT(!is_shared());  // no transitions can be added to shared maps.
     // Check if the external array transition already exists.
-    DescriptorLookupCache* cache = heap()->isolate()->descriptor_lookup_cache();
+    DescriptorLookupCache* cache =
+        current_heap->isolate()->descriptor_lookup_cache();
     int index = cache->Lookup(descriptors, external_array_sentinel_name);
     if (index == DescriptorLookupCache::kAbsent) {
       index = descriptors->Search(external_array_sentinel_name);
@@ -1979,7 +2028,6 @@
                                                         String* name,
                                                         Object* value,
                                                         bool check_prototype) {
-  Heap* heap = GetHeap();
   if (check_prototype && !result->IsProperty()) {
     LookupCallbackSetterInPrototypes(name, result);
   }
@@ -2020,6 +2068,7 @@
 
   HandleScope scope;
   Handle<Object> value_handle(value);
+  Heap* heap = GetHeap();
   heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
   return *value_handle;
 }
@@ -2157,7 +2206,6 @@
     String* name,
     Object* value,
     PropertyAttributes attributes) {
-  Heap* heap = GetHeap();
 
   // Make sure that the top context does not change when doing callbacks or
   // interceptor calls.
@@ -2165,9 +2213,11 @@
   LookupResult result;
   LocalLookup(name, &result);
   // Check access rights if needed.
-  if (IsAccessCheckNeeded()
-      && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
-    return SetPropertyWithFailedAccessCheck(&result, name, value, false);
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+      return SetPropertyWithFailedAccessCheck(&result, name, value, false);
+    }
   }
 
   if (IsJSGlobalProxy()) {
@@ -2318,14 +2368,15 @@
                                                   LookupResult* result,
                                                   String* name,
                                                   bool continue_search) {
-  Heap* heap = GetHeap();
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
-    return GetPropertyAttributeWithFailedAccessCheck(receiver,
-                                                     result,
-                                                     name,
-                                                     continue_search);
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) {
+      return GetPropertyAttributeWithFailedAccessCheck(receiver,
+                                                       result,
+                                                       name,
+                                                       continue_search);
+    }
   }
   if (result->IsProperty()) {
     switch (result->type()) {
@@ -2465,10 +2516,10 @@
   // JSGlobalProxy must never be normalized
   ASSERT(!IsJSGlobalProxy());
 
-  Heap* heap = GetHeap();
+  Map* map_of_this = map();
 
   // Allocate new content.
-  int property_count = map()->NumberOfDescribedProperties();
+  int property_count = map_of_this->NumberOfDescribedProperties();
   if (expected_additional_properties > 0) {
     property_count += expected_additional_properties;
   } else {
@@ -2481,9 +2532,9 @@
   }
   StringDictionary* dictionary = StringDictionary::cast(obj);
 
-  DescriptorArray* descs = map()->instance_descriptors();
+  DescriptorArray* descs = map_of_this->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    PropertyDetails details = descs->GetDetails(i);
+    PropertyDetails details(descs->GetDetails(i));
     switch (details.type()) {
       case CONSTANT_FUNCTION: {
         PropertyDetails d =
@@ -2531,11 +2582,14 @@
     }
   }
 
+  Heap* current_heap = map_of_this->heap();
+
   // Copy the next enumeration index from instance descriptor.
-  int index = map()->instance_descriptors()->NextEnumerationIndex();
+  int index = map_of_this->instance_descriptors()->NextEnumerationIndex();
   dictionary->SetNextEnumerationIndex(index);
 
-  { MaybeObject* maybe_obj = heap->isolate()->context()->global_context()->
+  { MaybeObject* maybe_obj =
+        current_heap->isolate()->context()->global_context()->
         normalized_map_cache()->Get(this, mode);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
@@ -2546,17 +2600,17 @@
 
   // Resize the object in the heap if necessary.
   int new_instance_size = new_map->instance_size();
-  int instance_size_delta = map()->instance_size() - new_instance_size;
+  int instance_size_delta = map_of_this->instance_size() - new_instance_size;
   ASSERT(instance_size_delta >= 0);
-  heap->CreateFillerObjectAt(this->address() + new_instance_size,
-                             instance_size_delta);
+  current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
+                                     instance_size_delta);
 
   set_map(new_map);
-  map()->set_instance_descriptors(heap->empty_descriptor_array());
+  new_map->set_instance_descriptors(current_heap->empty_descriptor_array());
 
   set_properties(dictionary);
 
-  heap->isolate()->counters()->props_to_dictionary()->Increment();
+  current_heap->isolate()->counters()->props_to_dictionary()->Increment();
 
 #ifdef DEBUG
   if (FLAG_trace_normalization) {
@@ -2579,10 +2633,11 @@
 MaybeObject* JSObject::NormalizeElements() {
   ASSERT(!HasExternalArrayElements());
   if (HasDictionaryElements()) return this;
-  ASSERT(map()->has_fast_elements());
+  Map* old_map = map();
+  ASSERT(old_map->has_fast_elements());
 
   Object* obj;
-  { MaybeObject* maybe_obj = map()->GetSlowElementsMap();
+  { MaybeObject* maybe_obj = old_map->GetSlowElementsMap();
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   Map* new_map = Map::cast(obj);
@@ -2617,7 +2672,7 @@
   set_map(new_map);
   set_elements(dictionary);
 
-  new_map->GetHeap()->isolate()->counters()->elements_to_dictionary()->
+  new_map->heap()->isolate()->counters()->elements_to_dictionary()->
       Increment();
 
 #ifdef DEBUG
@@ -2634,10 +2689,9 @@
 MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
                                                      DeleteMode mode) {
   // Check local property, ignore interceptor.
-  Heap* heap = GetHeap();
   LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
-  if (!result.IsProperty()) return heap->true_value();
+  if (!result.IsProperty()) return GetHeap()->true_value();
 
   // Normalize object if needed.
   Object* obj;
@@ -2683,7 +2737,6 @@
 
 MaybeObject* JSObject::DeleteElementPostInterceptor(uint32_t index,
                                                     DeleteMode mode) {
-  Heap* heap = GetHeap();
   ASSERT(!HasExternalArrayElements());
   switch (GetElementsKind()) {
     case FAST_ELEMENTS: {
@@ -2711,7 +2764,7 @@
       UNREACHABLE();
       break;
   }
-  return heap->true_value();
+  return GetHeap()->true_value();
 }
 
 
@@ -2884,16 +2937,17 @@
 
 // Check whether this object references another object.
 bool JSObject::ReferencesObject(Object* obj) {
-  Heap* heap = GetHeap();
+  Map* map_of_this = map();
+  Heap* heap = map_of_this->heap();
   AssertNoAllocation no_alloc;
 
   // Is the object the constructor for this object?
-  if (map()->constructor() == obj) {
+  if (map_of_this->constructor() == obj) {
     return true;
   }
 
   // Is the object the prototype for this object?
-  if (map()->prototype() == obj) {
+  if (map_of_this->prototype() == obj) {
     return true;
   }
 
@@ -3503,7 +3557,6 @@
 
 
 Object* JSObject::SlowReverseLookup(Object* value) {
-  Heap* heap = GetHeap();
   if (HasFastProperties()) {
     DescriptorArray* descs = map()->instance_descriptors();
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
@@ -3517,7 +3570,7 @@
         }
       }
     }
-    return heap->undefined_value();
+    return GetHeap()->undefined_value();
   } else {
     return property_dictionary()->SlowReverseLookup(value);
   }
@@ -3621,7 +3674,7 @@
   // Allocate the code cache if not present.
   if (code_cache()->IsFixedArray()) {
     Object* result;
-    { MaybeObject* maybe_result = GetHeap()->AllocateCodeCache();
+    { MaybeObject* maybe_result = code->heap()->AllocateCodeCache();
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     set_code_cache(result);
@@ -3807,7 +3860,6 @@
 
 
 Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) {
-  Heap* heap = GetHeap();
   FixedArray* cache = default_cache();
   int length = cache->length();
   for (int i = 0; i < length; i += kCodeCacheEntrySize) {
@@ -3822,7 +3874,7 @@
       }
     }
   }
-  return heap->undefined_value();
+  return GetHeap()->undefined_value();
 }
 
 
@@ -3913,7 +3965,7 @@
   MUST_USE_RESULT MaybeObject* AsObject() {
     ASSERT(code_ != NULL);
     Object* obj;
-    { MaybeObject* maybe_obj = code_->GetHeap()->AllocateFixedArray(2);
+    { MaybeObject* maybe_obj = code_->heap()->AllocateFixedArray(2);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     FixedArray* pair = FixedArray::cast(obj);
@@ -3991,7 +4043,6 @@
 
 
 MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
-  Heap* heap = GetHeap();
   ASSERT(!array->HasExternalArrayElements());
   switch (array->GetElementsKind()) {
     case JSObject::FAST_ELEMENTS:
@@ -4002,7 +4053,7 @@
 
       // Allocate a temporary fixed array.
       Object* object;
-      { MaybeObject* maybe_object = heap->AllocateFixedArray(size);
+      { MaybeObject* maybe_object = GetHeap()->AllocateFixedArray(size);
         if (!maybe_object->ToObject(&object)) return maybe_object;
       }
       FixedArray* key_array = FixedArray::cast(object);
@@ -4022,12 +4073,11 @@
       UNREACHABLE();
   }
   UNREACHABLE();
-  return heap->null_value();  // Failure case needs to "return" a value.
+  return GetHeap()->null_value();  // Failure case needs to "return" a value.
 }
 
 
 MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
-  Heap* heap = GetHeap();
   int len0 = length();
 #ifdef DEBUG
   if (FLAG_enable_slow_asserts) {
@@ -4053,7 +4103,7 @@
 
   // Allocate the result
   Object* obj;
-  { MaybeObject* maybe_obj = heap->AllocateFixedArray(len0 + extra);
+  { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(len0 + extra);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   // Fill in the content
@@ -5373,7 +5423,7 @@
   if (StringShape(this).IsSymbol()) return false;
 
   Map* map = this->map();
-  Heap* heap = map->GetHeap();
+  Heap* heap = map->heap();
   if (map == heap->string_map()) {
     this->set_map(heap->undetectable_string_map());
     return true;
@@ -5389,8 +5439,8 @@
 bool String::IsEqualTo(Vector<const char> str) {
   Isolate* isolate = GetIsolate();
   int slen = length();
-  Access<ScannerConstants::Utf8Decoder>
-      decoder(isolate->scanner_constants()->utf8_decoder());
+  Access<UnicodeCache::Utf8Decoder>
+      decoder(isolate->unicode_cache()->utf8_decoder());
   decoder->Reset(str.start(), str.length());
   int i;
   for (i = 0; i < slen && decoder->has_more(); i++) {
@@ -5702,17 +5752,18 @@
   // used for constructing objects to the original object prototype.
   // See ECMA-262 13.2.2.
   if (!value->IsJSObject()) {
-    Heap* heap = GetHeap();
     // Copy the map so this does not affect unrelated functions.
     // Remove map transitions because they point to maps with a
     // different prototype.
-    Object* new_map;
+    Object* new_object;
     { MaybeObject* maybe_new_map = map()->CopyDropTransitions();
-      if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
+      if (!maybe_new_map->ToObject(&new_object)) return maybe_new_map;
     }
-    set_map(Map::cast(new_map));
-    map()->set_constructor(value);
-    map()->set_non_instance_prototype(true);
+    Map* new_map = Map::cast(new_object);
+    Heap* heap = new_map->heap();
+    set_map(new_map);
+    new_map->set_constructor(value);
+    new_map->set_non_instance_prototype(true);
     construct_prototype =
         heap->isolate()->context()->global_context()->
             initial_object_prototype();
@@ -5740,7 +5791,7 @@
   ASSERT(shared()->strict_mode() || map() == global_context->function_map());
 
   set_map(no_prototype_map);
-  set_prototype_or_initial_map(GetHeap()->the_hole_value());
+  set_prototype_or_initial_map(no_prototype_map->heap()->the_hole_value());
   return this;
 }
 
@@ -5822,8 +5873,6 @@
 
 
 bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
-  Heap* heap = GetHeap();
-
   // Check the basic conditions for generating inline constructor code.
   if (!FLAG_inline_new
       || !has_only_simple_this_property_assignments()
@@ -5837,6 +5886,8 @@
     return true;
   }
 
+  Heap* heap = GetHeap();
+
   // Traverse the proposed prototype chain looking for setters for properties of
   // the same names as are set by the inline constructor.
   for (Object* obj = prototype;
@@ -6156,7 +6207,7 @@
 
 
 void Code::InvalidateRelocation() {
-  set_relocation_info(GetHeap()->empty_byte_array());
+  set_relocation_info(heap()->empty_byte_array());
 }
 
 
@@ -6456,7 +6507,6 @@
     case KEYED_EXTERNAL_ARRAY_STORE_IC: return "KEYED_EXTERNAL_ARRAY_STORE_IC";
     case CALL_IC: return "CALL_IC";
     case KEYED_CALL_IC: return "KEYED_CALL_IC";
-    case BINARY_OP_IC: return "BINARY_OP_IC";
     case TYPE_RECORDING_BINARY_OP_IC: return "TYPE_RECORDING_BINARY_OP_IC";
     case COMPARE_IC: return "COMPARE_IC";
   }
@@ -6734,7 +6784,6 @@
 
 
 MaybeObject* JSObject::SetElementsLength(Object* len) {
-  Heap* heap = GetHeap();
   // We should never end in here with a pixel or external array.
   ASSERT(AllowsSetElementsLength());
 
@@ -6742,7 +6791,7 @@
   Object* smi_length = Smi::FromInt(0);
   if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
     const int value = Smi::cast(smi_length)->value();
-    if (value < 0) return ArrayLengthRangeError(heap);
+    if (value < 0) return ArrayLengthRangeError(GetHeap());
     switch (GetElementsKind()) {
       case FAST_ELEMENTS: {
         int old_capacity = FixedArray::cast(elements())->length();
@@ -6808,14 +6857,14 @@
     if (len->ToArrayIndex(&length)) {
       return SetSlowElements(len);
     } else {
-      return ArrayLengthRangeError(heap);
+      return ArrayLengthRangeError(GetHeap());
     }
   }
 
   // len is not a number, so make the array size one and
   // set the only element to len.
   Object* obj;
-  { MaybeObject* maybe_obj = heap->AllocateFixedArray(1);
+  { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   FixedArray::cast(obj)->set(0, len);
@@ -6832,6 +6881,22 @@
   // SpiderMonkey behaves this way.
   if (!value->IsJSObject() && !value->IsNull()) return value;
 
+  // From 8.6.2 Object Internal Methods
+  // ...
+  // In addition, if [[Extensible]] is false the value of the [[Class]] and
+  // [[Prototype]] internal properties of the object may not be modified.
+  // ...
+  // Implementation specific extensions that modify [[Class]], [[Prototype]]
+  // or [[Extensible]] must not violate the invariants defined in the preceding
+  // paragraph.
+  if (!this->map()->is_extensible()) {
+    HandleScope scope;
+    Handle<Object> handle(this, heap->isolate());
+    return heap->isolate()->Throw(
+        *FACTORY->NewTypeError("non_extensible_proto",
+                               HandleVector<Object>(&handle, 1)));
+  }
+
   // Before we can set the prototype we need to be sure
   // prototype cycles are prevented.
   // It is sufficient to validate that the receiver is not in the new prototype
@@ -6970,13 +7035,13 @@
 
 
 JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
-  Heap* heap = GetHeap();
-
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
-    return UNDEFINED_ELEMENT;
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+      return UNDEFINED_ELEMENT;
+    }
   }
 
   if (IsJSGlobalProxy()) {
@@ -7042,13 +7107,13 @@
 
 
 bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) {
-  Heap* heap = GetHeap();
-
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
-    return false;
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+      return false;
+    }
   }
 
   // Check for lookup interceptor
@@ -7320,14 +7385,15 @@
                                   Object* value,
                                   StrictModeFlag strict_mode,
                                   bool check_prototype) {
-  Heap* heap = GetHeap();
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
-    HandleScope scope;
-    Handle<Object> value_handle(value);
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
-    return *value_handle;
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
+      HandleScope scope;
+      Handle<Object> value_handle(value);
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+      return *value_handle;
+    }
   }
 
   if (IsJSGlobalProxy()) {
@@ -7522,7 +7588,6 @@
 
 MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
                                                  uint32_t index) {
-  Heap* heap = GetHeap();
   // Get element works for both JSObject and JSArray since
   // JSArray::length cannot change.
   switch (GetElementsKind()) {
@@ -7571,7 +7636,7 @@
 
   // Continue searching via the prototype chain.
   Object* pt = GetPrototype();
-  if (pt->IsNull()) return heap->undefined_value();
+  if (pt->IsNull()) return GetHeap()->undefined_value();
   return pt->GetElementWithReceiver(receiver, index);
 }
 
@@ -7586,7 +7651,6 @@
   Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate);
   Handle<Object> this_handle(receiver, isolate);
   Handle<JSObject> holder_handle(this, isolate);
-
   if (!interceptor->getter()->IsUndefined()) {
     v8::IndexedPropertyGetter getter =
         v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
@@ -7613,12 +7677,13 @@
 
 MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
                                               uint32_t index) {
-  Heap* heap = GetHeap();
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_GET)) {
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
-    return heap->undefined_value();
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_GET)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_GET);
+      return heap->undefined_value();
+    }
   }
 
   if (HasIndexedInterceptor()) {
@@ -7669,6 +7734,7 @@
   }
 
   Object* pt = GetPrototype();
+  Heap* heap = GetHeap();
   if (pt == heap->null_value()) return heap->undefined_value();
   return pt->GetElementWithReceiver(receiver, index);
 }
@@ -7895,7 +7961,6 @@
     JSObject* receiver,
     String* name,
     PropertyAttributes* attributes) {
-  Heap* heap = GetHeap();
   // Check local property in holder, ignore interceptor.
   LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
@@ -7905,7 +7970,7 @@
   // Continue searching via the prototype chain.
   Object* pt = GetPrototype();
   *attributes = ABSENT;
-  if (pt->IsNull()) return heap->undefined_value();
+  if (pt->IsNull()) return GetHeap()->undefined_value();
   return pt->GetPropertyWithReceiver(receiver, name, attributes);
 }
 
@@ -7914,14 +7979,13 @@
     JSObject* receiver,
     String* name,
     PropertyAttributes* attributes) {
-  Heap* heap = GetHeap();
   // Check local property in holder, ignore interceptor.
   LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsProperty()) {
     return GetProperty(receiver, &result, name, attributes);
   }
-  return heap->undefined_value();
+  return GetHeap()->undefined_value();
 }
 
 
@@ -7966,12 +8030,13 @@
 
 
 bool JSObject::HasRealNamedProperty(String* key) {
-  Heap* heap = GetHeap();
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
-    return false;
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+      return false;
+    }
   }
 
   LookupResult result;
@@ -7981,12 +8046,13 @@
 
 
 bool JSObject::HasRealElementProperty(uint32_t index) {
-  Heap* heap = GetHeap();
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
-    return false;
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+      return false;
+    }
   }
 
   // Handle [] on String objects.
@@ -8025,17 +8091,18 @@
   }
   // All possibilities have been handled above already.
   UNREACHABLE();
-  return heap->null_value();
+  return GetHeap()->null_value();
 }
 
 
 bool JSObject::HasRealNamedCallbackProperty(String* key) {
-  Heap* heap = GetHeap();
   // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
-    heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
-    return false;
+  if (IsAccessCheckNeeded()) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+      return false;
+    }
   }
 
   LookupResult result;
@@ -8049,7 +8116,7 @@
     DescriptorArray* descs = map()->instance_descriptors();
     int result = 0;
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
-      PropertyDetails details = descs->GetDetails(i);
+      PropertyDetails details(descs->GetDetails(i));
       if (details.IsProperty() && (details.attributes() & filter) == 0) {
         result++;
       }
@@ -8674,7 +8741,6 @@
 
 template<typename Shape, typename Key>
 MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
-  Heap* heap = GetHeap();
   int capacity = Capacity();
   int nof = NumberOfElements() + n;
   int nod = NumberOfDeletedElements();
@@ -8688,7 +8754,7 @@
 
   const int kMinCapacityForPretenure = 256;
   bool pretenure =
-      (capacity > kMinCapacityForPretenure) && !heap->InNewSpace(this);
+      (capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this);
   Object* obj;
   { MaybeObject* maybe_obj =
         Allocate(nof * 2, pretenure ? TENURED : NOT_TENURED);
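
The pretenuring heuristic above: a table that is already large (capacity > 256) and already living outside new space gets its grown copy allocated directly TENURED, skipping the scavenge-and-promote round trip a new-space allocation would pay. Isolated as a decision function (a sketch, not the real signature):

    // Sketch of the growth-pretenuring decision from the hunk above.
    static PretenureFlag GrowthPretenure(int capacity, bool in_new_space) {
      const int kMinCapacityForPretenure = 256;
      return (capacity > kMinCapacityForPretenure && !in_new_space)
                 ? TENURED
                 : NOT_TENURED;
    }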
@@ -8820,7 +8886,6 @@
 // Collates undefined and nonexistent elements below limit from position
 // zero of the elements. The object stays in Dictionary mode.
 MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
-  Heap* heap = GetHeap();
   ASSERT(HasDictionaryElements());
   // Must stay in dictionary mode, either because of requires_slow_elements,
   // or because we are not going to sort (and therefore compact) all of the
@@ -8830,7 +8895,7 @@
   if (limit > static_cast<uint32_t>(Smi::kMaxValue)) {
     // Allocate space for result before we start mutating the object.
     Object* new_double;
-    { MaybeObject* maybe_new_double = heap->AllocateHeapNumber(0.0);
+    { MaybeObject* maybe_new_double = GetHeap()->AllocateHeapNumber(0.0);
       if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double;
     }
     result_double = HeapNumber::cast(new_double);
@@ -8890,6 +8955,7 @@
 
   uint32_t result = pos;
   PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
+  Heap* heap = GetHeap();
   while (undefs > 0) {
     if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
       // Adding an entry with the key beyond smi-range requires
@@ -8919,9 +8985,10 @@
 // If the object is in dictionary mode, it is converted to fast elements
 // mode.
 MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
-  Heap* heap = GetHeap();
   ASSERT(!HasExternalArrayElements());
 
+  Heap* heap = GetHeap();
+
   if (HasDictionaryElements()) {
     // Convert to fast elements containing only the existing properties.
     // Ordering is irrelevant, since we are going to sort anyway.
@@ -9175,9 +9242,9 @@
 
 MaybeObject* GlobalObject::EnsurePropertyCell(String* name) {
   ASSERT(!HasFastProperties());
-  Heap* heap = GetHeap();
   int entry = property_dictionary()->FindEntry(name);
   if (entry == StringDictionary::kNotFound) {
+    Heap* heap = GetHeap();
     Object* cell;
     { MaybeObject* maybe_cell =
           heap->AllocateJSGlobalPropertyCell(heap->the_hole_value());
@@ -9352,10 +9419,9 @@
 
 
 Object* CompilationCacheTable::Lookup(String* src) {
-  Heap* heap = GetHeap();
   StringKey key(src);
   int entry = FindEntry(&key);
-  if (entry == kNotFound) return heap->undefined_value();
+  if (entry == kNotFound) return GetHeap()->undefined_value();
   return get(EntryToIndex(entry) + 1);
 }
 
@@ -9372,10 +9438,9 @@
 
 Object* CompilationCacheTable::LookupRegExp(String* src,
                                             JSRegExp::Flags flags) {
-  Heap* heap = GetHeap();
   RegExpKey key(src, flags);
   int entry = FindEntry(&key);
-  if (entry == kNotFound) return heap->undefined_value();
+  if (entry == kNotFound) return GetHeap()->undefined_value();
   return get(EntryToIndex(entry) + 1);
 }
 
@@ -9495,10 +9560,9 @@
 
 
 Object* MapCache::Lookup(FixedArray* array) {
-  Heap* heap = GetHeap();
   SymbolsKey key(array);
   int entry = FindEntry(&key);
-  if (entry == kNotFound) return heap->undefined_value();
+  if (entry == kNotFound) return GetHeap()->undefined_value();
   return get(EntryToIndex(entry) + 1);
 }
 
@@ -9619,7 +9683,7 @@
     if (key->IsNumber()) {
       uint32_t number = static_cast<uint32_t>(key->Number());
       if (from <= number && number < to) {
-        SetEntry(i, sentinel, sentinel, Smi::FromInt(0));
+        SetEntry(i, sentinel, sentinel);
         removed_entries++;
       }
     }
@@ -9639,7 +9703,7 @@
   if (details.IsDontDelete() && mode != JSObject::FORCE_DELETION) {
     return heap->false_value();
   }
-  SetEntry(entry, heap->null_value(), heap->null_value(), Smi::FromInt(0));
+  SetEntry(entry, heap->null_value(), heap->null_value());
   HashTable<Shape, Key>::ElementRemoved();
   return heap->true_value();
 }
@@ -9854,7 +9918,6 @@
 // Backwards lookup (slow).
 template<typename Shape, typename Key>
 Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) {
-  Heap* heap = Dictionary<Shape, Key>::GetHeap();
   int capacity = HashTable<Shape, Key>::Capacity();
   for (int i = 0; i < capacity; i++) {
     Object* k =  HashTable<Shape, Key>::KeyAt(i);
@@ -9866,13 +9929,13 @@
       if (e == value) return k;
     }
   }
+  Heap* heap = Dictionary<Shape, Key>::GetHeap();
   return heap->undefined_value();
 }
 
 
 MaybeObject* StringDictionary::TransformPropertiesToFastFor(
     JSObject* obj, int unused_property_fields) {
-  Heap* heap = GetHeap();
   // Make sure we preserve dictionary representation if there are too many
   // descriptors.
   if (NumberOfElements() > DescriptorArray::kMaxNumberOfDescriptors) return obj;
@@ -9892,6 +9955,8 @@
   int instance_descriptor_length = 0;
   int number_of_fields = 0;
 
+  Heap* heap = GetHeap();
+
   // Compute the length of the instance descriptor.
   int capacity = Capacity();
   for (int i = 0; i < capacity; i++) {
@@ -10020,12 +10085,11 @@
 
 // Get the break point info object for this code position.
 Object* DebugInfo::GetBreakPointInfo(int code_position) {
-  Heap* heap = GetHeap();
   // Find the index of the break point info object for this code position.
   int index = GetBreakPointInfoIndex(code_position);
 
   // Return the break point info object if any.
-  if (index == kNoBreakPointInfo) return heap->undefined_value();
+  if (index == kNoBreakPointInfo) return GetHeap()->undefined_value();
   return BreakPointInfo::cast(break_points()->get(index));
 }
 
@@ -10098,10 +10162,9 @@
 
 // Get the break point objects for a code position.
 Object* DebugInfo::GetBreakPointObjects(int code_position) {
-  Heap* heap = GetHeap();
   Object* break_point_info = GetBreakPointInfo(code_position);
   if (break_point_info->IsUndefined()) {
-    return heap->undefined_value();
+    return GetHeap()->undefined_value();
   }
   return BreakPointInfo::cast(break_point_info)->break_point_objects();
 }
@@ -10124,7 +10187,7 @@
 
 Object* DebugInfo::FindBreakPointInfo(Handle<DebugInfo> debug_info,
                                       Handle<Object> break_point_object) {
-  Heap* heap = Isolate::Current()->heap();
+  Heap* heap = debug_info->GetHeap();
   if (debug_info->break_points()->IsUndefined()) return heap->undefined_value();
   for (int i = 0; i < debug_info->break_points()->length(); i++) {
     if (!debug_info->break_points()->get(i)->IsUndefined()) {
diff --git a/src/objects.h b/src/objects.h
index 96e5cb6..03445e8 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -167,7 +167,7 @@
   }
 
   // Conversion for storing details as Object*.
-  inline PropertyDetails(Smi* smi);
+  explicit inline PropertyDetails(Smi* smi);
   inline Smi* AsSmi();
 
   PropertyType type() { return TypeField::decode(value_); }
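
Making the Smi* constructor explicit is what forces the earlier objects.cc change from copy-initialization to direct initialization (PropertyDetails details(descs->GetDetails(i))). A small standalone illustration of the difference, with stub types standing in for the real ones:

    struct Smi {};

    struct PropertyDetails {
      explicit PropertyDetails(Smi*) {}   // blocks implicit Smi* conversions
    };

    void Use(Smi* smi) {
      // PropertyDetails d = smi;         // would no longer compile: explicit
      PropertyDetails d(smi);             // direct initialization still works
      (void)d;
    }

    int main() {
      Smi s;
      Use(&s);
      return 0;
    }
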
@@ -1293,14 +1293,9 @@
   // is a mixture of sign, exponent and mantissa.  Our current platforms are all
   // little endian apart from non-EABI arm which is little endian with big
   // endian floating point word ordering!
-#if !defined(V8_HOST_ARCH_ARM) || defined(USE_ARM_EABI)
   static const int kMantissaOffset = kValueOffset;
   static const int kExponentOffset = kValueOffset + 4;
-#else
-  static const int kMantissaOffset = kValueOffset + 4;
-  static const int kExponentOffset = kValueOffset;
-# define BIG_ENDIAN_FLOATING_POINT 1
-#endif
+
   static const int kSize = kValueOffset + kDoubleSize;
   static const uint32_t kSignMask = 0x80000000u;
   static const uint32_t kExponentMask = 0x7ff00000u;
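
With the big-endian floating-point branch gone, the mantissa word sits at kValueOffset and the exponent word four bytes above it on every supported target. A runnable little-endian sketch of pulling the sign and biased exponent out of that upper word with the masks defined above:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double d = -1.5;
      uint32_t words[2];
      std::memcpy(words, &d, sizeof d);                // little-endian host assumed
      uint32_t hi = words[1];                          // the word at offset +4
      bool negative = (hi & 0x80000000u) != 0;         // kSignMask
      uint32_t biased_exp = (hi & 0x7ff00000u) >> 20;  // kExponentMask
      std::printf("negative=%d biased_exponent=%u\n", negative, biased_exp);
      return 0;
    }
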
@@ -2576,6 +2571,9 @@
   // Sets the entry to (key, value) pair.
   inline void SetEntry(int entry,
                        Object* key,
+                       Object* value);
+  inline void SetEntry(int entry,
+                       Object* key,
                        Object* value,
                        PropertyDetails details);
 
@@ -3239,7 +3237,6 @@
     STORE_IC,
     KEYED_STORE_IC,
     KEYED_EXTERNAL_ARRAY_STORE_IC,
-    BINARY_OP_IC,
     TYPE_RECORDING_BINARY_OP_IC,
     COMPARE_IC,
     // No more than 16 kinds. The value currently encoded in four bits in
@@ -3308,7 +3305,6 @@
   inline bool is_keyed_store_stub() { return kind() == KEYED_STORE_IC; }
   inline bool is_call_stub() { return kind() == CALL_IC; }
   inline bool is_keyed_call_stub() { return kind() == KEYED_CALL_IC; }
-  inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; }
   inline bool is_type_recording_binary_op_stub() {
     return kind() == TYPE_RECORDING_BINARY_OP_IC;
   }
@@ -3366,10 +3362,6 @@
   inline ExternalArrayType external_array_type();
   inline void set_external_array_type(ExternalArrayType value);
 
-  // [binary op type]: For all BINARY_OP_IC.
-  inline byte binary_op_type();
-  inline void set_binary_op_type(byte value);
-
   // [type-recording binary op type]: For all TYPE_RECORDING_BINARY_OP_IC.
   inline byte type_recording_binary_op_type();
   inline void set_type_recording_binary_op_type(byte value);
@@ -3487,6 +3479,10 @@
   void CodeVerify();
 #endif
 
+  // Returns the isolate/heap this code object belongs to.
+  inline Isolate* isolate();
+  inline Heap* heap();
+
   // Max loop nesting marker used to postpone OSR. We don't take loop
   // nesting that is deeper than 5 levels into account.
   static const int kMaxLoopNestingMarker = 6;
@@ -4255,9 +4251,6 @@
   // this.x = y; where y is either a constant or refers to an argument.
   inline bool has_only_simple_this_property_assignments();
 
-  inline bool try_full_codegen();
-  inline void set_try_full_codegen(bool flag);
-
   // Indicates if this function can be lazy compiled.
   // This is used to determine if we can safely flush code from a function
   // when doing GC if we expect that the function will no longer be used.
@@ -4457,13 +4450,12 @@
 
   // Bit positions in compiler_hints.
   static const int kHasOnlySimpleThisPropertyAssignments = 0;
-  static const int kTryFullCodegen = 1;
-  static const int kAllowLazyCompilation = 2;
-  static const int kLiveObjectsMayExist = 3;
-  static const int kCodeAgeShift = 4;
+  static const int kAllowLazyCompilation = 1;
+  static const int kLiveObjectsMayExist = 2;
+  static const int kCodeAgeShift = 3;
   static const int kCodeAgeMask = 0x7;
-  static const int kOptimizationDisabled = 7;
-  static const int kStrictModeFunction = 8;
+  static const int kOptimizationDisabled = 6;
+  static const int kStrictModeFunction = 7;
 
  private:
 #if V8_HOST_ARCH_32_BIT
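
Because these constants are raw bit positions inside the compiler_hints word, deleting kTryFullCodegen renumbers everything after it; that is presumably safe here because the hints are recomputed rather than serialized. An illustrative snippet of how such position constants are typically tested and set:

    #include <cstdio>

    // Bit positions mirroring the renumbered constants above (illustrative).
    const int kAllowLazyCompilation = 1;
    const int kLiveObjectsMayExist = 2;

    bool TestBit(int hints, int position) { return (hints >> position) & 1; }
    int SetBit(int hints, int position) { return hints | (1 << position); }

    int main() {
      int hints = SetBit(0, kAllowLazyCompilation);
      std::printf("%d %d\n", TestBit(hints, kAllowLazyCompilation),
                  TestBit(hints, kLiveObjectsMayExist));   // prints: 1 0
      return 0;
    }
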
@@ -4534,6 +4526,9 @@
   // Tells whether or not this function has been optimized.
   inline bool IsOptimized();
 
+  // Tells whether or not this function can be optimized.
+  inline bool IsOptimizable();
+
   // Mark this function for lazy recompilation. The function will be
   // recompiled the next time it is executed.
   void MarkForLazyRecompilation();
@@ -5158,7 +5153,7 @@
 
 class StringHasher {
  public:
-  inline StringHasher(int length);
+  explicit inline StringHasher(int length);
 
   // Returns true if the hash of this string can be computed without
   // looking at the contents.
@@ -5905,7 +5900,7 @@
  public:
   virtual void Seek(unsigned pos);
   inline StringInputBuffer(): unibrow::InputBuffer<String, String*, 1024>() {}
-  inline StringInputBuffer(String* backing):
+  explicit inline StringInputBuffer(String* backing):
       unibrow::InputBuffer<String, String*, 1024>(backing) {}
 };
 
@@ -5916,7 +5911,7 @@
   virtual void Seek(unsigned pos);
   inline SafeStringInputBuffer()
       : unibrow::InputBuffer<String, String**, 256>() {}
-  inline SafeStringInputBuffer(String** backing)
+  explicit inline SafeStringInputBuffer(String** backing)
       : unibrow::InputBuffer<String, String**, 256>(backing) {}
 };
 
@@ -6009,6 +6004,10 @@
                               kValueOffset + kPointerSize,
                               kSize> BodyDescriptor;
 
+  // Returns the isolate/heap this cell object belongs to.
+  inline Isolate* isolate();
+  inline Heap* heap();
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
 };
diff --git a/src/parser.cc b/src/parser.cc
index 13e0c33..cf84bfa 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,7 +42,6 @@
 #include "string-stream.h"
 
 #include "ast-inl.h"
-#include "jump-target-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -88,12 +87,13 @@
 
 
 RegExpBuilder::RegExpBuilder()
-  : pending_empty_(false),
-    characters_(NULL),
-    terms_(),
-    alternatives_()
+    : zone_(Isolate::Current()->zone()),
+      pending_empty_(false),
+      characters_(NULL),
+      terms_(),
+      alternatives_()
 #ifdef DEBUG
-  , last_added_(ADD_NONE)
+    , last_added_(ADD_NONE)
 #endif
   {}
 
@@ -101,7 +101,7 @@
 void RegExpBuilder::FlushCharacters() {
   pending_empty_ = false;
   if (characters_ != NULL) {
-    RegExpTree* atom = new RegExpAtom(characters_->ToConstVector());
+    RegExpTree* atom = new(zone()) RegExpAtom(characters_->ToConstVector());
     characters_ = NULL;
     text_.Add(atom);
     LAST(ADD_ATOM);
@@ -117,7 +117,7 @@
   } else if (num_text == 1) {
     terms_.Add(text_.last());
   } else {
-    RegExpText* text = new RegExpText();
+    RegExpText* text = new(zone()) RegExpText();
     for (int i = 0; i < num_text; i++)
       text_.Get(i)->AppendToText(text);
     terms_.Add(text);
@@ -178,7 +178,7 @@
   } else if (num_terms == 1) {
     alternative = terms_.last();
   } else {
-    alternative = new RegExpAlternative(terms_.GetList());
+    alternative = new(zone()) RegExpAlternative(terms_.GetList());
   }
   alternatives_.Add(alternative);
   terms_.Clear();
@@ -195,7 +195,7 @@
   if (num_alternatives == 1) {
     return alternatives_.last();
   }
-  return new RegExpDisjunction(alternatives_.GetList());
+  return new(zone()) RegExpDisjunction(alternatives_.GetList());
 }
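
From here on the parser hunks systematically replace bare new with new(zone()), a placement-new into the parser's Zone so AST and regexp nodes are bump-allocated together and reclaimed wholesale when the zone goes away. A toy, self-contained version of the idiom (V8's real Zone is richer and handles alignment):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // Toy bump allocator standing in for v8::internal::Zone.
    class Zone {
     public:
      explicit Zone(std::size_t capacity)
          : buffer_(static_cast<char*>(std::malloc(capacity))), top_(buffer_) {}
      ~Zone() { std::free(buffer_); }        // every node dies with the zone
      void* New(std::size_t size) { void* p = top_; top_ += size; return p; }
     private:
      char* buffer_;
      char* top_;
    };

    void* operator new(std::size_t size, Zone* zone) { return zone->New(size); }

    struct RegExpAtom { int length; };

    int main() {
      Zone zone(1024);
      RegExpAtom* atom = new(&zone) RegExpAtom{3};   // the new(zone()) idiom
      std::printf("%d\n", atom->length);
      return 0;
    }
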
 
 
@@ -214,11 +214,11 @@
     int num_chars = char_vector.length();
     if (num_chars > 1) {
       Vector<const uc16> prefix = char_vector.SubVector(0, num_chars - 1);
-      text_.Add(new RegExpAtom(prefix));
+      text_.Add(new(zone()) RegExpAtom(prefix));
       char_vector = char_vector.SubVector(num_chars - 1, num_chars);
     }
     characters_ = NULL;
-    atom = new RegExpAtom(char_vector);
+    atom = new(zone()) RegExpAtom(char_vector);
     FlushText();
   } else if (text_.length() > 0) {
     ASSERT(last_added_ == ADD_ATOM);
@@ -241,7 +241,7 @@
     UNREACHABLE();
     return;
   }
-  terms_.Add(new RegExpQuantifier(min, max, type, atom));
+  terms_.Add(new(zone()) RegExpQuantifier(min, max, type, atom));
   LAST(ADD_TERM);
 }
 
@@ -408,7 +408,7 @@
 
 
 Scope* Parser::NewScope(Scope* parent, Scope::Type type, bool inside_with) {
-  Scope* result = new Scope(parent, type);
+  Scope* result = new(zone()) Scope(parent, type);
   result->Initialize(inside_with);
   return result;
 }
@@ -462,7 +462,7 @@
 // Parser's scope stack. The constructor sets the parser's top scope
 // to the incoming scope, and the destructor resets it.
 //
-// Additionlaly, it stores transient information used during parsing.
+// Additionally, it stores transient information used during parsing.
 // These scopes are not kept around after parsing or referenced by syntax
 // trees so they can be stack-allocated and hence used by the pre-parser.
 
@@ -496,9 +496,6 @@
   void AddProperty() { expected_property_count_++; }
   int expected_property_count() { return expected_property_count_; }
 
-  void AddLoop() { loop_count_++; }
-  bool ContainsLoops() const { return loop_count_ > 0; }
-
  private:
   // Captures the number of literals that need materialization in the
   // function.  Includes regexp literals, and boilerplate for object
@@ -513,15 +510,13 @@
   bool only_simple_this_property_assignments_;
   Handle<FixedArray> this_property_assignments_;
 
-  // Captures the number of loops inside the scope.
-  int loop_count_;
-
   // Bookkeeping
   Parser* parser_;
   // Previous values
   LexicalScope* lexical_scope_parent_;
   Scope* previous_scope_;
   int previous_with_nesting_level_;
+  unsigned previous_ast_node_id_;
 };
 
 
@@ -530,14 +525,15 @@
     expected_property_count_(0),
     only_simple_this_property_assignments_(false),
     this_property_assignments_(isolate->factory()->empty_fixed_array()),
-    loop_count_(0),
     parser_(parser),
     lexical_scope_parent_(parser->lexical_scope_),
     previous_scope_(parser->top_scope_),
-    previous_with_nesting_level_(parser->with_nesting_level_) {
+    previous_with_nesting_level_(parser->with_nesting_level_),
+    previous_ast_node_id_(isolate->ast_node_id()) {
   parser->top_scope_ = scope;
   parser->lexical_scope_ = this;
   parser->with_nesting_level_ = 0;
+  isolate->set_ast_node_id(AstNode::kFunctionEntryId + 1);
 }
 
 
@@ -546,6 +542,7 @@
   parser_->top_scope_ = previous_scope_;
   parser_->lexical_scope_ = lexical_scope_parent_;
   parser_->with_nesting_level_ = previous_with_nesting_level_;
+  parser_->isolate()->set_ast_node_id(previous_ast_node_id_);
 }
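
The ast_node_id handling added to LexicalScope follows the same RAII save-and-restore dance already used for the scope pointer and with-nesting level: the constructor stashes the old counter and resets it, and the destructor restores it even on early exits. The pattern in isolation:

    #include <cstdio>

    static unsigned ast_node_id = 0;   // stands in for the isolate's counter

    class SaveAndResetId {
     public:
      explicit SaveAndResetId(unsigned fresh)
          : previous_(ast_node_id) { ast_node_id = fresh; }
      ~SaveAndResetId() { ast_node_id = previous_; }   // restore on scope exit
     private:
      unsigned previous_;
    };

    int main() {
      ast_node_id = 7;
      { SaveAndResetId scope(1); std::printf("%u\n", ast_node_id); }  // 1
      std::printf("%u\n", ast_node_id);                               // 7
      return 0;
    }
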
 
 
@@ -579,7 +576,7 @@
     : isolate_(script->GetIsolate()),
       symbol_cache_(pre_data ? pre_data->symbol_count() : 0),
       script_(script),
-      scanner_(isolate_->scanner_constants()),
+      scanner_(isolate_->unicode_cache()),
       top_scope_(NULL),
       with_nesting_level_(0),
       lexical_scope_(NULL),
@@ -601,7 +598,7 @@
 
   HistogramTimerScope timer(isolate()->counters()->parse());
   isolate()->counters()->total_parse_size()->Increment(source->length());
-  fni_ = new FuncNameInferrer();
+  fni_ = new(zone()) FuncNameInferrer();
 
   // Initialize parser state.
   source->TryFlatten();
@@ -652,7 +649,7 @@
       CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
     }
     if (ok) {
-      result = new FunctionLiteral(
+      result = new(zone()) FunctionLiteral(
           no_name,
           top_scope_,
           body,
@@ -663,8 +660,7 @@
           0,
           0,
           source->length(),
-          false,
-          lexical_scope.ContainsLoops());
+          false);
     } else if (stack_overflow_) {
       isolate()->StackOverflow();
     }
@@ -713,7 +709,7 @@
   ASSERT(target_stack_ == NULL);
 
   Handle<String> name(String::cast(shared_info->name()));
-  fni_ = new FuncNameInferrer();
+  fni_ = new(zone()) FuncNameInferrer();
   fni_->PushEnclosingName(name);
 
   mode_ = PARSE_EAGERLY;
@@ -1252,7 +1248,7 @@
       // one must take great care not to treat it as a
       // fall-through. It is much easier just to wrap the entire
       // try-statement in a statement block and put the labels there
-      Block* result = new Block(labels, 1, false);
+      Block* result = new(zone()) Block(labels, 1, false);
       Target target(&this->target_stack_, result);
       TryStatement* statement = ParseTryStatement(CHECK_OK);
       if (statement) {
@@ -1350,13 +1346,13 @@
   // a performance issue since it may lead to repeated
   // Runtime::DeclareContextSlot() calls.
   VariableProxy* proxy = top_scope_->NewUnresolved(name, inside_with());
-  top_scope_->AddDeclaration(new Declaration(proxy, mode, fun));
+  top_scope_->AddDeclaration(new(zone()) Declaration(proxy, mode, fun));
 
   // For global const variables we bind the proxy to a variable.
   if (mode == Variable::CONST && top_scope_->is_global_scope()) {
     ASSERT(resolve);  // should be set by all callers
     Variable::Kind kind = Variable::NORMAL;
-    var = new Variable(top_scope_, name, Variable::CONST, true, kind);
+    var = new(zone()) Variable(top_scope_, name, Variable::CONST, true, kind);
   }
 
   // If requested and we have a local variable, bind the proxy to the variable
@@ -1444,10 +1440,11 @@
   // TODO(1240846): It's weird that native function declarations are
   // introduced dynamically when we meet their declarations, whereas
   // other functions are set up when entering the surrounding scope.
-  SharedFunctionInfoLiteral* lit = new SharedFunctionInfoLiteral(shared);
+  SharedFunctionInfoLiteral* lit =
+      new(zone()) SharedFunctionInfoLiteral(shared);
   VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
-  return new ExpressionStatement(
-      new Assignment(Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
+  return new(zone()) ExpressionStatement(new(zone()) Assignment(
+      Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
 }
 
 
@@ -1479,7 +1476,7 @@
   // (ECMA-262, 3rd, 12.2)
   //
   // Construct block expecting 16 statements.
-  Block* result = new Block(labels, 16, false);
+  Block* result = new(zone()) Block(labels, 16, false);
   Target target(&this->target_stack_, result);
   Expect(Token::LBRACE, CHECK_OK);
   while (peek() != Token::RBRACE) {
@@ -1549,7 +1546,7 @@
   // is inside an initializer block, it is ignored.
   //
   // Create new block with one expected declaration.
-  Block* block = new Block(NULL, 1, true);
+  Block* block = new(zone()) Block(NULL, 1, true);
   VariableProxy* last_var = NULL;  // the last variable declared
   int nvars = 0;  // the number of variables declared
   do {
@@ -1650,7 +1647,8 @@
     if (top_scope_->is_global_scope()) {
       // Compute the arguments for the runtime call.
       ZoneList<Expression*>* arguments = new ZoneList<Expression*>(3);
-      arguments->Add(new Literal(name));  // we have at least 1 parameter
+      // We have at least 1 parameter.
+      arguments->Add(new(zone()) Literal(name));
       CallRuntime* initialize;
 
       if (is_const) {
@@ -1662,7 +1660,7 @@
         // Note that the function does different things depending on
         // the number of arguments (1 or 2).
         initialize =
-            new CallRuntime(
+            new(zone()) CallRuntime(
               isolate()->factory()->InitializeConstGlobal_symbol(),
               Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
               arguments);
@@ -1686,13 +1684,13 @@
         // Note that the function does different things depending on
         // the number of arguments (2 or 3).
         initialize =
-            new CallRuntime(
+            new(zone()) CallRuntime(
               isolate()->factory()->InitializeVarGlobal_symbol(),
               Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
               arguments);
       }
 
-      block->AddStatement(new ExpressionStatement(initialize));
+      block->AddStatement(new(zone()) ExpressionStatement(initialize));
     }
 
     // Add an assignment node to the initialization statement block if
@@ -1707,8 +1705,11 @@
     // the top context for variables). Sigh...
     if (value != NULL) {
       Token::Value op = (is_const ? Token::INIT_CONST : Token::INIT_VAR);
-      Assignment* assignment = new Assignment(op, last_var, value, position);
-      if (block) block->AddStatement(new ExpressionStatement(assignment));
+      Assignment* assignment =
+          new(zone()) Assignment(op, last_var, value, position);
+      if (block) {
+        block->AddStatement(new(zone()) ExpressionStatement(assignment));
+      }
     }
 
     if (fni_ != NULL) fni_->Leave();
@@ -1774,7 +1775,7 @@
 
   // Parsed expression statement.
   ExpectSemicolon(CHECK_OK);
-  return new ExpressionStatement(expr);
+  return new(zone()) ExpressionStatement(expr);
 }
 
 
@@ -1794,7 +1795,7 @@
   } else {
     else_statement = EmptyStatement();
   }
-  return new IfStatement(condition, then_statement, else_statement);
+  return new(zone()) IfStatement(condition, then_statement, else_statement);
 }
 
 
@@ -1824,7 +1825,7 @@
     return NULL;
   }
   ExpectSemicolon(CHECK_OK);
-  return new ContinueStatement(target);
+  return new(zone()) ContinueStatement(target);
 }
 
 
@@ -1859,7 +1860,7 @@
     return NULL;
   }
   ExpectSemicolon(CHECK_OK);
-  return new BreakStatement(target);
+  return new(zone()) BreakStatement(target);
 }
 
 
@@ -1880,7 +1881,7 @@
   if (!top_scope_->is_function_scope()) {
     Handle<String> type = isolate()->factory()->illegal_return_symbol();
     Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
-    return new ExpressionStatement(throw_error);
+    return new(zone()) ExpressionStatement(throw_error);
   }
 
   Token::Value tok = peek();
@@ -1889,12 +1890,12 @@
       tok == Token::RBRACE ||
       tok == Token::EOS) {
     ExpectSemicolon(CHECK_OK);
-    return new ReturnStatement(GetLiteralUndefined());
+    return new(zone()) ReturnStatement(GetLiteralUndefined());
   }
 
   Expression* expr = ParseExpression(true, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
-  return new ReturnStatement(expr);
+  return new(zone()) ReturnStatement(expr);
 }
 
 
@@ -1903,7 +1904,7 @@
                           bool is_catch_block,
                           bool* ok) {
   // Parse the statement and collect escaping labels.
-  ZoneList<BreakTarget*>* target_list = new ZoneList<BreakTarget*>(0);
+  ZoneList<Label*>* target_list = new ZoneList<Label*>(0);
   TargetCollector collector(target_list);
   Statement* stat;
   { Target target(&this->target_stack_, &collector);
@@ -1915,21 +1916,21 @@
   // Create resulting block with two statements.
   // 1: Evaluate the with expression.
   // 2: The try-finally block evaluating the body.
-  Block* result = new Block(NULL, 2, false);
+  Block* result = new(zone()) Block(NULL, 2, false);
 
   if (result != NULL) {
-    result->AddStatement(new WithEnterStatement(obj, is_catch_block));
+    result->AddStatement(new(zone()) WithEnterStatement(obj, is_catch_block));
 
     // Create body block.
-    Block* body = new Block(NULL, 1, false);
+    Block* body = new(zone()) Block(NULL, 1, false);
     body->AddStatement(stat);
 
     // Create exit block.
-    Block* exit = new Block(NULL, 1, false);
-    exit->AddStatement(new WithExitStatement());
+    Block* exit = new(zone()) Block(NULL, 1, false);
+    exit->AddStatement(new(zone()) WithExitStatement());
 
     // Return a try-finally statement.
-    TryFinallyStatement* wrapper = new TryFinallyStatement(body, exit);
+    TryFinallyStatement* wrapper = new(zone()) TryFinallyStatement(body, exit);
     wrapper->set_escaping_targets(collector.targets());
     result->AddStatement(wrapper);
   }
@@ -1986,7 +1987,7 @@
     statements->Add(stat);
   }
 
-  return new CaseClause(label, statements, pos);
+  return new(zone()) CaseClause(label, statements, pos);
 }
 
 
@@ -1995,7 +1996,7 @@
   // SwitchStatement ::
   //   'switch' '(' Expression ')' '{' CaseClause* '}'
 
-  SwitchStatement* statement = new SwitchStatement(labels);
+  SwitchStatement* statement = new(zone()) SwitchStatement(labels);
   Target target(&this->target_stack_, statement);
 
   Expect(Token::SWITCH, CHECK_OK);
@@ -2031,7 +2032,7 @@
   Expression* exception = ParseExpression(true, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
 
-  return new ExpressionStatement(new Throw(exception, pos));
+  return new(zone()) ExpressionStatement(new(zone()) Throw(exception, pos));
 }
 
 
@@ -2049,7 +2050,7 @@
 
   Expect(Token::TRY, CHECK_OK);
 
-  ZoneList<BreakTarget*>* target_list = new ZoneList<BreakTarget*>(0);
+  ZoneList<Label*>* target_list = new ZoneList<Label*>(0);
   TargetCollector collector(target_list);
   Block* try_block;
 
@@ -2072,7 +2073,7 @@
   // then we will need to collect jump targets from the catch block. Since
   // we don't know yet if there will be a finally block, we always collect
   // the jump targets.
-  ZoneList<BreakTarget*>* catch_target_list = new ZoneList<BreakTarget*>(0);
+  ZoneList<Label*>* catch_target_list = new ZoneList<Label*>(0);
   TargetCollector catch_collector(catch_target_list);
   bool has_catch = false;
   if (tok == Token::CATCH) {
@@ -2095,9 +2096,10 @@
       // executing the finally block.
       catch_var =
           top_scope_->NewTemporary(isolate()->factory()->catch_var_symbol());
-      Literal* name_literal = new Literal(name);
-      VariableProxy* catch_var_use = new VariableProxy(catch_var);
-      Expression* obj = new CatchExtensionObject(name_literal, catch_var_use);
+      Literal* name_literal = new(zone()) Literal(name);
+      VariableProxy* catch_var_use = new(zone()) VariableProxy(catch_var);
+      Expression* obj =
+          new(zone()) CatchExtensionObject(name_literal, catch_var_use);
       { Target target(&this->target_stack_, &catch_collector);
         catch_block = WithHelper(obj, NULL, true, CHECK_OK);
       }
@@ -2121,11 +2123,11 @@
   //   'try { try { } catch { } } finally { }'
 
   if (catch_block != NULL && finally_block != NULL) {
-    VariableProxy* catch_var_defn = new VariableProxy(catch_var);
+    VariableProxy* catch_var_defn = new(zone()) VariableProxy(catch_var);
     TryCatchStatement* statement =
-        new TryCatchStatement(try_block, catch_var_defn, catch_block);
+        new(zone()) TryCatchStatement(try_block, catch_var_defn, catch_block);
     statement->set_escaping_targets(collector.targets());
-    try_block = new Block(NULL, 1, false);
+    try_block = new(zone()) Block(NULL, 1, false);
     try_block->AddStatement(statement);
     catch_block = NULL;
   }
@@ -2133,12 +2135,13 @@
   TryStatement* result = NULL;
   if (catch_block != NULL) {
     ASSERT(finally_block == NULL);
-    VariableProxy* catch_var_defn = new VariableProxy(catch_var);
-    result = new TryCatchStatement(try_block, catch_var_defn, catch_block);
+    VariableProxy* catch_var_defn = new(zone()) VariableProxy(catch_var);
+    result =
+        new(zone()) TryCatchStatement(try_block, catch_var_defn, catch_block);
     result->set_escaping_targets(collector.targets());
   } else {
     ASSERT(finally_block != NULL);
-    result = new TryFinallyStatement(try_block, finally_block);
+    result = new(zone()) TryFinallyStatement(try_block, finally_block);
     // Add the jump targets of the try block and the catch block.
     for (int i = 0; i < collector.targets()->length(); i++) {
       catch_collector.AddTarget(collector.targets()->at(i));
@@ -2155,8 +2158,7 @@
   // DoStatement ::
   //   'do' Statement 'while' '(' Expression ')' ';'
 
-  lexical_scope_->AddLoop();
-  DoWhileStatement* loop = new DoWhileStatement(labels);
+  DoWhileStatement* loop = new(zone()) DoWhileStatement(labels);
   Target target(&this->target_stack_, loop);
 
   Expect(Token::DO, CHECK_OK);
@@ -2170,7 +2172,6 @@
   }
 
   Expression* cond = ParseExpression(true, CHECK_OK);
-  if (cond != NULL) cond->set_is_loop_condition(true);
   Expect(Token::RPAREN, CHECK_OK);
 
   // Allow do-statements to be terminated with and without
@@ -2188,14 +2189,12 @@
   // WhileStatement ::
   //   'while' '(' Expression ')' Statement
 
-  lexical_scope_->AddLoop();
-  WhileStatement* loop = new WhileStatement(labels);
+  WhileStatement* loop = new(zone()) WhileStatement(labels);
   Target target(&this->target_stack_, loop);
 
   Expect(Token::WHILE, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
   Expression* cond = ParseExpression(true, CHECK_OK);
-  if (cond != NULL) cond->set_is_loop_condition(true);
   Expect(Token::RPAREN, CHECK_OK);
   Statement* body = ParseStatement(NULL, CHECK_OK);
 
@@ -2208,7 +2207,6 @@
   // ForStatement ::
   //   'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
 
-  lexical_scope_->AddLoop();
   Statement* init = NULL;
 
   Expect(Token::FOR, CHECK_OK);
@@ -2219,7 +2217,7 @@
       Block* variable_statement =
           ParseVariableDeclarations(false, &each, CHECK_OK);
       if (peek() == Token::IN && each != NULL) {
-        ForInStatement* loop = new ForInStatement(labels);
+        ForInStatement* loop = new(zone()) ForInStatement(labels);
         Target target(&this->target_stack_, loop);
 
         Expect(Token::IN, CHECK_OK);
@@ -2228,7 +2226,7 @@
 
         Statement* body = ParseStatement(NULL, CHECK_OK);
         loop->Initialize(each, enumerable, body);
-        Block* result = new Block(NULL, 2, false);
+        Block* result = new(zone()) Block(NULL, 2, false);
         result->AddStatement(variable_statement);
         result->AddStatement(loop);
         // Parsed for-in loop w/ variable/const declaration.
@@ -2249,7 +2247,7 @@
               isolate()->factory()->invalid_lhs_in_for_in_symbol();
           expression = NewThrowReferenceError(type);
         }
-        ForInStatement* loop = new ForInStatement(labels);
+        ForInStatement* loop = new(zone()) ForInStatement(labels);
         Target target(&this->target_stack_, loop);
 
         Expect(Token::IN, CHECK_OK);
@@ -2262,13 +2260,13 @@
         return loop;
 
       } else {
-        init = new ExpressionStatement(expression);
+        init = new(zone()) ExpressionStatement(expression);
       }
     }
   }
 
   // Standard 'for' loop
-  ForStatement* loop = new ForStatement(labels);
+  ForStatement* loop = new(zone()) ForStatement(labels);
   Target target(&this->target_stack_, loop);
 
   // Parsed initializer at this point.
@@ -2277,14 +2275,13 @@
   Expression* cond = NULL;
   if (peek() != Token::SEMICOLON) {
     cond = ParseExpression(true, CHECK_OK);
-    if (cond != NULL) cond->set_is_loop_condition(true);
   }
   Expect(Token::SEMICOLON, CHECK_OK);
 
   Statement* next = NULL;
   if (peek() != Token::RPAREN) {
     Expression* exp = ParseExpression(true, CHECK_OK);
-    next = new ExpressionStatement(exp);
+    next = new(zone()) ExpressionStatement(exp);
   }
   Expect(Token::RPAREN, CHECK_OK);
 
@@ -2305,7 +2302,7 @@
     Expect(Token::COMMA, CHECK_OK);
     int position = scanner().location().beg_pos;
     Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-    result = new BinaryOperation(Token::COMMA, result, right, position);
+    result = new(zone()) BinaryOperation(Token::COMMA, result, right, position);
   }
   return result;
 }
@@ -2377,7 +2374,7 @@
     fni_->Leave();
   }
 
-  return new Assignment(op, expression, right, pos);
+  return new(zone()) Assignment(op, expression, right, pos);
 }
 
 
@@ -2399,7 +2396,7 @@
   Expect(Token::COLON, CHECK_OK);
   int right_position = scanner().peek_location().beg_pos;
   Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-  return new Conditional(expression, left, right,
+  return new(zone()) Conditional(expression, left, right,
                          left_position, right_position);
 }
 
@@ -2487,12 +2484,12 @@
         x = NewCompareNode(cmp, x, y, position);
         if (cmp != op) {
           // The comparison was negated - add a NOT.
-          x = new UnaryOperation(Token::NOT, x);
+          x = new(zone()) UnaryOperation(Token::NOT, x);
         }
 
       } else {
         // We have a "normal" binary operation.
-        x = new BinaryOperation(op, x, y, position);
+        x = new(zone()) BinaryOperation(op, x, y, position);
       }
     }
   }
@@ -2509,15 +2506,15 @@
     bool is_strict = (op == Token::EQ_STRICT);
     Literal* x_literal = x->AsLiteral();
     if (x_literal != NULL && x_literal->IsNull()) {
-      return new CompareToNull(is_strict, y);
+      return new(zone()) CompareToNull(is_strict, y);
     }
 
     Literal* y_literal = y->AsLiteral();
     if (y_literal != NULL && y_literal->IsNull()) {
-      return new CompareToNull(is_strict, x);
+      return new(zone()) CompareToNull(is_strict, x);
     }
   }
-  return new CompareOperation(op, x, y, position);
+  return new(zone()) CompareOperation(op, x, y, position);
 }
 
 
@@ -2564,7 +2561,7 @@
       }
     }
 
-    return new UnaryOperation(op, expression);
+    return new(zone()) UnaryOperation(op, expression);
 
   } else if (Token::IsCountOp(op)) {
     op = Next();
@@ -2585,8 +2582,10 @@
     }
 
     int position = scanner().location().beg_pos;
-    IncrementOperation* increment = new IncrementOperation(op, expression);
-    return new CountOperation(true /* prefix */, increment, position);
+    return new(zone()) CountOperation(op,
+                                      true /* prefix */,
+                                      expression,
+                                      position);
 
   } else {
     return ParsePostfixExpression(ok);
@@ -2618,8 +2617,11 @@
 
     Token::Value next = Next();
     int position = scanner().location().beg_pos;
-    IncrementOperation* increment = new IncrementOperation(next, expression);
-    expression = new CountOperation(false /* postfix */, increment, position);
+    expression =
+        new(zone()) CountOperation(next,
+                                   false /* postfix */,
+                                   expression,
+                                   position);
   }
   return expression;
 }
@@ -2642,7 +2644,7 @@
         Consume(Token::LBRACK);
         int pos = scanner().location().beg_pos;
         Expression* index = ParseExpression(true, CHECK_OK);
-        result = new Property(result, index, pos);
+        result = new(zone()) Property(result, index, pos);
         Expect(Token::RBRACK, CHECK_OK);
         break;
       }
@@ -2680,7 +2682,7 @@
         Consume(Token::PERIOD);
         int pos = scanner().location().beg_pos;
         Handle<String> name = ParseIdentifierName(CHECK_OK);
-        result = new Property(result, new Literal(name), pos);
+        result = new(zone()) Property(result, new(zone()) Literal(name), pos);
         if (fni_ != NULL) fni_->PushLiteralName(name);
         break;
       }
@@ -2716,7 +2718,7 @@
 
   if (!stack->is_empty()) {
     int last = stack->pop();
-    result = new CallNew(result, new ZoneList<Expression*>(0), last);
+    result = new(zone()) CallNew(result, new ZoneList<Expression*>(0), last);
   }
   return result;
 }
@@ -2761,7 +2763,7 @@
         Consume(Token::LBRACK);
         int pos = scanner().location().beg_pos;
         Expression* index = ParseExpression(true, CHECK_OK);
-        result = new Property(result, index, pos);
+        result = new(zone()) Property(result, index, pos);
         Expect(Token::RBRACK, CHECK_OK);
         break;
       }
@@ -2769,7 +2771,7 @@
         Consume(Token::PERIOD);
         int pos = scanner().location().beg_pos;
         Handle<String> name = ParseIdentifierName(CHECK_OK);
-        result = new Property(result, new Literal(name), pos);
+        result = new(zone()) Property(result, new(zone()) Literal(name), pos);
         if (fni_ != NULL) fni_->PushLiteralName(name);
         break;
       }
@@ -2797,7 +2799,7 @@
 
   Expect(Token::DEBUGGER, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
-  return new DebuggerStatement();
+  return new(zone()) DebuggerStatement();
 }
 
 
@@ -2866,31 +2868,34 @@
 
     case Token::NULL_LITERAL:
       Consume(Token::NULL_LITERAL);
-      result = new Literal(isolate()->factory()->null_value());
+      result = new(zone()) Literal(isolate()->factory()->null_value());
       break;
 
     case Token::TRUE_LITERAL:
       Consume(Token::TRUE_LITERAL);
-      result = new Literal(isolate()->factory()->true_value());
+      result = new(zone()) Literal(isolate()->factory()->true_value());
       break;
 
     case Token::FALSE_LITERAL:
       Consume(Token::FALSE_LITERAL);
-      result = new Literal(isolate()->factory()->false_value());
+      result = new(zone()) Literal(isolate()->factory()->false_value());
       break;
 
     case Token::IDENTIFIER:
     case Token::FUTURE_RESERVED_WORD: {
       Handle<String> name = ParseIdentifier(CHECK_OK);
       if (fni_ != NULL) fni_->PushVariableName(name);
-      result = top_scope_->NewUnresolved(name, inside_with());
+      result = top_scope_->NewUnresolved(name,
+                                         inside_with(),
+                                         scanner().location().beg_pos);
       break;
     }
 
     case Token::NUMBER: {
       Consume(Token::NUMBER);
       ASSERT(scanner().is_literal_ascii());
-      double value = StringToDouble(scanner().literal_ascii_string(),
+      double value = StringToDouble(isolate()->unicode_cache(),
+                                    scanner().literal_ascii_string(),
                                     ALLOW_HEX | ALLOW_OCTALS);
       result = NewNumberLiteral(value);
       break;
@@ -2899,7 +2904,7 @@
     case Token::STRING: {
       Consume(Token::STRING);
       Handle<String> symbol = GetSymbol(CHECK_OK);
-      result = new Literal(symbol);
+      result = new(zone()) Literal(symbol);
       if (fni_ != NULL) fni_->PushLiteralName(symbol);
       break;
     }
@@ -3026,7 +3031,7 @@
     literals->set_map(isolate()->heap()->fixed_cow_array_map());
   }
 
-  return new ArrayLiteral(literals, values,
+  return new(zone()) ArrayLiteral(literals, values,
                           literal_index, is_simple, depth);
 }
 
@@ -3304,7 +3309,7 @@
     // Allow any number of parameters for compatibility with JSC.
     // Specification only allows zero parameters for get and one for set.
     ObjectLiteral::Property* property =
-        new ObjectLiteral::Property(is_getter, value);
+        new(zone()) ObjectLiteral::Property(is_getter, value);
     return property;
   } else {
     ReportUnexpectedToken(next);
@@ -3370,7 +3375,7 @@
         }
         // Failed to parse as get/set property, so it's just a property
         // called "get" or "set".
-        key = new Literal(id);
+        key = new(zone()) Literal(id);
         break;
       }
       case Token::STRING: {
@@ -3382,13 +3387,14 @@
           key = NewNumberLiteral(index);
           break;
         }
-        key = new Literal(string);
+        key = new(zone()) Literal(string);
         break;
       }
       case Token::NUMBER: {
         Consume(Token::NUMBER);
         ASSERT(scanner().is_literal_ascii());
-        double value = StringToDouble(scanner().literal_ascii_string(),
+        double value = StringToDouble(isolate()->unicode_cache(),
+                                      scanner().literal_ascii_string(),
                                       ALLOW_HEX | ALLOW_OCTALS);
         key = NewNumberLiteral(value);
         break;
@@ -3397,7 +3403,7 @@
         if (Token::IsKeyword(next)) {
           Consume(next);
           Handle<String> string = GetSymbol(CHECK_OK);
-          key = new Literal(string);
+          key = new(zone()) Literal(string);
         } else {
           // Unexpected token.
           Token::Value next = Next();
@@ -3411,7 +3417,7 @@
     Expression* value = ParseAssignmentExpression(true, CHECK_OK);
 
     ObjectLiteral::Property* property =
-        new ObjectLiteral::Property(key, value);
+        new(zone()) ObjectLiteral::Property(key, value);
 
     // Mark object literals that contain function literals and pretenure the
     // literal so it can be added as a constant function property.
@@ -3450,7 +3456,7 @@
                                        &is_simple,
                                        &fast_elements,
                                        &depth);
-  return new ObjectLiteral(constant_properties,
+  return new(zone()) ObjectLiteral(constant_properties,
                            properties,
                            literal_index,
                            is_simple,
@@ -3475,7 +3481,7 @@
   Handle<String> js_flags = NextLiteralString(TENURED);
   Next();
 
-  return new RegExpLiteral(js_pattern, js_flags, literal_index);
+  return new(zone()) RegExpLiteral(js_pattern, js_flags, literal_index);
 }
 
 
@@ -3519,16 +3525,22 @@
   }
 
   int num_parameters = 0;
+  Scope* scope = NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
+  ZoneList<Statement*>* body = new ZoneList<Statement*>(8);
+  int materialized_literal_count;
+  int expected_property_count;
+  int start_pos;
+  int end_pos;
+  bool only_simple_this_property_assignments;
+  Handle<FixedArray> this_property_assignments;
   // Parse function body.
-  { Scope* scope =
-        NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
-    LexicalScope lexical_scope(this, scope, isolate());
+  { LexicalScope lexical_scope(this, scope, isolate());
     top_scope_->SetScopeName(name);
 
     //  FormalParameterList ::
     //    '(' (Identifier)*[','] ')'
     Expect(Token::LPAREN, CHECK_OK);
-    int start_pos = scanner().location().beg_pos;
+    start_pos = scanner().location().beg_pos;
     Scanner::Location name_loc = Scanner::NoLocation();
     Scanner::Location dupe_loc = Scanner::NoLocation();
     Scanner::Location reserved_loc = Scanner::NoLocation();
@@ -3565,7 +3577,6 @@
     Expect(Token::RPAREN, CHECK_OK);
 
     Expect(Token::LBRACE, CHECK_OK);
-    ZoneList<Statement*>* body = new ZoneList<Statement*>(8);
 
     // If we have a named function expression, we add a local variable
     // declaration to the body of the function with the name of the
@@ -3578,9 +3589,9 @@
       VariableProxy* fproxy =
           top_scope_->NewUnresolved(function_name, inside_with());
       fproxy->BindTo(fvar);
-      body->Add(new ExpressionStatement(
-                    new Assignment(Token::INIT_CONST, fproxy,
-                                   new ThisFunction(),
+      body->Add(new(zone()) ExpressionStatement(
+                    new(zone()) Assignment(Token::INIT_CONST, fproxy,
+                                   new(zone()) ThisFunction(),
                                    RelocInfo::kNoPosition)));
     }
 
@@ -3593,11 +3604,6 @@
     parenthesized_function_ = false;  // The bit was set for this function only.
 
     int function_block_pos = scanner().location().beg_pos;
-    int materialized_literal_count;
-    int expected_property_count;
-    int end_pos;
-    bool only_simple_this_property_assignments;
-    Handle<FixedArray> this_property_assignments;
     if (is_lazily_compiled && pre_data() != NULL) {
       FunctionEntry entry = pre_data()->GetFunctionEntry(function_block_pos);
       if (!entry.is_valid()) {
@@ -3672,25 +3678,24 @@
       }
       CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
     }
-
-    FunctionLiteral* function_literal =
-        new FunctionLiteral(name,
-                            top_scope_,
-                            body,
-                            materialized_literal_count,
-                            expected_property_count,
-                            only_simple_this_property_assignments,
-                            this_property_assignments,
-                            num_parameters,
-                            start_pos,
-                            end_pos,
-                            function_name->length() > 0,
-                            lexical_scope.ContainsLoops());
-    function_literal->set_function_token_position(function_token_position);
-
-    if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
-    return function_literal;
   }
+
+  FunctionLiteral* function_literal =
+      new(zone()) FunctionLiteral(name,
+                                  scope,
+                                  body,
+                                  materialized_literal_count,
+                                  expected_property_count,
+                                  only_simple_this_property_assignments,
+                                  this_property_assignments,
+                                  num_parameters,
+                                  start_pos,
+                                  end_pos,
+                                  (function_name->length() > 0));
+  function_literal->set_function_token_position(function_token_position);
+
+  if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
+  return function_literal;
 }
 
 
@@ -3736,7 +3741,7 @@
   }
 
   // We have a valid intrinsics call or a call to a builtin.
-  return new CallRuntime(name, function, args);
+  return new(zone()) CallRuntime(name, function, args);
 }
 
 
@@ -3791,12 +3796,12 @@
 
 
 Literal* Parser::GetLiteralUndefined() {
-  return new Literal(isolate()->factory()->undefined_value());
+  return new(zone()) Literal(isolate()->factory()->undefined_value());
 }
 
 
 Literal* Parser::GetLiteralTheHole() {
-  return new Literal(isolate()->factory()->the_hole_value());
+  return new(zone()) Literal(isolate()->factory()->the_hole_value());
 }
 
 
@@ -3932,7 +3937,7 @@
 }
 
 
-void Parser::RegisterTargetUse(BreakTarget* target, Target* stop) {
+void Parser::RegisterTargetUse(Label* target, Target* stop) {
   // Register that a break target found at the given stop in the
   // target stack has been used from the top of the target stack. Add
   // the break target to any TargetCollectors passed on the stack.
@@ -3944,7 +3949,7 @@
 
 
 Literal* Parser::NewNumberLiteral(double number) {
-  return new Literal(isolate()->factory()->NewNumber(number, TENURED));
+  return new(zone()) Literal(isolate()->factory()->NewNumber(number, TENURED));
 }
 
 
@@ -3991,9 +3996,9 @@
                                                                        TENURED);
 
   ZoneList<Expression*>* args = new ZoneList<Expression*>(2);
-  args->Add(new Literal(type));
-  args->Add(new Literal(array));
-  return new Throw(new CallRuntime(constructor, NULL, args),
+  args->Add(new(zone()) Literal(type));
+  args->Add(new(zone()) Literal(array));
+  return new(zone()) Throw(new(zone()) CallRuntime(constructor, NULL, args),
                    scanner().location().beg_pos);
 }
 
@@ -4316,13 +4321,13 @@
 
       // Build result of subexpression.
       if (type == CAPTURE) {
-        RegExpCapture* capture = new RegExpCapture(body, capture_index);
+        RegExpCapture* capture = new(zone()) RegExpCapture(body, capture_index);
         captures_->at(capture_index - 1) = capture;
         body = capture;
       } else if (type != GROUPING) {
         ASSERT(type == POSITIVE_LOOKAHEAD || type == NEGATIVE_LOOKAHEAD);
         bool is_positive = (type == POSITIVE_LOOKAHEAD);
-        body = new RegExpLookahead(body,
+        body = new(zone()) RegExpLookahead(body,
                                    is_positive,
                                    end_capture_index - capture_index,
                                    capture_index);
@@ -4345,10 +4350,10 @@
       Advance();
       if (multiline_) {
         builder->AddAssertion(
-            new RegExpAssertion(RegExpAssertion::START_OF_LINE));
+            new(zone()) RegExpAssertion(RegExpAssertion::START_OF_LINE));
       } else {
         builder->AddAssertion(
-            new RegExpAssertion(RegExpAssertion::START_OF_INPUT));
+            new(zone()) RegExpAssertion(RegExpAssertion::START_OF_INPUT));
         set_contains_anchor();
       }
       continue;
@@ -4358,7 +4363,7 @@
       RegExpAssertion::Type type =
           multiline_ ? RegExpAssertion::END_OF_LINE :
                        RegExpAssertion::END_OF_INPUT;
-      builder->AddAssertion(new RegExpAssertion(type));
+      builder->AddAssertion(new(zone()) RegExpAssertion(type));
       continue;
     }
     case '.': {
@@ -4366,7 +4371,7 @@
       // everything except \x0a, \x0d, \u2028 and \u2029
       ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
       CharacterRange::AddClassEscape('.', ranges);
-      RegExpTree* atom = new RegExpCharacterClass(ranges, false);
+      RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
       builder->AddAtom(atom);
       break;
     }
@@ -4399,7 +4404,7 @@
         captures_->Add(NULL);
       }
       // Store current state and begin new disjunction parsing.
-      stored_state = new RegExpParserState(stored_state,
+      stored_state = new(zone()) RegExpParserState(stored_state,
                                            type,
                                            captures_started());
       builder = stored_state->builder();
@@ -4419,12 +4424,12 @@
       case 'b':
         Advance(2);
         builder->AddAssertion(
-            new RegExpAssertion(RegExpAssertion::BOUNDARY));
+            new(zone()) RegExpAssertion(RegExpAssertion::BOUNDARY));
         continue;
       case 'B':
         Advance(2);
         builder->AddAssertion(
-            new RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
+            new(zone()) RegExpAssertion(RegExpAssertion::NON_BOUNDARY));
         continue;
       // AtomEscape ::
       //   CharacterClassEscape
@@ -4436,7 +4441,7 @@
         Advance(2);
         ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2);
         CharacterRange::AddClassEscape(c, ranges);
-        RegExpTree* atom = new RegExpCharacterClass(ranges, false);
+        RegExpTree* atom = new(zone()) RegExpCharacterClass(ranges, false);
         builder->AddAtom(atom);
         break;
       }
@@ -4452,7 +4457,7 @@
             builder->AddEmpty();
             break;
           }
-          RegExpTree* atom = new RegExpBackReference(capture);
+          RegExpTree* atom = new(zone()) RegExpBackReference(capture);
           builder->AddAtom(atom);
           break;
         }
@@ -4970,7 +4975,7 @@
     ranges->Add(CharacterRange::Everything());
     is_negated = !is_negated;
   }
-  return new RegExpCharacterClass(ranges, is_negated);
+  return new(zone()) RegExpCharacterClass(ranges, is_negated);
 }
 
 
@@ -5053,7 +5058,7 @@
                                   bool allow_lazy,
                                   ParserRecorder* recorder) {
   Isolate* isolate = Isolate::Current();
-  V8JavaScriptScanner scanner(isolate->scanner_constants());
+  V8JavaScriptScanner scanner(isolate->unicode_cache());
   scanner.Initialize(source);
   intptr_t stack_limit = isolate->stack_guard()->real_climit();
   if (!preparser::PreParser::PreParseProgram(&scanner,
diff --git a/src/parser.h b/src/parser.h
index 74cb049..a63651a 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -280,6 +280,9 @@
   void FlushCharacters();
   void FlushText();
   void FlushTerms();
+  Zone* zone() { return zone_; }
+
+  Zone* zone_;
   bool pending_empty_;
   ZoneList<uc16>* characters_;
   BufferedZoneList<RegExpTree, 2> terms_;
@@ -389,6 +392,7 @@
   };
 
   Isolate* isolate() { return isolate_; }
+  Zone* zone() { return isolate_->zone(); }
 
   uc32 current() { return current_; }
   bool has_more() { return has_more_; }
@@ -453,6 +457,7 @@
   };
 
   Isolate* isolate() { return isolate_; }
+  Zone* zone() { return isolate_->zone(); }
 
   // Called by ParseProgram after setting up the scanner.
   FunctionLiteral* DoParseProgram(Handle<String> source,
@@ -650,7 +655,7 @@
   BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
   IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok);
 
-  void RegisterTargetUse(BreakTarget* target, Target* stop);
+  void RegisterTargetUse(Label* target, Target* stop);
 
   // Factory methods.
 
@@ -778,7 +783,7 @@
  private:
   JsonParser()
       : isolate_(Isolate::Current()),
-        scanner_(isolate_->scanner_constants()) { }
+        scanner_(isolate_->unicode_cache()) { }
   ~JsonParser() { }
 
   Isolate* isolate() { return isolate_; }
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 4b450c1..6511328 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -42,7 +42,6 @@
 #include "v8.h"
 
 #include "platform.h"
-#include "top.h"
 #include "v8threads.h"
 #include "vm-state-inl.h"
 #include "win32-headers.h"
@@ -59,6 +58,9 @@
 }
 
 
+static Mutex* limit_mutex = NULL;
+
+
 void OS::Setup() {
   // Seed the random number generator.
   // Convert the current time to a 64-bit integer first, before converting it
@@ -67,6 +69,7 @@
   // call this setup code within the same millisecond.
   uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srandom(static_cast<unsigned int>(seed));
+  limit_mutex = CreateMutex();
 }
 
 
@@ -119,6 +122,9 @@
 
 
 static void UpdateAllocatedSpaceLimits(void* address, int size) {
+  ASSERT(limit_mutex != NULL);
+  ScopedLock lock(limit_mutex);
+
   lowest_ever_allocated = Min(lowest_ever_allocated, address);
   highest_ever_allocated =
       Max(highest_ever_allocated,
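
limit_mutex now serializes updates to the lowest/highest allocation bookkeeping, with ScopedLock acting as an RAII guard. A self-contained sketch of such a guard over a pthread mutex; the real V8 class has the same shape but uses its own Mutex abstraction:

    #include <pthread.h>
    #include <cstdio>

    class ScopedLock {
     public:
      explicit ScopedLock(pthread_mutex_t* mutex) : mutex_(mutex) {
        pthread_mutex_lock(mutex_);
      }
      ~ScopedLock() { pthread_mutex_unlock(mutex_); }  // released on any exit
     private:
      pthread_mutex_t* mutex_;
    };

    static pthread_mutex_t limit_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int lowest = 0;

    void Update(int value) {
      ScopedLock lock(&limit_mutex);   // serializes concurrent updaters
      if (value < lowest) lowest = value;
    }

    int main() { Update(-1); std::printf("%d\n", lowest); return 0; }
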
@@ -254,6 +260,7 @@
   const int kLibNameLen = FILENAME_MAX + 1;
   char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
 
+  i::Isolate* isolate = ISOLATE;
   // This loop will terminate once the scanning hits an EOF.
   while (true) {
     uintptr_t start, end;
@@ -287,7 +294,7 @@
         snprintf(lib_name, kLibNameLen,
                  "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
       }
-      LOG(SharedLibraryEvent(lib_name, start, end));
+      LOG(isolate, SharedLibraryEvent(lib_name, start, end));
     } else {
       // Entry not describing executable data. Skip to end of line to setup
       // reading the next entry.
@@ -314,94 +321,58 @@
 }
 
 
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
+// The VirtualMemory implementation is taken from platform-win32.cc.
+// The mmap-based virtual memory implementation as it is used on most posix
+// platforms does not work well because Cygwin does not support MAP_FIXED.
+// This causes VirtualMemory::Commit to not always commit the memory region
+// specified.
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
 
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(NULL, size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
+  address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
   size_ = size;
 }
 
 
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
   }
 }
 
 
-bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
-}
-
-
 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-
-  if (mprotect(address, size, prot) != 0) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
     return false;
   }
 
-  UpdateAllocatedSpaceLimits(address, size);
+  UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
   return true;
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+  ASSERT(IsReserved());
+  return VirtualFree(address, size, MEM_DECOMMIT) != false;
 }
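A standalone sketch of the reserve-then-commit pattern described above, using the Win32 calls directly (the sizes and the trivial main() are illustrative):

    #include <windows.h>
    #include <assert.h>

    int main() {
      // Reserve 1 MB of address space; no physical pages are committed yet.
      const size_t kReserved = 1 << 20;
      void* base = VirtualAlloc(NULL, kReserved, MEM_RESERVE, PAGE_NOACCESS);
      assert(base != NULL);

      // Commit one page at an exact address inside the reservation. This
      // is the guarantee mmap cannot give without MAP_FIXED: the kernel
      // may place the mapping somewhere else entirely.
      void* page = VirtualAlloc(base, 4096, MEM_COMMIT, PAGE_READWRITE);
      assert(page == base);
      static_cast<char*>(page)[0] = 42;  // The committed page is usable.

      VirtualFree(base, 4096, MEM_DECOMMIT);  // Uncommit, keep reservation.
      VirtualFree(base, 0, MEM_RELEASE);      // Release the reservation.
      return 0;
    }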
 
 
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
  public:
-  explicit PlatformData(ThreadHandle::Kind kind) {
-    Initialize(kind);
-  }
-
-  void Initialize(ThreadHandle::Kind kind) {
-    switch (kind) {
-      case ThreadHandle::SELF: thread_ = pthread_self(); break;
-      case ThreadHandle::INVALID: thread_ = kNoThread; break;
-    }
-  }
-
+  PlatformData() : thread_(kNoThread) {}
   pthread_t thread_;  // Thread handle for pthread.
 };
 
 
-ThreadHandle::ThreadHandle(Kind kind) {
-  data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
-  data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
-  delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
-  return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
-  return data_->thread_ != kNoThread;
-}
 
 
 Thread::Thread(Isolate* isolate, const Options& options)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData),
       isolate_(isolate),
       stack_size_(options.stack_size) {
   set_name(options.name);
@@ -409,7 +380,7 @@
 
 
 Thread::Thread(Isolate* isolate, const char* name)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData),
       isolate_(isolate),
       stack_size_(0) {
   set_name(name);
@@ -417,6 +388,7 @@
 
 
 Thread::~Thread() {
+  delete data_;
 }
 
 
@@ -425,8 +397,9 @@
   // This is also initialized by the first argument to pthread_create() but we
   // don't know which thread will run first (the original thread or the new
   // one) so we initialize it here too.
-  thread->thread_handle_data()->thread_ = pthread_self();
-  ASSERT(thread->IsValid());
+  thread->data()->thread_ = pthread_self();
+  ASSERT(thread->data()->thread_ != kNoThread);
+  Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return NULL;
 }
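A stripped-down sketch of the benign race the comment above works around (types and names are illustrative): both the creating thread and the new thread store the same pthread handle, so whichever is scheduled first, the field is set before it is read.

    #include <pthread.h>
    #include <stddef.h>

    struct PlatformData { pthread_t thread_; };

    static void* Entry(void* arg) {
      PlatformData* data = static_cast<PlatformData*>(arg);
      // Child-side store: if this thread runs before pthread_create()
      // returns in the parent, the handle is still filled in. Both
      // writers store the same value, so the race is harmless.
      data->thread_ = pthread_self();
      return NULL;
    }

    void StartThread(PlatformData* data) {
      // Parent-side store: pthread_create() writes the handle here too.
      pthread_create(&data->thread_, NULL, Entry, data);
    }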
@@ -439,13 +412,20 @@
 
 
 void Thread::Start() {
-  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
-  ASSERT(IsValid());
+  pthread_attr_t* attr_ptr = NULL;
+  pthread_attr_t attr;
+  if (stack_size_ > 0) {
+    pthread_attr_init(&attr);
+    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+    attr_ptr = &attr;
+  }
+  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+  ASSERT(data_->thread_ != kNoThread);
 }
 
 
 void Thread::Join() {
-  pthread_join(thread_handle_data()->thread_, NULL);
+  pthread_join(data_->thread_, NULL);
 }
 
 
@@ -623,128 +603,176 @@
 
 class Sampler::PlatformData : public Malloced {
  public:
-  explicit PlatformData(Sampler* sampler) {
-    sampler_ = sampler;
-    sampler_thread_ = INVALID_HANDLE_VALUE;
-    profiled_thread_ = INVALID_HANDLE_VALUE;
-  }
-
-  Sampler* sampler_;
-  HANDLE sampler_thread_;
-  HANDLE profiled_thread_;
-  RuntimeProfilerRateLimiter rate_limiter_;
-
-  // Sampler thread handler.
-  void Runner() {
-    while (sampler_->IsActive()) {
-      if (rate_limiter_.SuspendIfNecessary()) continue;
-      Sample();
-      Sleep(sampler_->interval_);
-    }
-  }
-
-  void Sample() {
-    if (sampler_->IsProfiling()) {
-      // Context used for sampling the register state of the profiled thread.
-      CONTEXT context;
-      memset(&context, 0, sizeof(context));
-
-      TickSample sample_obj;
-      TickSample* sample = CpuProfiler::TickSampleEvent();
-      if (sample == NULL) sample = &sample_obj;
-
-      static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
-      if (SuspendThread(profiled_thread_) == kSuspendFailed) return;
-      sample->state = Top::current_vm_state();
-
-      context.ContextFlags = CONTEXT_FULL;
-      if (GetThreadContext(profiled_thread_, &context) != 0) {
-#if V8_HOST_ARCH_X64
-        sample->pc = reinterpret_cast<Address>(context.Rip);
-        sample->sp = reinterpret_cast<Address>(context.Rsp);
-        sample->fp = reinterpret_cast<Address>(context.Rbp);
-#else
-        sample->pc = reinterpret_cast<Address>(context.Eip);
-        sample->sp = reinterpret_cast<Address>(context.Esp);
-        sample->fp = reinterpret_cast<Address>(context.Ebp);
-#endif
-        sampler_->SampleStack(sample);
-        sampler_->Tick(sample);
-      }
-      ResumeThread(profiled_thread_);
-    }
-    if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
-  }
-};
-
-
-// Entry point for sampler thread.
-static DWORD __stdcall SamplerEntry(void* arg) {
-  Sampler::PlatformData* data =
-      reinterpret_cast<Sampler::PlatformData*>(arg);
-  data->Runner();
-  return 0;
-}
-
-
-// Initialize a profile sampler.
-Sampler::Sampler(int interval)
-    : interval_(interval),
-      profiling_(false),
-      active_(false),
-      samples_taken_(0) {
-  data_ = new PlatformData(this);
-}
-
-
-Sampler::~Sampler() {
-  delete data_;
-}
-
-
-// Start profiling.
-void Sampler::Start() {
-  // Do not start multiple threads for the same sampler.
-  ASSERT(!IsActive());
-
   // Get a handle to the calling thread. This is the thread that we are
   // going to profile. We need to make a copy of the handle because we are
   // going to use it in the sampler thread. Using GetThreadHandle() will
   // not work in this case. We're using OpenThread because DuplicateHandle
   // for some reason doesn't work in Chrome's sandbox.
-  data_->profiled_thread_ = OpenThread(THREAD_GET_CONTEXT |
-                                       THREAD_SUSPEND_RESUME |
-                                       THREAD_QUERY_INFORMATION,
-                                       false,
-                                       GetCurrentThreadId());
-  BOOL ok = data_->profiled_thread_ != NULL;
-  if (!ok) return;
+  PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
+                                               THREAD_SUSPEND_RESUME |
+                                               THREAD_QUERY_INFORMATION,
+                                               false,
+                                               GetCurrentThreadId())) {}
 
-  // Start sampler thread.
-  DWORD tid;
+  ~PlatformData() {
+    if (profiled_thread_ != NULL) {
+      CloseHandle(profiled_thread_);
+      profiled_thread_ = NULL;
+    }
+  }
+
+  HANDLE profiled_thread() { return profiled_thread_; }
+
+ private:
+  HANDLE profiled_thread_;
+};
+
+
+class SamplerThread : public Thread {
+ public:
+  explicit SamplerThread(int interval)
+      : Thread(NULL, "SamplerThread"),
+        interval_(interval) {}
+
+  static void AddActiveSampler(Sampler* sampler) {
+    ScopedLock lock(mutex_);
+    SamplerRegistry::AddActiveSampler(sampler);
+    if (instance_ == NULL) {
+      instance_ = new SamplerThread(sampler->interval());
+      instance_->Start();
+    } else {
+      ASSERT(instance_->interval_ == sampler->interval());
+    }
+  }
+
+  static void RemoveActiveSampler(Sampler* sampler) {
+    ScopedLock lock(mutex_);
+    SamplerRegistry::RemoveActiveSampler(sampler);
+    if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+      RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
+      instance_->Join();
+      delete instance_;
+      instance_ = NULL;
+    }
+  }
+
+  // Implement Thread::Run().
+  virtual void Run() {
+    SamplerRegistry::State state;
+    while ((state = SamplerRegistry::GetState()) !=
+           SamplerRegistry::HAS_NO_SAMPLERS) {
+      bool cpu_profiling_enabled =
+          (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+      bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+      // When CPU profiling is enabled, both JavaScript and C++ code are
+      // profiled. We must not suspend.
+      if (!cpu_profiling_enabled) {
+        if (rate_limiter_.SuspendIfNecessary()) continue;
+      }
+      if (cpu_profiling_enabled) {
+        if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+          return;
+        }
+      }
+      if (runtime_profiler_enabled) {
+        if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+          return;
+        }
+      }
+      OS::Sleep(interval_);
+    }
+  }
+
+  static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
+    if (!sampler->isolate()->IsInitialized()) return;
+    if (!sampler->IsProfiling()) return;
+    SamplerThread* sampler_thread =
+        reinterpret_cast<SamplerThread*>(raw_sampler_thread);
+    sampler_thread->SampleContext(sampler);
+  }
+
+  static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+    if (!sampler->isolate()->IsInitialized()) return;
+    sampler->isolate()->runtime_profiler()->NotifyTick();
+  }
+
+  void SampleContext(Sampler* sampler) {
+    HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
+    if (profiled_thread == NULL) return;
+
+    // Context used for sampling the register state of the profiled thread.
+    CONTEXT context;
+    memset(&context, 0, sizeof(context));
+
+    TickSample sample_obj;
+    TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate());
+    if (sample == NULL) sample = &sample_obj;
+
+    static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+    if (SuspendThread(profiled_thread) == kSuspendFailed) return;
+    sample->state = sampler->isolate()->current_vm_state();
+
+    context.ContextFlags = CONTEXT_FULL;
+    if (GetThreadContext(profiled_thread, &context) != 0) {
+#if V8_HOST_ARCH_X64
+      sample->pc = reinterpret_cast<Address>(context.Rip);
+      sample->sp = reinterpret_cast<Address>(context.Rsp);
+      sample->fp = reinterpret_cast<Address>(context.Rbp);
+#else
+      sample->pc = reinterpret_cast<Address>(context.Eip);
+      sample->sp = reinterpret_cast<Address>(context.Esp);
+      sample->fp = reinterpret_cast<Address>(context.Ebp);
+#endif
+      sampler->SampleStack(sample);
+      sampler->Tick(sample);
+    }
+    ResumeThread(profiled_thread);
+  }
+
+  const int interval_;
+  RuntimeProfilerRateLimiter rate_limiter_;
+
+  // Protects the process wide state below.
+  static Mutex* mutex_;
+  static SamplerThread* instance_;
+
+  DISALLOW_COPY_AND_ASSIGN(SamplerThread);
+};
+
+
+Mutex* SamplerThread::mutex_ = OS::CreateMutex();
+SamplerThread* SamplerThread::instance_ = NULL;
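The shape of the pattern above as a self-contained sketch: one process-wide worker thread, lazily started for the first client and torn down with the last, with a static mutex guarding the shared state. std::mutex stands in for V8's OS::CreateMutex()/ScopedLock; all names are illustrative.

    #include <mutex>

    class SharedWorker {
     public:
      static void AddClient() {
        std::lock_guard<std::mutex> lock(mutex_);
        if (++clients_ == 1) {          // First client: start the thread.
          instance_ = new SharedWorker();
          instance_->Start();
        }
      }

      static void RemoveClient() {
        std::lock_guard<std::mutex> lock(mutex_);
        if (--clients_ == 0) {          // Last client: join and reclaim.
          instance_->Join();
          delete instance_;
          instance_ = nullptr;
        }
      }

     private:
      void Start() { /* spawn the thread */ }
      void Join() { /* wait for it to exit */ }

      static std::mutex mutex_;         // Protects the process-wide state.
      static SharedWorker* instance_;
      static int clients_;
    };

    std::mutex SharedWorker::mutex_;
    SharedWorker* SharedWorker::instance_ = nullptr;
    int SharedWorker::clients_ = 0;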
+
+
+Sampler::Sampler(Isolate* isolate, int interval)
+    : isolate_(isolate),
+      interval_(interval),
+      profiling_(false),
+      active_(false),
+      samples_taken_(0) {
+  data_ = new PlatformData;
+}
+
+
+Sampler::~Sampler() {
+  ASSERT(!IsActive());
+  delete data_;
+}
+
+
+void Sampler::Start() {
+  ASSERT(!IsActive());
   SetActive(true);
-  data_->sampler_thread_ = CreateThread(NULL, 0, SamplerEntry, data_, 0, &tid);
-  // Set thread to high priority to increase sampling accuracy.
-  SetThreadPriority(data_->sampler_thread_, THREAD_PRIORITY_TIME_CRITICAL);
+  SamplerThread::AddActiveSampler(this);
 }
 
 
-// Stop profiling.
 void Sampler::Stop() {
-  // Seting active to false triggers termination of the sampler
-  // thread.
+  ASSERT(IsActive());
+  SamplerThread::RemoveActiveSampler(this);
   SetActive(false);
-
-  // Wait for sampler thread to terminate.
-  Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
-  WaitForSingleObject(data_->sampler_thread_, INFINITE);
-
-  // Release the thread handles
-  CloseHandle(data_->sampler_thread_);
-  CloseHandle(data_->profiled_thread_);
 }
 
-
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 2a73b6e..8b83f2b 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -391,18 +391,8 @@
 }
 
 
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
  public:
-  explicit PlatformData(ThreadHandle::Kind kind) {
-    Initialize(kind);
-  }
-
-  void Initialize(ThreadHandle::Kind kind) {
-    switch (kind) {
-      case ThreadHandle::SELF: thread_ = pthread_self(); break;
-      case ThreadHandle::INVALID: thread_ = kNoThread; break;
-    }
-  }
   pthread_t thread_;  // Thread handle for pthread.
 };
 
@@ -433,7 +423,7 @@
 
 
 Thread::Thread(Isolate* isolate, const Options& options)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData),
       isolate_(isolate),
       stack_size_(options.stack_size) {
   set_name(options.name);
@@ -441,7 +431,7 @@
 
 
 Thread::Thread(Isolate* isolate, const char* name)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData),
       isolate_(isolate),
       stack_size_(0) {
   set_name(name);
@@ -449,6 +439,7 @@
 
 
 Thread::~Thread() {
+  delete data_;
 }
 
 
@@ -457,7 +448,7 @@
   // This is also initialized by the first argument to pthread_create() but we
   // don't know which thread will run first (the original thread or the new
   // one) so we initialize it here too.
-  thread->thread_handle_data()->thread_ = pthread_self();
-  ASSERT(thread->IsValid());
+  thread->data()->thread_ = pthread_self();
+  ASSERT(thread->data()->thread_ != kNoThread);
   Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
@@ -651,6 +642,11 @@
     // We require a fully initialized and entered isolate.
     return;
   }
+  if (v8::Locker::IsActive() &&
+      !isolate->thread_manager()->IsLockedByCurrentThread()) {
+    return;
+  }
+
   Sampler* sampler = isolate->logger()->sampler();
   if (sampler == NULL || !sampler->IsActive()) return;
 
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 73a6ccb..1ecd8fc 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -92,9 +92,10 @@
 
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
 #if (defined(__VFP_FP__) && !defined(__SOFTFP__))
-  // Here gcc is telling us that we are on an ARM and gcc is assuming that we
-  // have VFP3 instructions.  If gcc can assume it then so can we.
-  return 1u << VFP3;
+  // Here gcc is telling us that we are on an ARM and gcc is assuming
+  // that we have VFP3 instructions.  If gcc can assume it then so can
+  // we. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
+  return 1u << VFP3 | 1u << ARMv7;
 #elif CAN_USE_ARMV7_INSTRUCTIONS
   return 1u << ARMv7;
 #elif(defined(__mips_hard_float) && __mips_hard_float != 0)
@@ -588,50 +589,15 @@
 }
 
 
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
  public:
-  explicit PlatformData(ThreadHandle::Kind kind) {
-    Initialize(kind);
-  }
-
-  void Initialize(ThreadHandle::Kind kind) {
-    switch (kind) {
-      case ThreadHandle::SELF: thread_ = pthread_self(); break;
-      case ThreadHandle::INVALID: thread_ = kNoThread; break;
-    }
-  }
+  PlatformData() : thread_(kNoThread) {}
 
   pthread_t thread_;  // Thread handle for pthread.
 };
 
-
-ThreadHandle::ThreadHandle(Kind kind) {
-  data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
-  data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
-  delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
-  return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
-  return data_->thread_ != kNoThread;
-}
-
-
 Thread::Thread(Isolate* isolate, const Options& options)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData()),
       isolate_(isolate),
       stack_size_(options.stack_size) {
   set_name(options.name);
@@ -639,7 +605,7 @@
 
 
 Thread::Thread(Isolate* isolate, const char* name)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData()),
       isolate_(isolate),
       stack_size_(0) {
   set_name(name);
@@ -647,6 +613,7 @@
 
 
 Thread::~Thread() {
+  delete data_;
 }
 
 
@@ -658,8 +625,8 @@
   prctl(PR_SET_NAME,
         reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
         0, 0, 0);
-  thread->thread_handle_data()->thread_ = pthread_self();
-  ASSERT(thread->IsValid());
+  thread->data()->thread_ = pthread_self();
+  ASSERT(thread->data()->thread_ != kNoThread);
   Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return NULL;
@@ -680,13 +647,13 @@
     pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
     attr_ptr = &attr;
   }
-  pthread_create(&thread_handle_data()->thread_, attr_ptr, ThreadEntry, this);
-  ASSERT(IsValid());
+  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+  ASSERT(data_->thread_ != kNoThread);
 }
 
 
 void Thread::Join() {
-  pthread_join(thread_handle_data()->thread_, NULL);
+  pthread_join(data_->thread_, NULL);
 }
 
 
@@ -886,6 +853,11 @@
     // We require a fully initialized and entered isolate.
     return;
   }
+  if (v8::Locker::IsActive() &&
+      !isolate->thread_manager()->IsLockedByCurrentThread()) {
+    return;
+  }
+
   Sampler* sampler = isolate->logger()->sampler();
   if (sampler == NULL || !sampler->IsActive()) return;
 
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 17e3042..3e10b6a 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -48,8 +48,10 @@
 #include <sys/time.h>
 #include <sys/resource.h>
 #include <sys/types.h>
+#include <sys/sysctl.h>
 #include <stdarg.h>
 #include <stdlib.h>
+#include <string.h>
 #include <errno.h>
 
 #undef MAP_TYPE
@@ -390,50 +392,14 @@
 }
 
 
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
  public:
-  explicit PlatformData(ThreadHandle::Kind kind) {
-    Initialize(kind);
-  }
-
-  void Initialize(ThreadHandle::Kind kind) {
-    switch (kind) {
-      case ThreadHandle::SELF: thread_ = pthread_self(); break;
-      case ThreadHandle::INVALID: thread_ = kNoThread; break;
-    }
-  }
+  PlatformData() : thread_(kNoThread) {}
   pthread_t thread_;  // Thread handle for pthread.
 };
 
-
-
-ThreadHandle::ThreadHandle(Kind kind) {
-  data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
-  data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
-  delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
-  return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
-  return data_->thread_ != kNoThread;
-}
-
-
 Thread::Thread(Isolate* isolate, const Options& options)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData),
       isolate_(isolate),
       stack_size_(options.stack_size) {
   set_name(options.name);
@@ -441,7 +407,7 @@
 
 
 Thread::Thread(Isolate* isolate, const char* name)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData),
       isolate_(isolate),
       stack_size_(0) {
   set_name(name);
@@ -449,6 +415,7 @@
 
 
 Thread::~Thread() {
+  delete data_;
 }
 
 
@@ -474,9 +441,9 @@
   // This is also initialized by the first argument to pthread_create() but we
   // don't know which thread will run first (the original thread or the new
   // one) so we initialize it here too.
-  thread->thread_handle_data()->thread_ = pthread_self();
+  thread->data()->thread_ = pthread_self();
   SetThreadName(thread->name());
-  ASSERT(thread->IsValid());
+  ASSERT(thread->data()->thread_ != kNoThread);
   Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return NULL;
@@ -497,22 +464,89 @@
     pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
     attr_ptr = &attr;
   }
-  pthread_create(&thread_handle_data()->thread_, attr_ptr, ThreadEntry, this);
-  ASSERT(IsValid());
+  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+  ASSERT(data_->thread_ != kNoThread);
 }
 
 
 void Thread::Join() {
-  pthread_join(thread_handle_data()->thread_, NULL);
+  pthread_join(data_->thread_, NULL);
 }
 
 
+#ifdef V8_FAST_TLS_SUPPORTED
+
+static Atomic32 tls_base_offset_initialized = 0;
+intptr_t kMacTlsBaseOffset = 0;
+
+// It's safe to do the initialization more than once, but it has to be
+// done at least once.
+static void InitializeTlsBaseOffset() {
+  const size_t kBufferSize = 128;
+  char buffer[kBufferSize];
+  size_t buffer_size = kBufferSize;
+  int ctl_name[] = { CTL_KERN, KERN_OSRELEASE };
+  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
+    V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+  }
+  // The buffer now contains a string of the form XX.YY.ZZ, where
+  // XX is the major kernel version component.
+  // Make sure the buffer is 0-terminated.
+  buffer[kBufferSize - 1] = '\0';
+  char* period_pos = strchr(buffer, '.');
+  *period_pos = '\0';
+  int kernel_version_major =
+      static_cast<int>(strtol(buffer, NULL, 10));  // NOLINT
+  // The constants below are taken from pthreads.s from the XNU kernel
+  // sources archive at www.opensource.apple.com.
+  if (kernel_version_major < 11) {
+    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
+    // same offsets.
+#if defined(V8_HOST_ARCH_IA32)
+    kMacTlsBaseOffset = 0x48;
+#else
+    kMacTlsBaseOffset = 0x60;
+#endif
+  } else {
+    // 11.x.x (Lion) changed the offset.
+    kMacTlsBaseOffset = 0;
+  }
+
+  Release_Store(&tls_base_offset_initialized, 1);
+}
+
+static void CheckFastTls(Thread::LocalStorageKey key) {
+  void* expected = reinterpret_cast<void*>(0x1234CAFE);
+  Thread::SetThreadLocal(key, expected);
+  void* actual = Thread::GetExistingThreadLocal(key);
+  if (expected != actual) {
+    V8_Fatal(__FILE__, __LINE__,
+             "V8 failed to initialize fast TLS on current kernel");
+  }
+  Thread::SetThreadLocal(key, NULL);
+}
+
+#endif  // V8_FAST_TLS_SUPPORTED
+
+
 Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+#ifdef V8_FAST_TLS_SUPPORTED
+  bool check_fast_tls = false;
+  if (tls_base_offset_initialized == 0) {
+    check_fast_tls = true;
+    InitializeTlsBaseOffset();
+  }
+#endif
   pthread_key_t key;
   int result = pthread_key_create(&key, NULL);
   USE(result);
   ASSERT(result == 0);
-  return static_cast<LocalStorageKey>(key);
+  LocalStorageKey typed_key = static_cast<LocalStorageKey>(key);
+#ifdef V8_FAST_TLS_SUPPORTED
+  // If we just initialized fast TLS support, make sure it works.
+  if (check_fast_tls) CheckFastTls(typed_key);
+#endif
+  return typed_key;
 }
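A sketch of the publication pattern used by InitializeTlsBaseOffset() and tls_base_offset_initialized, restated with C++11 atomics (V8's Release_Store predates them); the 0x60 is just the example value from the code above.

    #include <atomic>
    #include <stdint.h>

    static std::atomic<int> initialized(0);
    static intptr_t base_offset = 0;

    static void EnsureBaseOffset() {
      if (initialized.load(std::memory_order_acquire) != 0) return;
      // Several threads may race to here. Each computes the same value,
      // so doing the work more than once is harmless -- it only has to
      // happen at least once before the fast TLS path is used.
      base_offset = 0x60;  // Stand-in for the kernel-version probe above.
      // The release store guarantees that a thread observing
      // initialized == 1 also observes the base_offset written before it.
      initialized.store(1, std::memory_order_release);
    }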
 
 
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index 5409936..aacad14 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -299,9 +299,9 @@
 }
 
 
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
  public:
-  explicit PlatformData(ThreadHandle::Kind kind) {
+  PlatformData() {
     UNIMPLEMENTED();
   }
 
@@ -309,39 +309,8 @@
 };
 
 
-ThreadHandle::ThreadHandle(Kind kind) {
-  UNIMPLEMENTED();
-  // Shared setup follows.
-  data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
-  UNIMPLEMENTED();
-}
-
-
-ThreadHandle::~ThreadHandle() {
-  UNIMPLEMENTED();
-  // Shared tear down follows.
-  delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
-  UNIMPLEMENTED();
-  return false;
-}
-
-
-bool ThreadHandle::IsValid() const {
-  UNIMPLEMENTED();
-  return false;
-}
-
-
 Thread::Thread(Isolate* isolate, const Options& options)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData()),
       isolate_(isolate),
       stack_size_(options.stack_size) {
   set_name(options.name);
@@ -350,7 +319,7 @@
 
 
 Thread::Thread(Isolate* isolate, const char* name)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData()),
       isolate_(isolate),
       stack_size_(0) {
   set_name(name);
@@ -359,6 +328,7 @@
 
 
 Thread::~Thread() {
+  delete data_;
   UNIMPLEMENTED();
 }
 
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index fe1a62a..e90b3e8 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -359,49 +359,16 @@
 }
 
 
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
  public:
-  explicit PlatformData(ThreadHandle::Kind kind) {
-    Initialize(kind);
-  }
+  PlatformData() : thread_(kNoThread) {}
 
-  void Initialize(ThreadHandle::Kind kind) {
-    switch (kind) {
-      case ThreadHandle::SELF: thread_ = pthread_self(); break;
-      case ThreadHandle::INVALID: thread_ = kNoThread; break;
-    }
-  }
   pthread_t thread_;  // Thread handle for pthread.
 };
 
 
-ThreadHandle::ThreadHandle(Kind kind) {
-  data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
-  data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
-  delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
-  return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
-  return data_->thread_ != kNoThread;
-}
-
-
 Thread::Thread(Isolate* isolate, const Options& options)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData()),
       isolate_(isolate),
       stack_size_(options.stack_size) {
   set_name(options.name);
@@ -409,7 +376,7 @@
 
 
 Thread::Thread(Isolate* isolate, const char* name)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData()),
       isolate_(isolate),
       stack_size_(0) {
   set_name(name);
@@ -417,6 +384,7 @@
 
 
 Thread::~Thread() {
+  delete data_;
 }
 
 
@@ -425,8 +393,8 @@
   // This is also initialized by the first argument to pthread_create() but we
   // don't know which thread will run first (the original thread or the new
   // one) so we initialize it here too.
-  thread->thread_handle_data()->thread_ = pthread_self();
-  ASSERT(thread->IsValid());
+  thread->data()->thread_ = pthread_self();
+  ASSERT(thread->data()->thread_ != kNoThread);
   Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return NULL;
@@ -447,13 +415,13 @@
     pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
     attr_ptr = &attr;
   }
-  pthread_create(&thread_handle_data()->thread_, attr_ptr, ThreadEntry, this);
+  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
-  ASSERT(IsValid());
+  ASSERT(data_->thread_ != kNoThread);
 }
 
 
 void Thread::Join() {
-  pthread_join(thread_handle_data()->thread_, NULL);
+  pthread_join(data_->thread_, NULL);
 }
 
 
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 1dd486e..c4b0fb8 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -205,6 +205,31 @@
 }
 
 
+#if defined(V8_TARGET_ARCH_IA32)
+static OS::MemCopyFunction memcopy_function = NULL;
+static Mutex* memcopy_function_mutex = OS::CreateMutex();
+// Defined in codegen-ia32.cc.
+OS::MemCopyFunction CreateMemCopyFunction();
+
+// Copy memory area to disjoint memory area.
+void OS::MemCopy(void* dest, const void* src, size_t size) {
+  if (memcopy_function == NULL) {
+    ScopedLock lock(memcopy_function_mutex);
+    if (memcopy_function == NULL) {
+      OS::MemCopyFunction temp = CreateMemCopyFunction();
+      MemoryBarrier();
+      memcopy_function = temp;
+    }
+  }
+  // Note: here we rely on dependent reads being ordered. This is true
+  // on all architectures we currently support.
+  (*memcopy_function)(dest, src, size);
+#ifdef DEBUG
+  CHECK_EQ(0, memcmp(dest, src, size));
+#endif
+}
+#endif  // V8_TARGET_ARCH_IA32
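The double-checked initialization above, restated as a self-contained sketch in C++11. The release store plays the role of MemoryBarrier() plus the dependent-read ordering the comment relies on, and CreateCopyFn() stands in for the code-generating CreateMemCopyFunction.

    #include <atomic>
    #include <cstring>
    #include <mutex>

    typedef void (*CopyFn)(void* dest, const void* src, size_t size);

    static void PlainCopy(void* d, const void* s, size_t n) { memcpy(d, s, n); }
    static CopyFn CreateCopyFn() { return &PlainCopy; }  // Expensive in real life.

    static std::atomic<CopyFn> copy_fn(nullptr);
    static std::mutex init_mutex;

    void Copy(void* dest, const void* src, size_t size) {
      CopyFn fn = copy_fn.load(std::memory_order_acquire);  // Fast path.
      if (fn == nullptr) {
        std::lock_guard<std::mutex> lock(init_mutex);
        fn = copy_fn.load(std::memory_order_relaxed);
        if (fn == nullptr) {                 // Re-check under the lock.
          fn = CreateCopyFn();
          // Release store: the function is fully created before any
          // other thread can observe the non-NULL pointer.
          copy_fn.store(fn, std::memory_order_release);
        }
      }
      fn(dest, src, size);
    }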
+
 // ----------------------------------------------------------------------------
 // POSIX string support.
 //
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index da278f3..1a19bac 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -373,50 +373,15 @@
 }
 
 
-class ThreadHandle::PlatformData : public Malloced {
+class Thread::PlatformData : public Malloced {
  public:
-  explicit PlatformData(ThreadHandle::Kind kind) {
-    Initialize(kind);
-  }
-
-  void Initialize(ThreadHandle::Kind kind) {
-    switch (kind) {
-      case ThreadHandle::SELF: thread_ = pthread_self(); break;
-      case ThreadHandle::INVALID: thread_ = kNoThread; break;
-    }
-  }
+  PlatformData() : thread_(kNoThread) {}
 
   pthread_t thread_;  // Thread handle for pthread.
 };
 
-
-ThreadHandle::ThreadHandle(Kind kind) {
-  data_ = new PlatformData(kind);
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
-  data_->Initialize(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
-  delete data_;
-}
-
-
-bool ThreadHandle::IsSelf() const {
-  return pthread_equal(data_->thread_, pthread_self());
-}
-
-
-bool ThreadHandle::IsValid() const {
-  return data_->thread_ != kNoThread;
-}
-
-
 Thread::Thread(Isolate* isolate, const Options& options)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData()),
       isolate_(isolate),
       stack_size_(options.stack_size) {
   set_name(options.name);
@@ -424,7 +389,7 @@
 
 
 Thread::Thread(Isolate* isolate, const char* name)
-    : ThreadHandle(ThreadHandle::INVALID),
+    : data_(new PlatformData()),
       isolate_(isolate),
       stack_size_(0) {
   set_name(name);
@@ -432,6 +397,7 @@
 
 
 Thread::~Thread() {
+  delete data_;
 }
 
 
@@ -440,8 +406,8 @@
   // This is also initialized by the first argument to pthread_create() but we
   // don't know which thread will run first (the original thread or the new
   // one) so we initialize it here too.
-  thread->thread_handle_data()->thread_ = pthread_self();
-  ASSERT(thread->IsValid());
+  thread->data()->thread_ = pthread_self();
+  ASSERT(thread->data()->thread_ != kNoThread);
   Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return NULL;
@@ -462,13 +428,13 @@
     pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
     attr_ptr = &attr;
   }
-  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
-  ASSERT(IsValid());
+  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
+  ASSERT(data_->thread_ != kNoThread);
 }
 
 
 void Thread::Join() {
-  pthread_join(thread_handle_data()->thread_, NULL);
+  pthread_join(data_->thread_, NULL);
 }
 
 
diff --git a/src/platform-tls-mac.h b/src/platform-tls-mac.h
index 86a3347..728524e 100644
--- a/src/platform-tls-mac.h
+++ b/src/platform-tls-mac.h
@@ -37,20 +37,20 @@
 
 #define V8_FAST_TLS_SUPPORTED 1
 
+extern intptr_t kMacTlsBaseOffset;
+
 INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
 
 inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
-  // The constants below are taken from pthreads.s from the XNU kernel
-  // sources archive at www.opensource.apple.com.
   intptr_t result;
 #if defined(V8_HOST_ARCH_IA32)
-  asm("movl %%gs:0x48(,%1,4), %0;"
+  asm("movl %%gs:(%1,%2,4), %0;"
       :"=r"(result)  // Output must be a writable register.
-      :"0"(index));  // Input is the same as output.
+      :"r"(kMacTlsBaseOffset), "r"(index));
 #else
-  asm("movq %%gs:0x60(,%1,8), %0;"
+  asm("movq %%gs:(%1,%2,8), %0;"
       :"=r"(result)
-      :"0"(index));
+      :"r"(kMacTlsBaseOffset), "r"(index));
 #endif
   return result;
 }
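What the scaled-index operand computes, spelled out (gs_base stands for the thread's %gs segment base, which plain C cannot name):

    // %gs:(%1,%2,4) on IA-32 loads from: gs_base + kMacTlsBaseOffset + index*4
    // %gs:(%1,%2,8) on x64 loads from:   gs_base + kMacTlsBaseOffset + index*8
    //
    // The old code baked the displacement (0x48 on IA-32, 0x60 on x64)
    // into the instruction as an immediate. Passing kMacTlsBaseOffset in
    // a register instead lets InitializeTlsBaseOffset() choose the offset
    // at runtime, once the kernel version is known (it became 0 on Lion).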
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 50a9e5b..8673f04 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -176,16 +176,50 @@
 
 static Mutex* limit_mutex = NULL;
 
+#if defined(V8_TARGET_ARCH_IA32)
+static OS::MemCopyFunction memcopy_function = NULL;
+static Mutex* memcopy_function_mutex = OS::CreateMutex();
+// Defined in codegen-ia32.cc.
+OS::MemCopyFunction CreateMemCopyFunction();
+
+// Copy memory area to disjoint memory area.
+void OS::MemCopy(void* dest, const void* src, size_t size) {
+  if (memcopy_function == NULL) {
+    ScopedLock lock(memcopy_function_mutex);
+    if (memcopy_function == NULL) {
+      OS::MemCopyFunction temp = CreateMemCopyFunction();
+      MemoryBarrier();
+      memcopy_function = temp;
+    }
+  }
+  // Note: here we rely on dependent reads being ordered. This is true
+  // on all architectures we currently support.
+  (*memcopy_function)(dest, src, size);
+#ifdef DEBUG
+  CHECK_EQ(0, memcmp(dest, src, size));
+#endif
+}
+#endif  // V8_TARGET_ARCH_IA32
 
 #ifdef _WIN64
 typedef double (*ModuloFunction)(double, double);
-
+static ModuloFunction modulo_function = NULL;
+static Mutex* modulo_function_mutex = OS::CreateMutex();
 // Defined in codegen-x64.cc.
 ModuloFunction CreateModuloFunction();
 
 double modulo(double x, double y) {
-  static ModuloFunction function = CreateModuloFunction();
-  return function(x, y);
+  if (modulo_function == NULL) {
+    ScopedLock lock(modulo_function_mutex);
+    if (modulo_function == NULL) {
+      ModuloFunction temp = CreateModuloFunction();
+      MemoryBarrier();
+      modulo_function = temp;
+    }
+  }
+  // Note: here we rely on dependent reads being ordered. This is true
+  // on all architectures we currently support.
+  return (*modulo_function)(x, y);
 }
 #else  // Win32
 
@@ -1434,24 +1468,6 @@
 
 // Definition of invalid thread handle and id.
 static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
-static const DWORD kNoThreadId = 0;
-
-
-class ThreadHandle::PlatformData : public Malloced {
- public:
-  explicit PlatformData(ThreadHandle::Kind kind) {
-    Initialize(kind);
-  }
-
-  void Initialize(ThreadHandle::Kind kind) {
-    switch (kind) {
-      case ThreadHandle::SELF: tid_ = GetCurrentThreadId(); break;
-      case ThreadHandle::INVALID: tid_ = kNoThreadId; break;
-    }
-  }
-  DWORD tid_;  // Win32 thread identifier.
-};
-
 
 // Entry point for threads. The supplied argument is a pointer to the thread
 // object. The entry function dispatches to the run method in the thread
@@ -1462,41 +1478,12 @@
   // This is also initialized by the last parameter to _beginthreadex() but we
   // don't know which thread will run first (the original thread or the new
   // one) so we initialize it here too.
-  thread->thread_handle_data()->tid_ = GetCurrentThreadId();
   Thread::SetThreadLocal(Isolate::isolate_key(), thread->isolate());
   thread->Run();
   return 0;
 }
 
 
-// Initialize thread handle to invalid handle.
-ThreadHandle::ThreadHandle(ThreadHandle::Kind kind) {
-  data_ = new PlatformData(kind);
-}
-
-
-ThreadHandle::~ThreadHandle() {
-  delete data_;
-}
-
-
-// The thread is running if it has the same id as the current thread.
-bool ThreadHandle::IsSelf() const {
-  return GetCurrentThreadId() == data_->tid_;
-}
-
-
-// Test for invalid thread handle.
-bool ThreadHandle::IsValid() const {
-  return data_->tid_ != kNoThreadId;
-}
-
-
-void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
-  data_->Initialize(kind);
-}
-
-
 class Thread::PlatformData : public Malloced {
  public:
   explicit PlatformData(HANDLE thread) : thread_(thread) {}
@@ -1508,8 +1495,7 @@
 // handle until it is started.
 
 Thread::Thread(Isolate* isolate, const Options& options)
-    : ThreadHandle(ThreadHandle::INVALID),
-      isolate_(isolate),
+    : isolate_(isolate),
       stack_size_(options.stack_size) {
   data_ = new PlatformData(kNoThread);
   set_name(options.name);
@@ -1517,8 +1503,7 @@
 
 
 Thread::Thread(Isolate* isolate, const char* name)
-    : ThreadHandle(ThreadHandle::INVALID),
-      isolate_(isolate),
+    : isolate_(isolate),
       stack_size_(0) {
   data_ = new PlatformData(kNoThread);
   set_name(name);
@@ -1548,9 +1533,7 @@
                      ThreadEntry,
                      this,
                      0,
-                     reinterpret_cast<unsigned int*>(
-                         &thread_handle_data()->tid_)));
-  ASSERT(IsValid());
+                     NULL));
 }
 
 
diff --git a/src/platform.h b/src/platform.h
index b2e0c48..fc417ef 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -303,6 +303,21 @@
 
   static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
 
+#if defined(V8_TARGET_ARCH_IA32)
+  // Copy memory area to disjoint memory area.
+  static void MemCopy(void* dest, const void* src, size_t size);
+  // Limit below which the extra overhead of the MemCopy function is likely
+  // to outweigh the benefits of faster copying.
+  static const int kMinComplexMemCopy = 64;
+  typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
+
+#else  // V8_TARGET_ARCH_IA32
+  static void MemCopy(void* dest, const void* src, size_t size) {
+    memcpy(dest, src, size);
+  }
+  static const int kMinComplexMemCopy = 256;
+#endif  // V8_TARGET_ARCH_IA32
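A sketch of how a caller is expected to use the threshold -- a hypothetical helper, not part of this patch, assuming platform.h is included:

    #include <cstring>

    // Below kMinComplexMemCopy the plain memcpy -- typically inlined by
    // the compiler -- wins; above it the specialized OS::MemCopy pays
    // for its call overhead.
    static void CopySmart(void* dest, const void* src, size_t size) {
      if (size < static_cast<size_t>(OS::kMinComplexMemCopy)) {
        memcpy(dest, src, size);
      } else {
        OS::MemCopy(dest, src, size);
      }
    }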
+
  private:
   static const int msPerSecond = 1000;
 
@@ -339,40 +354,6 @@
   size_t size_;  // Size of the virtual memory.
 };
 
-
-// ----------------------------------------------------------------------------
-// ThreadHandle
-//
-// A ThreadHandle represents a thread identifier for a thread. The ThreadHandle
-// does not own the underlying os handle. Thread handles can be used for
-// refering to threads and testing equality.
-
-class ThreadHandle {
- public:
-  enum Kind { SELF, INVALID };
-  explicit ThreadHandle(Kind kind);
-
-  // Destructor.
-  ~ThreadHandle();
-
-  // Test for thread running.
-  bool IsSelf() const;
-
-  // Test for valid thread handle.
-  bool IsValid() const;
-
-  // Get platform-specific data.
-  class PlatformData;
-  PlatformData* thread_handle_data() { return data_; }
-
-  // Initialize the handle to kind
-  void Initialize(Kind kind);
-
- private:
-  PlatformData* data_;  // Captures platform dependent data.
-};
-
-
 // ----------------------------------------------------------------------------
 // Thread
 //
@@ -381,7 +362,7 @@
 // thread. The Thread object should not be deallocated before the thread has
 // terminated.
 
-class Thread: public ThreadHandle {
+class Thread {
  public:
   // Opaque data type for thread-local storage keys.
   // LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
@@ -453,11 +434,15 @@
   // The thread name length is limited to 16 based on Linux's implementation of
   // prctl().
   static const int kMaxThreadNameLength = 16;
+
+  class PlatformData;
+  PlatformData* data() { return data_; }
+
  private:
   void set_name(const char *name);
 
-  class PlatformData;
   PlatformData* data_;
+
   Isolate* isolate_;
   char name_[kMaxThreadNameLength];
   int stack_size_;
@@ -493,10 +478,10 @@
 
 
 // ----------------------------------------------------------------------------
-// ScopedLock/ScopedUnlock
+// ScopedLock
 //
-// Stack-allocated ScopedLocks/ScopedUnlocks provide block-scoped
-// locking and unlocking of a mutex.
+// Stack-allocated ScopedLocks provide block-scoped locking and
+// unlocking of a mutex.
 class ScopedLock {
  public:
   explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
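Typical block-scoped use of ScopedLock (illustrative caller):

    static void Deposit(Mutex* mutex, int* balance, int amount) {
      ScopedLock lock(mutex);  // Acquires the mutex here.
      *balance += amount;
      // Every path out of this block -- normal fall-through or an early
      // return -- runs ~ScopedLock(), which releases the mutex.
    }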
@@ -596,7 +581,8 @@
         sp(NULL),
         fp(NULL),
         tos(NULL),
-        frames_count(0) {}
+        frames_count(0),
+        has_external_callback(false) {}
   StateTag state;  // The state of the VM.
   Address pc;      // Instruction pointer.
   Address sp;      // Stack pointer.
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
index 61e9e7e..9646eb6 100644
--- a/src/preparser-api.cc
+++ b/src/preparser-api.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -159,8 +159,8 @@
 
 class StandAloneJavaScriptScanner : public JavaScriptScanner {
  public:
-  explicit StandAloneJavaScriptScanner(ScannerConstants* scanner_constants)
-      : JavaScriptScanner(scanner_constants) { }
+  explicit StandAloneJavaScriptScanner(UnicodeCache* unicode_cache)
+      : JavaScriptScanner(unicode_cache) { }
 
   void Initialize(UC16CharacterStream* source) {
     source_ = source;
@@ -192,8 +192,8 @@
 PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
   internal::InputStreamUTF16Buffer buffer(input);
   uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
-  internal::ScannerConstants scanner_constants;
-  internal::StandAloneJavaScriptScanner scanner(&scanner_constants);
+  internal::UnicodeCache unicode_cache;
+  internal::StandAloneJavaScriptScanner scanner(&unicode_cache);
   scanner.Initialize(&buffer);
   internal::CompleteParserRecorder recorder;
   preparser::PreParser::PreParseResult result =
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 043ad1c..c777ab4 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -376,11 +376,6 @@
 }
 
 
-void PrettyPrinter::VisitIncrementOperation(IncrementOperation* node) {
-  UNREACHABLE();
-}
-
-
 void PrettyPrinter::VisitCountOperation(CountOperation* node) {
   Print("(");
   if (node->is_prefix()) Print("%s", Token::String(node->op()));
@@ -609,16 +604,6 @@
   IndentedScope(AstPrinter* printer, const char* txt, AstNode* node = NULL)
       : ast_printer_(printer) {
     ast_printer_->PrintIndented(txt);
-    if (node != NULL && node->AsExpression() != NULL) {
-      Expression* expr = node->AsExpression();
-      bool printed_first = false;
-      if ((expr->type() != NULL) && (expr->type()->IsKnown())) {
-        ast_printer_->Print(" (type = ");
-        ast_printer_->Print(StaticType::Type2String(expr->type()));
-        printed_first = true;
-      }
-      if (printed_first) ast_printer_->Print(")");
-    }
     ast_printer_->Print("\n");
     ast_printer_->inc_indent();
   }
@@ -664,18 +649,13 @@
 
 void AstPrinter::PrintLiteralWithModeIndented(const char* info,
                                               Variable* var,
-                                              Handle<Object> value,
-                                              StaticType* type) {
+                                              Handle<Object> value) {
   if (var == NULL) {
     PrintLiteralIndented(info, value, true);
   } else {
     EmbeddedVector<char, 256> buf;
     int pos = OS::SNPrintF(buf, "%s (mode = %s", info,
                            Variable::Mode2String(var->mode()));
-    if (type->IsKnown()) {
-      pos += OS::SNPrintF(buf + pos, ", type = %s",
-                          StaticType::Type2String(type));
-    }
     OS::SNPrintF(buf + pos, ")");
     PrintLiteralIndented(buf.start(), value, true);
   }
@@ -732,8 +712,7 @@
     IndentedScope indent(this, "PARAMS");
     for (int i = 0; i < scope->num_parameters(); i++) {
       PrintLiteralWithModeIndented("VAR", scope->parameter(i),
-                                   scope->parameter(i)->name(),
-                                   scope->parameter(i)->type());
+                                   scope->parameter(i)->name());
     }
   }
 }
@@ -777,8 +756,7 @@
     // var or const declarations
     PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
                                  node->proxy()->AsVariable(),
-                                 node->proxy()->name(),
-                                 node->proxy()->AsVariable()->type());
+                                 node->proxy()->name());
   } else {
     // function declarations
     PrintIndented("FUNCTION ");
@@ -996,8 +974,7 @@
 
 
 void AstPrinter::VisitVariableProxy(VariableProxy* node) {
-  PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name(),
-                               node->type());
+  PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name());
   Variable* var = node->var();
   if (var != NULL && var->rewrite() != NULL) {
     IndentedScope indent(this);
@@ -1056,22 +1033,10 @@
 }
 
 
-void AstPrinter::VisitIncrementOperation(IncrementOperation* node) {
-  UNREACHABLE();
-}
-
-
 void AstPrinter::VisitCountOperation(CountOperation* node) {
   EmbeddedVector<char, 128> buf;
-  if (node->type()->IsKnown()) {
-    OS::SNPrintF(buf, "%s %s (type = %s)",
-                 (node->is_prefix() ? "PRE" : "POST"),
-                 Token::Name(node->op()),
-                 StaticType::Type2String(node->type()));
-  } else {
-    OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
-                 Token::Name(node->op()));
-  }
+  OS::SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
+               Token::Name(node->op()));
   PrintIndentedVisit(buf.start(), node->expression());
 }
 
@@ -1461,11 +1426,6 @@
 }
 
 
-void JsonAstBuilder::VisitIncrementOperation(IncrementOperation* expr) {
-  UNREACHABLE();
-}
-
-
 void JsonAstBuilder::VisitCountOperation(CountOperation* expr) {
   TagScope tag(this, "CountOperation");
   {
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 284a93f..451b17e 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -104,8 +104,7 @@
   void PrintLiteralIndented(const char* info, Handle<Object> value, bool quote);
   void PrintLiteralWithModeIndented(const char* info,
                                     Variable* var,
-                                    Handle<Object> value,
-                                    StaticType* type);
+                                    Handle<Object> value);
   void PrintLabelsIndented(const char* info, ZoneStringList* labels);
 
   void inc_indent() { indent_++; }
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index c9db94f..4cf62e2 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1690,7 +1690,7 @@
                         : "",
                     children_count,
                     retainers_count);
-  } else if (object->IsFixedArray()) {
+  } else if (object->IsFixedArray() || object->IsByteArray()) {
     return AddEntry(object,
                     HeapEntry::kArray,
                     "",
@@ -1705,7 +1705,7 @@
   }
   return AddEntry(object,
                   HeapEntry::kHidden,
-                  "system",
+                  GetSystemEntryName(object),
                   children_count,
                   retainers_count);
 }
@@ -1731,6 +1731,21 @@
 }
 
 
+const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
+  switch (object->map()->instance_type()) {
+    case MAP_TYPE: return "system / Map";
+    case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell";
+    case PROXY_TYPE: return "system / Proxy";
+    case ODDBALL_TYPE: return "system / Oddball";
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+    case NAME##_TYPE: return "system / "#Name;
+  STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+    default: return "system";
+  }
+}
+
+
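GetSystemEntryName() leans on V8's X-macro idiom: STRUCT_LIST is a list macro that applies a caller-supplied macro to every struct type. A minimal self-contained version of the idiom (the list and names here are illustrative, not V8's):

    #define SHAPE_LIST(V) \
      V(CIRCLE, Circle)   \
      V(SQUARE, Square)

    enum ShapeType {
    #define MAKE_ENUM(NAME, Name) NAME##_TYPE,
      SHAPE_LIST(MAKE_ENUM)
    #undef MAKE_ENUM
    };

    const char* ShapeName(ShapeType type) {
      switch (type) {
    #define MAKE_CASE(NAME, Name) case NAME##_TYPE: return "shape / " #Name;
        SHAPE_LIST(MAKE_CASE)
    #undef MAKE_CASE
        default: return "shape";
      }
    }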
 int V8HeapExplorer::EstimateObjectsCount() {
   HeapIterator iterator(HeapIterator::kFilterUnreachable);
   int objects_count = 0;
@@ -1745,12 +1760,10 @@
  public:
   IndexedReferencesExtractor(V8HeapExplorer* generator,
                              HeapObject* parent_obj,
-                             HeapEntry* parent_entry,
-                             bool process_field_marks = false)
+                             HeapEntry* parent_entry)
       : generator_(generator),
         parent_obj_(parent_obj),
         parent_(parent_entry),
-        process_field_marks_(process_field_marks),
         next_index_(1) {
   }
   void VisitPointers(Object** start, Object** end) {
@@ -1768,7 +1781,7 @@
   }
  private:
   bool CheckVisitedAndUnmark(Object** field) {
-    if (process_field_marks_ && (*field)->IsFailure()) {
+    if ((*field)->IsFailure()) {
       intptr_t untagged = reinterpret_cast<intptr_t>(*field) & ~kFailureTagMask;
       *field = reinterpret_cast<Object*>(untagged | kHeapObjectTag);
       ASSERT((*field)->IsHeapObject());
@@ -1779,7 +1792,6 @@
   V8HeapExplorer* generator_;
   HeapObject* parent_obj_;
   HeapEntry* parent_;
-  bool process_field_marks_;
   int next_index_;
 };
 
@@ -1794,6 +1806,7 @@
     // uses for the global object.
     JSGlobalProxy* proxy = JSGlobalProxy::cast(obj);
     SetRootShortcutReference(proxy->map()->prototype());
+    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
     IndexedReferencesExtractor refs_extractor(this, obj, entry);
     obj->Iterate(&refs_extractor);
   } else if (obj->IsJSObject()) {
@@ -1806,10 +1819,6 @@
         obj, entry, HEAP->Proto_symbol(), js_obj->GetPrototype());
     if (obj->IsJSFunction()) {
       JSFunction* js_fun = JSFunction::cast(js_obj);
-      SetInternalReference(
-          js_fun, entry,
-          "code", js_fun->shared(),
-          JSFunction::kSharedFunctionInfoOffset);
       Object* proto_or_map = js_fun->prototype_or_initial_map();
       if (!proto_or_map->IsTheHole()) {
         if (!proto_or_map->IsMap()) {
@@ -1823,8 +1832,24 @@
               HEAP->prototype_symbol(), js_fun->prototype());
         }
       }
+      SetInternalReference(js_fun, entry,
+                           "shared", js_fun->shared(),
+                           JSFunction::kSharedFunctionInfoOffset);
+      SetInternalReference(js_fun, entry,
+                           "context", js_fun->unchecked_context(),
+                           JSFunction::kContextOffset);
+      SetInternalReference(js_fun, entry,
+                           "literals", js_fun->literals(),
+                           JSFunction::kLiteralsOffset);
     }
-    IndexedReferencesExtractor refs_extractor(this, obj, entry, true);
+    SetInternalReference(obj, entry,
+                         "properties", js_obj->properties(),
+                         JSObject::kPropertiesOffset);
+    SetInternalReference(obj, entry,
+                         "elements", js_obj->elements(),
+                         JSObject::kElementsOffset);
+    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+    IndexedReferencesExtractor refs_extractor(this, obj, entry);
     obj->Iterate(&refs_extractor);
   } else if (obj->IsString()) {
     if (obj->IsConsString()) {
@@ -1832,7 +1857,41 @@
       SetInternalReference(obj, entry, 1, cs->first());
       SetInternalReference(obj, entry, 2, cs->second());
     }
+  } else if (obj->IsMap()) {
+    Map* map = Map::cast(obj);
+    SetInternalReference(obj, entry,
+                         "prototype", map->prototype(), Map::kPrototypeOffset);
+    SetInternalReference(obj, entry,
+                         "constructor", map->constructor(),
+                         Map::kConstructorOffset);
+    SetInternalReference(obj, entry,
+                         "descriptors", map->instance_descriptors(),
+                         Map::kInstanceDescriptorsOffset);
+    SetInternalReference(obj, entry,
+                         "code_cache", map->code_cache(),
+                         Map::kCodeCacheOffset);
+    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+    IndexedReferencesExtractor refs_extractor(this, obj, entry);
+    obj->Iterate(&refs_extractor);
+  } else if (obj->IsSharedFunctionInfo()) {
+    SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+    SetInternalReference(obj, entry,
+                         "name", shared->name(),
+                         SharedFunctionInfo::kNameOffset);
+    SetInternalReference(obj, entry,
+                         "code", shared->unchecked_code(),
+                         SharedFunctionInfo::kCodeOffset);
+    SetInternalReference(obj, entry,
+                         "instance_class_name", shared->instance_class_name(),
+                         SharedFunctionInfo::kInstanceClassNameOffset);
+    SetInternalReference(obj, entry,
+                         "script", shared->script(),
+                         SharedFunctionInfo::kScriptOffset);
+    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+    IndexedReferencesExtractor refs_extractor(this, obj, entry);
+    obj->Iterate(&refs_extractor);
   } else {
+    SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
     IndexedReferencesExtractor refs_extractor(this, obj, entry);
     obj->Iterate(&refs_extractor);
   }
@@ -2236,7 +2295,7 @@
     ObjectGroup* group = groups->at(i);
     if (group->info_ == NULL) continue;
     List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info_);
-    for (int j = 0; j < group->objects_.length(); ++j) {
+    for (size_t j = 0; j < group->length_; ++j) {
       HeapObject* obj = HeapObject::cast(*group->objects_[j]);
       list->Add(obj);
       in_groups_.Insert(obj);
@@ -2307,7 +2366,7 @@
   ASSERT(info_entry != NULL);
   filler_->SetNamedReference(HeapGraphEdge::kInternal,
                              wrapper, wrapper_entry,
-                             "Native",
+                             "native",
                              info, info_entry);
   filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
                                         info, info_entry,
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 377c083..bbc9efc 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -930,6 +930,7 @@
                       const char* name,
                       int children_count,
                       int retainers_count);
+  const char* GetSystemEntryName(HeapObject* object);
   void ExtractReferences(HeapObject* obj);
   void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
   void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
diff --git a/src/property.h b/src/property.h
index fa3916e..ee95ca2 100644
--- a/src/property.h
+++ b/src/property.h
@@ -185,6 +185,13 @@
     number_ = number;
   }
 
+  void DescriptorResult(JSObject* holder, Smi* details, int number) {
+    lookup_type_ = DESCRIPTOR_TYPE;
+    holder_ = holder;
+    details_ = PropertyDetails(details);
+    number_ = number;
+  }
+
   void ConstantResult(JSObject* holder) {
     lookup_type_ = CONSTANT_TYPE;
     holder_ = holder;
diff --git a/src/virtual-frame-inl.h b/src/proxy.js
similarity index 82%
rename from src/virtual-frame-inl.h
rename to src/proxy.js
index c9f4aac..2516983 100644
--- a/src/virtual-frame-inl.h
+++ b/src/proxy.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,15 +25,4 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef V8_VIRTUAL_FRAME_INL_H_
-#define V8_VIRTUAL_FRAME_INL_H_
-
-#include "virtual-frame.h"
-
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
-#include "virtual-frame-heavy-inl.h"
-#else
-#include "virtual-frame-light-inl.h"
-#endif
-
-#endif  // V8_VIRTUAL_FRAME_INL_H_
+global.Proxy = new $Object();
diff --git a/src/register-allocator-inl.h b/src/register-allocator-inl.h
deleted file mode 100644
index 5a68ab0..0000000
--- a/src/register-allocator-inl.h
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGISTER_ALLOCATOR_INL_H_
-#define V8_REGISTER_ALLOCATOR_INL_H_
-
-#include "codegen.h"
-#include "register-allocator.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/register-allocator-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/register-allocator-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/register-allocator-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/register-allocator-mips-inl.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-
-namespace v8 {
-namespace internal {
-
-Result::Result(const Result& other) {
-  other.CopyTo(this);
-}
-
-
-Result& Result::operator=(const Result& other) {
-  if (this != &other) {
-    Unuse();
-    other.CopyTo(this);
-  }
-  return *this;
-}
-
-
-Result::~Result() {
-  if (is_register()) {
-    CodeGeneratorScope::Current(Isolate::Current())->allocator()->Unuse(reg());
-  }
-}
-
-
-void Result::Unuse() {
-  if (is_register()) {
-    CodeGeneratorScope::Current(Isolate::Current())->allocator()->Unuse(reg());
-  }
-  invalidate();
-}
-
-
-void Result::CopyTo(Result* destination) const {
-  destination->value_ = value_;
-  if (is_register()) {
-    CodeGeneratorScope::Current(Isolate::Current())->allocator()->Use(reg());
-  }
-}
-
-
-bool RegisterAllocator::is_used(Register reg) {
-  return registers_.is_used(ToNumber(reg));
-}
-
-
-int RegisterAllocator::count(Register reg) {
-  return registers_.count(ToNumber(reg));
-}
-
-
-void RegisterAllocator::Use(Register reg) {
-  registers_.Use(ToNumber(reg));
-}
-
-
-void RegisterAllocator::Unuse(Register reg) {
-  registers_.Unuse(ToNumber(reg));
-}
-
-
-TypeInfo Result::type_info() const {
-  ASSERT(is_valid());
-  return TypeInfo::FromInt(TypeInfoField::decode(value_));
-}
-
-
-void Result::set_type_info(TypeInfo info) {
-  ASSERT(is_valid());
-  value_ &= ~TypeInfoField::mask();
-  value_ |= TypeInfoField::encode(info.ToInt());
-}
-
-
-bool Result::is_number() const {
-  return type_info().IsNumber();
-}
-
-
-bool Result::is_smi() const {
-  return type_info().IsSmi();
-}
-
-
-bool Result::is_integer32() const {
-  return type_info().IsInteger32();
-}
-
-
-bool Result::is_double() const {
-  return type_info().IsDouble();
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_REGISTER_ALLOCATOR_INL_H_
diff --git a/src/register-allocator.cc b/src/register-allocator.cc
deleted file mode 100644
index cb5e35f..0000000
--- a/src/register-allocator.cc
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-
-Result::Result(Register reg, TypeInfo info) {
-  ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
-  CodeGeneratorScope::Current(Isolate::Current())->allocator()->Use(reg);
-  value_ = TypeField::encode(REGISTER)
-      | TypeInfoField::encode(info.ToInt())
-      | DataField::encode(reg.code_);
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-
-Result RegisterAllocator::AllocateWithoutSpilling() {
-  // Return the first free register, if any.
-  int num = registers_.ScanForFreeRegister();
-  if (num == RegisterAllocator::kInvalidRegister) {
-    return Result();
-  }
-  return Result(RegisterAllocator::ToRegister(num));
-}
-
-
-Result RegisterAllocator::Allocate() {
-  Result result = AllocateWithoutSpilling();
-  if (!result.is_valid()) {
-    // Ask the current frame to spill a register.
-    ASSERT(cgen_->has_valid_frame());
-    Register free_reg = cgen_->frame()->SpillAnyRegister();
-    if (free_reg.is_valid()) {
-      ASSERT(!is_used(free_reg));
-      return Result(free_reg);
-    }
-  }
-  return result;
-}
-
-
-Result RegisterAllocator::Allocate(Register target) {
-  // If the target is not referenced, it can simply be allocated.
-  if (!is_used(RegisterAllocator::ToNumber(target))) {
-    return Result(target);
-  }
-  // If the target is only referenced in the frame, it can be spilled and
-  // then allocated.
-  ASSERT(cgen_->has_valid_frame());
-  if (cgen_->frame()->is_used(RegisterAllocator::ToNumber(target)) &&
-      count(target) == 1)  {
-    cgen_->frame()->Spill(target);
-    ASSERT(!is_used(RegisterAllocator::ToNumber(target)));
-    return Result(target);
-  }
-  // Otherwise (if it's referenced outside the frame) we cannot allocate it.
-  return Result();
-}
-
-
-} }  // namespace v8::internal
diff --git a/src/register-allocator.h b/src/register-allocator.h
deleted file mode 100644
index f0ef9c3..0000000
--- a/src/register-allocator.h
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_REGISTER_ALLOCATOR_H_
-#define V8_REGISTER_ALLOCATOR_H_
-
-#include "macro-assembler.h"
-#include "type-info.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/register-allocator-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/register-allocator-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/register-allocator-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/register-allocator-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-// -------------------------------------------------------------------------
-// Results
-//
-// Results encapsulate the compile-time values manipulated by the code
-// generator.  They can represent registers or constants.
-
-class Result BASE_EMBEDDED {
- public:
-  enum Type {
-    INVALID,
-    REGISTER,
-    CONSTANT
-  };
-
-  // Construct an invalid result.
-  Result() { invalidate(); }
-
-  // Construct a register Result.
-  explicit Result(Register reg, TypeInfo info = TypeInfo::Unknown());
-
-  // Construct a Result whose value is a compile-time constant.
-  explicit Result(Handle<Object> value) {
-    ZoneObjectList* constant_list = Isolate::Current()->result_constant_list();
-    TypeInfo info = TypeInfo::TypeFromValue(value);
-    value_ = TypeField::encode(CONSTANT)
-        | TypeInfoField::encode(info.ToInt())
-        | IsUntaggedInt32Field::encode(false)
-        | DataField::encode(constant_list->length());
-    constant_list->Add(value);
-  }
-
-  // The copy constructor and assignment operators could each create a new
-  // register reference.
-  inline Result(const Result& other);
-
-  inline Result& operator=(const Result& other);
-
-  inline ~Result();
-
-  inline void Unuse();
-
-  Type type() const { return TypeField::decode(value_); }
-
-  void invalidate() { value_ = TypeField::encode(INVALID); }
-
-  inline TypeInfo type_info() const;
-  inline void set_type_info(TypeInfo info);
-  inline bool is_number() const;
-  inline bool is_smi() const;
-  inline bool is_integer32() const;
-  inline bool is_double() const;
-
-  bool is_valid() const { return type() != INVALID; }
-  bool is_register() const { return type() == REGISTER; }
-  bool is_constant() const { return type() == CONSTANT; }
-
-  // An untagged int32 Result contains a signed int32 in a register
-  // or as a constant.  These are only allowed in a side-effect-free
-  // int32 calculation, and if a non-int32 input shows up or an overflow
-  // occurs, we bail out and drop all the int32 values.  Constants are
-  // not converted to int32 until they are loaded into a register.
-  bool is_untagged_int32() const {
-    return IsUntaggedInt32Field::decode(value_);
-  }
-  void set_untagged_int32(bool value) {
-    value_ &= ~IsUntaggedInt32Field::mask();
-    value_ |= IsUntaggedInt32Field::encode(value);
-  }
-
-  Register reg() const {
-    ASSERT(is_register());
-    uint32_t reg = DataField::decode(value_);
-    Register result;
-    result.code_ = reg;
-    return result;
-  }
-
-  Handle<Object> handle() const {
-    ASSERT(type() == CONSTANT);
-    return Isolate::Current()->result_constant_list()->
-        at(DataField::decode(value_));
-  }
-
-  // Move this result to an arbitrary register.  The register is not
-  // necessarily spilled from the frame or even singly-referenced outside
-  // it.
-  void ToRegister();
-
-  // Move this result to a specified register.  The register is spilled from
-  // the frame, and the register is singly-referenced (by this result)
-  // outside the frame.
-  void ToRegister(Register reg);
-
- private:
-  uint32_t value_;
-
-  // Declare BitFields with template parameters <type, start, size>.
-  class TypeField: public BitField<Type, 0, 2> {};
-  class TypeInfoField : public BitField<int, 2, 6> {};
-  class IsUntaggedInt32Field : public BitField<bool, 8, 1> {};
-  class DataField: public BitField<uint32_t, 9, 32 - 9> {};
-
-  inline void CopyTo(Result* destination) const;
-
-  friend class CodeGeneratorScope;
-};
-
-
-// -------------------------------------------------------------------------
-// Register file
-//
-// The register file tracks reference counts for the processor registers.
-// It is used by both the register allocator and the virtual frame.
-
-class RegisterFile BASE_EMBEDDED {
- public:
-  RegisterFile() { Reset(); }
-
-  void Reset() {
-    for (int i = 0; i < kNumRegisters; i++) {
-      ref_counts_[i] = 0;
-    }
-  }
-
-  // Predicates and accessors for the reference counts.
-  bool is_used(int num) {
-    ASSERT(0 <= num && num < kNumRegisters);
-    return ref_counts_[num] > 0;
-  }
-
-  int count(int num) {
-    ASSERT(0 <= num && num < kNumRegisters);
-    return ref_counts_[num];
-  }
-
-  // Record a use of a register by incrementing its reference count.
-  void Use(int num) {
-    ASSERT(0 <= num && num < kNumRegisters);
-    ref_counts_[num]++;
-  }
-
-  // Record that a register will no longer be used by decrementing its
-  // reference count.
-  void Unuse(int num) {
-    ASSERT(is_used(num));
-    ref_counts_[num]--;
-  }
-
-  // Copy the reference counts from this register file to the other.
-  void CopyTo(RegisterFile* other) {
-    for (int i = 0; i < kNumRegisters; i++) {
-      other->ref_counts_[i] = ref_counts_[i];
-    }
-  }
-
- private:
-  // C++ doesn't like zero length arrays, so we make the array length 1 even if
-  // we don't need it.
-  static const int kNumRegisters =
-      (RegisterAllocatorConstants::kNumRegisters == 0) ?
-      1 : RegisterAllocatorConstants::kNumRegisters;
-
-  int ref_counts_[kNumRegisters];
-
-  // Very fast inlined loop to find a free register.  Used in
-  // RegisterAllocator::AllocateWithoutSpilling.  Returns
-  // kInvalidRegister if no free register found.
-  int ScanForFreeRegister() {
-    for (int i = 0; i < RegisterAllocatorConstants::kNumRegisters; i++) {
-      if (!is_used(i)) return i;
-    }
-    return RegisterAllocatorConstants::kInvalidRegister;
-  }
-
-  friend class RegisterAllocator;
-};
-
-
-// -------------------------------------------------------------------------
-// Register allocator
-//
-
-class RegisterAllocator BASE_EMBEDDED {
- public:
-  static const int kNumRegisters =
-      RegisterAllocatorConstants::kNumRegisters;
-  static const int kInvalidRegister =
-      RegisterAllocatorConstants::kInvalidRegister;
-
-  explicit RegisterAllocator(CodeGenerator* cgen) : cgen_(cgen) {}
-
-  // True if the register is reserved by the code generator, false if it
-  // can be freely used by the allocator. Defined in the
-  // platform-specific XXX-inl.h files.
-  static inline bool IsReserved(Register reg);
-
-  // Convert between (unreserved) assembler registers and allocator
-  // numbers.  Defined in the platform-specific XXX-inl.h files.
-  static inline int ToNumber(Register reg);
-  static inline Register ToRegister(int num);
-
-  // Predicates and accessors for the registers' reference counts.
-  bool is_used(int num) { return registers_.is_used(num); }
-  inline bool is_used(Register reg);
-
-  int count(int num) { return registers_.count(num); }
-  inline int count(Register reg);
-
-  // Explicitly record a reference to a register.
-  void Use(int num) { registers_.Use(num); }
-  inline void Use(Register reg);
-
-  // Explicitly record that a register will no longer be used.
-  void Unuse(int num) { registers_.Unuse(num); }
-  inline void Unuse(Register reg);
-
-  // Reset the register reference counts to free all non-reserved registers.
-  void Reset() { registers_.Reset(); }
-
-  // Initialize the register allocator for entry to a JS function.  On
-  // entry, the (non-reserved) registers used by the JS calling
-  // convention are referenced and the other (non-reserved) registers
-  // are free.
-  inline void Initialize();
-
-  // Allocate a free register and return a register result if possible or
-  // fail and return an invalid result.
-  Result Allocate();
-
-  // Allocate a specific register if possible, spilling it from the
-  // current frame if necessary, or else fail and return an invalid
-  // result.
-  Result Allocate(Register target);
-
-  // Allocate a free register without spilling any from the current
-  // frame or fail and return an invalid result.
-  Result AllocateWithoutSpilling();
-
-  // Allocate a free byte register without spilling any from the current
-  // frame or fail and return an invalid result.
-  Result AllocateByteRegisterWithoutSpilling();
-
-  // Copy the internal state to a register file, to be restored later by
-  // RestoreFrom.
-  void SaveTo(RegisterFile* register_file) {
-    registers_.CopyTo(register_file);
-  }
-
-  // Restore the internal state.
-  void RestoreFrom(RegisterFile* register_file) {
-    register_file->CopyTo(&registers_);
-  }
-
- private:
-  CodeGenerator* cgen_;
-  RegisterFile registers_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_REGISTER_ALLOCATOR_H_
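
The three register-allocator files deleted above implemented reference-counted sharing of machine registers between the old code generator's allocator and its virtual frame. A self-contained sketch of that counting discipline, stripped of the V8 plumbing (assumes nothing beyond what the removed header shows):

    #include <cassert>

    class RegisterFile {
     public:
      static const int kNumRegisters = 8;
      RegisterFile() {
        for (int i = 0; i < kNumRegisters; i++) ref_counts_[i] = 0;
      }
      bool is_used(int num) const { return ref_counts_[num] > 0; }
      void Use(int num) { ref_counts_[num]++; }
      void Unuse(int num) { assert(is_used(num)); ref_counts_[num]--; }
      // Mirrors ScanForFreeRegister(): first unreferenced register, or -1.
      int ScanForFreeRegister() const {
        for (int i = 0; i < kNumRegisters; i++) {
          if (!is_used(i)) return i;
        }
        return -1;
      }
     private:
      int ref_counts_[kNumRegisters];
    };

    int main() {
      RegisterFile file;
      int reg = file.ScanForFreeRegister();  // register 0 is free
      file.Use(reg);                         // a live Result references it
      assert(file.is_used(reg));
      file.Unuse(reg);                       // Result destroyed, count drops
      assert(!file.is_used(reg));
      return 0;
    }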
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 780314d..efe8044 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,649 +36,6 @@
 namespace v8 {
 namespace internal {
 
-class AstOptimizer: public AstVisitor {
- public:
-  explicit AstOptimizer() : has_function_literal_(false) {}
-
-  void Optimize(ZoneList<Statement*>* statements);
-
- private:
-  // Used for loop condition analysis.  Cleared before visiting a loop
-  // condition, set when a function literal is visited.
-  bool has_function_literal_;
-
-  // Helpers
-  void OptimizeArguments(ZoneList<Expression*>* arguments);
-
-  // Node visitors.
-#define DEF_VISIT(type) \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  DISALLOW_COPY_AND_ASSIGN(AstOptimizer);
-};
-
-
-void AstOptimizer::Optimize(ZoneList<Statement*>* statements) {
-  int len = statements->length();
-  for (int i = 0; i < len; i++) {
-    Visit(statements->at(i));
-  }
-}
-
-
-void AstOptimizer::OptimizeArguments(ZoneList<Expression*>* arguments) {
-  for (int i = 0; i < arguments->length(); i++) {
-    Visit(arguments->at(i));
-  }
-}
-
-
-void AstOptimizer::VisitBlock(Block* node) {
-  Optimize(node->statements());
-}
-
-
-void AstOptimizer::VisitExpressionStatement(ExpressionStatement* node) {
-  node->expression()->set_no_negative_zero(true);
-  Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitIfStatement(IfStatement* node) {
-  node->condition()->set_no_negative_zero(true);
-  Visit(node->condition());
-  Visit(node->then_statement());
-  if (node->HasElseStatement()) {
-    Visit(node->else_statement());
-  }
-}
-
-
-void AstOptimizer::VisitDoWhileStatement(DoWhileStatement* node) {
-  node->cond()->set_no_negative_zero(true);
-  Visit(node->cond());
-  Visit(node->body());
-}
-
-
-void AstOptimizer::VisitWhileStatement(WhileStatement* node) {
-  has_function_literal_ = false;
-  node->cond()->set_no_negative_zero(true);
-  Visit(node->cond());
-  node->set_may_have_function_literal(has_function_literal_);
-  Visit(node->body());
-}
-
-
-void AstOptimizer::VisitForStatement(ForStatement* node) {
-  if (node->init() != NULL) {
-    Visit(node->init());
-  }
-  if (node->cond() != NULL) {
-    has_function_literal_ = false;
-    node->cond()->set_no_negative_zero(true);
-    Visit(node->cond());
-    node->set_may_have_function_literal(has_function_literal_);
-  }
-  Visit(node->body());
-  if (node->next() != NULL) {
-    Visit(node->next());
-  }
-}
-
-
-void AstOptimizer::VisitForInStatement(ForInStatement* node) {
-  Visit(node->each());
-  Visit(node->enumerable());
-  Visit(node->body());
-}
-
-
-void AstOptimizer::VisitTryCatchStatement(TryCatchStatement* node) {
-  Visit(node->try_block());
-  Visit(node->catch_var());
-  Visit(node->catch_block());
-}
-
-
-void AstOptimizer::VisitTryFinallyStatement(TryFinallyStatement* node) {
-  Visit(node->try_block());
-  Visit(node->finally_block());
-}
-
-
-void AstOptimizer::VisitSwitchStatement(SwitchStatement* node) {
-  node->tag()->set_no_negative_zero(true);
-  Visit(node->tag());
-  for (int i = 0; i < node->cases()->length(); i++) {
-    CaseClause* clause = node->cases()->at(i);
-    if (!clause->is_default()) {
-      Visit(clause->label());
-    }
-    Optimize(clause->statements());
-  }
-}
-
-
-void AstOptimizer::VisitContinueStatement(ContinueStatement* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitBreakStatement(BreakStatement* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitDeclaration(Declaration* node) {
-  // Will not be reached by the current optimizations.
-  USE(node);
-}
-
-
-void AstOptimizer::VisitEmptyStatement(EmptyStatement* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitReturnStatement(ReturnStatement* node) {
-  Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitWithEnterStatement(WithEnterStatement* node) {
-  Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitWithExitStatement(WithExitStatement* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitDebuggerStatement(DebuggerStatement* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitFunctionLiteral(FunctionLiteral* node) {
-  has_function_literal_ = true;
-}
-
-
-void AstOptimizer::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitConditional(Conditional* node) {
-  node->condition()->set_no_negative_zero(true);
-  Visit(node->condition());
-  Visit(node->then_expression());
-  Visit(node->else_expression());
-}
-
-
-void AstOptimizer::VisitVariableProxy(VariableProxy* node) {
-  Variable* var = node->AsVariable();
-  if (var != NULL) {
-    if (var->type()->IsKnown()) {
-      node->type()->CopyFrom(var->type());
-    } else if (node->type()->IsLikelySmi()) {
-      var->type()->SetAsLikelySmi();
-    }
-
-    if (FLAG_safe_int32_compiler) {
-      if (var->IsStackAllocated() &&
-          !var->is_arguments() &&
-          var->mode() != Variable::CONST) {
-        node->set_side_effect_free(true);
-      }
-    }
-  }
-}
-
-
-void AstOptimizer::VisitLiteral(Literal* node) {
-  Handle<Object> literal = node->handle();
-  if (literal->IsSmi()) {
-    node->type()->SetAsLikelySmi();
-    node->set_side_effect_free(true);
-  } else if (literal->IsHeapNumber()) {
-    if (node->to_int32()) {
-      // Any HeapNumber has an int32 value if it is the input to a bit op.
-      node->set_side_effect_free(true);
-    } else {
-      double double_value = HeapNumber::cast(*literal)->value();
-      int32_t int32_value = DoubleToInt32(double_value);
-      node->set_side_effect_free(double_value == int32_value);
-    }
-  }
-}
-
-
-void AstOptimizer::VisitRegExpLiteral(RegExpLiteral* node) {
-  USE(node);
-}
-
-
-void AstOptimizer::VisitArrayLiteral(ArrayLiteral* node) {
-  for (int i = 0; i < node->values()->length(); i++) {
-    Visit(node->values()->at(i));
-  }
-}
-
-void AstOptimizer::VisitObjectLiteral(ObjectLiteral* node) {
-  for (int i = 0; i < node->properties()->length(); i++) {
-    Visit(node->properties()->at(i)->key());
-    Visit(node->properties()->at(i)->value());
-  }
-}
-
-
-void AstOptimizer::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  Visit(node->key());
-  Visit(node->value());
-}
-
-
-void AstOptimizer::VisitAssignment(Assignment* node) {
-  switch (node->op()) {
-    case Token::INIT_VAR:
-    case Token::INIT_CONST:
-    case Token::ASSIGN:
-      // No type can be inferred from the general assignment.
-      break;
-    case Token::ASSIGN_BIT_OR:
-    case Token::ASSIGN_BIT_XOR:
-    case Token::ASSIGN_BIT_AND:
-    case Token::ASSIGN_SHL:
-    case Token::ASSIGN_SAR:
-    case Token::ASSIGN_SHR:
-      node->type()->SetAsLikelySmiIfUnknown();
-      node->target()->type()->SetAsLikelySmiIfUnknown();
-      node->value()->type()->SetAsLikelySmiIfUnknown();
-      node->value()->set_to_int32(true);
-      node->value()->set_no_negative_zero(true);
-      break;
-    case Token::ASSIGN_ADD:
-    case Token::ASSIGN_SUB:
-    case Token::ASSIGN_MUL:
-    case Token::ASSIGN_DIV:
-    case Token::ASSIGN_MOD:
-      if (node->type()->IsLikelySmi()) {
-        node->target()->type()->SetAsLikelySmiIfUnknown();
-        node->value()->type()->SetAsLikelySmiIfUnknown();
-      }
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  Visit(node->target());
-  Visit(node->value());
-
-  switch (node->op()) {
-    case Token::INIT_VAR:
-    case Token::INIT_CONST:
-    case Token::ASSIGN:
-      // Pure assignment copies the type from the value.
-      node->type()->CopyFrom(node->value()->type());
-      break;
-    case Token::ASSIGN_BIT_OR:
-    case Token::ASSIGN_BIT_XOR:
-    case Token::ASSIGN_BIT_AND:
-    case Token::ASSIGN_SHL:
-    case Token::ASSIGN_SAR:
-    case Token::ASSIGN_SHR:
-      // Should have been set up above already.
-      break;
-    case Token::ASSIGN_ADD:
-    case Token::ASSIGN_SUB:
-    case Token::ASSIGN_MUL:
-    case Token::ASSIGN_DIV:
-    case Token::ASSIGN_MOD:
-      if (node->type()->IsUnknown()) {
-        if (node->target()->type()->IsLikelySmi() ||
-            node->value()->type()->IsLikelySmi()) {
-          node->type()->SetAsLikelySmi();
-        }
-      }
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  // Since this is an assignment, we have to propagate this node's type to
-  // the variable.
-  VariableProxy* proxy = node->target()->AsVariableProxy();
-  if (proxy != NULL) {
-    Variable* var = proxy->AsVariable();
-    if (var != NULL) {
-      StaticType* var_type = var->type();
-      if (var_type->IsUnknown()) {
-        var_type->CopyFrom(node->type());
-      } else if (var_type->IsLikelySmi()) {
-        // We do not reset likely types to Unknown.
-      }
-    }
-  }
-}
-
-
-void AstOptimizer::VisitThrow(Throw* node) {
-  Visit(node->exception());
-}
-
-
-void AstOptimizer::VisitProperty(Property* node) {
-  node->key()->set_no_negative_zero(true);
-  Visit(node->obj());
-  Visit(node->key());
-}
-
-
-void AstOptimizer::VisitCall(Call* node) {
-  Visit(node->expression());
-  OptimizeArguments(node->arguments());
-}
-
-
-void AstOptimizer::VisitCallNew(CallNew* node) {
-  Visit(node->expression());
-  OptimizeArguments(node->arguments());
-}
-
-
-void AstOptimizer::VisitCallRuntime(CallRuntime* node) {
-  OptimizeArguments(node->arguments());
-}
-
-
-void AstOptimizer::VisitUnaryOperation(UnaryOperation* node) {
-  if (node->op() == Token::ADD || node->op() == Token::SUB) {
-    node->expression()->set_no_negative_zero(node->no_negative_zero());
-  } else {
-    node->expression()->set_no_negative_zero(true);
-  }
-  Visit(node->expression());
-  if (FLAG_safe_int32_compiler) {
-    switch (node->op()) {
-      case Token::BIT_NOT:
-        node->expression()->set_no_negative_zero(true);
-        node->expression()->set_to_int32(true);
-        // Fall through.
-      case Token::ADD:
-      case Token::SUB:
-        node->set_side_effect_free(node->expression()->side_effect_free());
-        break;
-      case Token::NOT:
-      case Token::DELETE:
-      case Token::TYPEOF:
-      case Token::VOID:
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-  } else if (node->op() == Token::BIT_NOT) {
-    node->expression()->set_to_int32(true);
-  }
-}
-
-
-void AstOptimizer::VisitIncrementOperation(IncrementOperation* node) {
-  UNREACHABLE();
-}
-
-
-void AstOptimizer::VisitCountOperation(CountOperation* node) {
-  // Count operations assume that they work on Smis.
-  node->expression()->set_no_negative_zero(node->is_prefix() ?
-                                           true :
-                                           node->no_negative_zero());
-  node->type()->SetAsLikelySmiIfUnknown();
-  node->expression()->type()->SetAsLikelySmiIfUnknown();
-  Visit(node->expression());
-}
-
-
-static bool CouldBeNegativeZero(AstNode* node) {
-  Literal* literal = node->AsLiteral();
-  if (literal != NULL) {
-    Handle<Object> handle = literal->handle();
-    if (handle->IsString() || handle->IsSmi()) {
-      return false;
-    } else if (handle->IsHeapNumber()) {
-      double double_value = HeapNumber::cast(*handle)->value();
-      if (double_value != 0) {
-        return false;
-      }
-    }
-  }
-  BinaryOperation* binary = node->AsBinaryOperation();
-  if (binary != NULL && Token::IsBitOp(binary->op())) {
-    return false;
-  }
-  return true;
-}
-
-
-static bool CouldBePositiveZero(AstNode* node) {
-  Literal* literal = node->AsLiteral();
-  if (literal != NULL) {
-    Handle<Object> handle = literal->handle();
-    if (handle->IsSmi()) {
-      if (Smi::cast(*handle) != Smi::FromInt(0)) {
-        return false;
-      }
-    } else if (handle->IsHeapNumber()) {
-      // Heap number literal can't be +0, because that's a Smi.
-      return false;
-    }
-  }
-  return true;
-}
-
-
-void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
-  // Depending on the operation we can propagate this node's type down the
-  // AST nodes.
-  Token::Value op = node->op();
-  switch (op) {
-    case Token::COMMA:
-    case Token::OR:
-      node->left()->set_no_negative_zero(true);
-      node->right()->set_no_negative_zero(node->no_negative_zero());
-      break;
-    case Token::AND:
-      node->left()->set_no_negative_zero(node->no_negative_zero());
-      node->right()->set_no_negative_zero(node->no_negative_zero());
-      break;
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-    case Token::SHL:
-    case Token::SAR:
-    case Token::SHR:
-      node->type()->SetAsLikelySmiIfUnknown();
-      node->left()->type()->SetAsLikelySmiIfUnknown();
-      node->right()->type()->SetAsLikelySmiIfUnknown();
-      node->left()->set_to_int32(true);
-      node->right()->set_to_int32(true);
-      node->left()->set_no_negative_zero(true);
-      node->right()->set_no_negative_zero(true);
-      break;
-    case Token::MUL: {
-      VariableProxy* lvar_proxy = node->left()->AsVariableProxy();
-      VariableProxy* rvar_proxy = node->right()->AsVariableProxy();
-      if (lvar_proxy != NULL && rvar_proxy != NULL) {
-        Variable* lvar = lvar_proxy->AsVariable();
-        Variable* rvar = rvar_proxy->AsVariable();
-        if (lvar != NULL && rvar != NULL) {
-          if (lvar->mode() == Variable::VAR && rvar->mode() == Variable::VAR) {
-            Slot* lslot = lvar->AsSlot();
-            Slot* rslot = rvar->AsSlot();
-            if (lslot->type() == rslot->type() &&
-                (lslot->type() == Slot::PARAMETER ||
-                 lslot->type() == Slot::LOCAL) &&
-                lslot->index() == rslot->index()) {
-              // A number squared doesn't give negative zero.
-              node->set_no_negative_zero(true);
-            }
-          }
-        }
-      }
-    }
-    case Token::ADD:
-    case Token::SUB:
-    case Token::DIV:
-    case Token::MOD: {
-      if (node->type()->IsLikelySmi()) {
-        node->left()->type()->SetAsLikelySmiIfUnknown();
-        node->right()->type()->SetAsLikelySmiIfUnknown();
-      }
-      if (op == Token::ADD && (!CouldBeNegativeZero(node->left()) ||
-                               !CouldBeNegativeZero(node->right()))) {
-        node->left()->set_no_negative_zero(true);
-        node->right()->set_no_negative_zero(true);
-      } else if (op == Token::SUB && (!CouldBeNegativeZero(node->left()) ||
-                                      !CouldBePositiveZero(node->right()))) {
-        node->left()->set_no_negative_zero(true);
-        node->right()->set_no_negative_zero(true);
-      } else {
-        node->left()->set_no_negative_zero(node->no_negative_zero());
-        node->right()->set_no_negative_zero(node->no_negative_zero());
-      }
-      if (node->op() == Token::DIV) {
-        node->right()->set_no_negative_zero(false);
-      } else if (node->op() == Token::MOD) {
-        node->right()->set_no_negative_zero(true);
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  Visit(node->left());
-  Visit(node->right());
-
-  // After visiting the operand nodes we have to check if this node's type
-  // can be updated. If it does, then we can push that information down
-  // towards the leaves again if the new information is an upgrade over the
-  // previous type of the operand nodes.
-  if (node->type()->IsUnknown()) {
-    if (node->left()->type()->IsLikelySmi() ||
-        node->right()->type()->IsLikelySmi()) {
-      node->type()->SetAsLikelySmi();
-    }
-    if (node->type()->IsLikelySmi()) {
-      // The type of this node changed to LIKELY_SMI. Propagate this knowledge
-      // down through the nodes.
-      if (node->left()->type()->IsUnknown()) {
-        node->left()->type()->SetAsLikelySmi();
-        Visit(node->left());
-      }
-      if (node->right()->type()->IsUnknown()) {
-        node->right()->type()->SetAsLikelySmi();
-        Visit(node->right());
-      }
-    }
-  }
-
-  if (FLAG_safe_int32_compiler) {
-    switch (node->op()) {
-      case Token::COMMA:
-      case Token::OR:
-      case Token::AND:
-        break;
-      case Token::BIT_OR:
-      case Token::BIT_XOR:
-      case Token::BIT_AND:
-      case Token::SHL:
-      case Token::SAR:
-      case Token::SHR:
-        // Add one to the number of bit operations in this expression.
-        node->set_num_bit_ops(1);
-        // Fall through.
-      case Token::ADD:
-      case Token::SUB:
-      case Token::MUL:
-      case Token::DIV:
-      case Token::MOD:
-        node->set_side_effect_free(node->left()->side_effect_free() &&
-                                   node->right()->side_effect_free());
-        node->set_num_bit_ops(node->num_bit_ops() +
-                                  node->left()->num_bit_ops() +
-                                  node->right()->num_bit_ops());
-        if (!node->no_negative_zero() && node->op() == Token::MUL) {
-          node->set_side_effect_free(false);
-        }
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-  }
-}
-
-
-void AstOptimizer::VisitCompareOperation(CompareOperation* node) {
-  if (node->type()->IsKnown()) {
-    // Propagate useful information down towards the leaves.
-    node->left()->type()->SetAsLikelySmiIfUnknown();
-    node->right()->type()->SetAsLikelySmiIfUnknown();
-  }
-
-  node->left()->set_no_negative_zero(true);
-  // Only [[HasInstance]] has the right argument passed unchanged to it.
-  node->right()->set_no_negative_zero(true);
-
-  Visit(node->left());
-  Visit(node->right());
-
-  // After visiting the operand nodes we have to check if this node's type
-  // can be updated. If it does, then we can push that information down
-  // towards the leaves again if the new information is an upgrade over the
-  // previous type of the operand nodes.
-  if (node->type()->IsUnknown()) {
-    if (node->left()->type()->IsLikelySmi() ||
-        node->right()->type()->IsLikelySmi()) {
-      node->type()->SetAsLikelySmi();
-    }
-    if (node->type()->IsLikelySmi()) {
-      // The type of this node changed to LIKELY_SMI. Propagate this knowledge
-      // down through the nodes.
-      if (node->left()->type()->IsUnknown()) {
-        node->left()->type()->SetAsLikelySmi();
-        Visit(node->left());
-      }
-      if (node->right()->type()->IsUnknown()) {
-        node->right()->type()->SetAsLikelySmi();
-        Visit(node->right());
-      }
-    }
-  }
-}
-
-
-void AstOptimizer::VisitCompareToNull(CompareToNull* node) {
-  Visit(node->expression());
-}
-
-
-void AstOptimizer::VisitThisFunction(ThisFunction* node) {
-  USE(node);
-}
-
-
 class Processor: public AstVisitor {
  public:
   explicit Processor(Variable* result)
@@ -943,11 +300,6 @@
 }
 
 
-void Processor::VisitIncrementOperation(IncrementOperation* node) {
-  UNREACHABLE();
-}
-
-
 void Processor::VisitCountOperation(CountOperation* node) {
   USE(node);
   UNREACHABLE();
@@ -1005,20 +357,4 @@
 }
 
 
-// Assumes code has been parsed and scopes have been analyzed.  Mutates the
-// AST, so the AST should not continue to be used in the case of failure.
-bool Rewriter::Analyze(CompilationInfo* info) {
-  FunctionLiteral* function = info->function();
-  ASSERT(function != NULL && function->scope() != NULL);
-
-  ZoneList<Statement*>* body = function->body();
-  if (FLAG_optimize_ast && !body->is_empty()) {
-    AstOptimizer optimizer;
-    optimizer.Optimize(body);
-    if (optimizer.HasStackOverflow()) return false;
-  }
-  return true;
-}
-
-
 } }  // namespace v8::internal
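
The negative-zero analysis deleted with AstOptimizer rests on IEEE-754 facts that CouldBeNegativeZero() and CouldBePositiveZero() encode: only the number zero can carry a negative sign, and addition or subtraction can only produce -0 from specific zero operands. A standalone illustration of those semantics (not the removed code, just the arithmetic it modeled):

    #include <cmath>
    #include <cstdio>

    int main() {
      double neg_zero = -0.0;
      // -0 compares equal to +0, so a literal with double_value != 0 can
      // never be negative zero, which is the early-out in the old code.
      std::printf("%d\n", neg_zero == 0.0);           // 1
      std::printf("%d\n", std::signbit(neg_zero));    // 1: sign still visible
      // x + y is -0 only when both operands are -0; one operand proven
      // not-negative-zero is enough for the ADD rule to clear the flag.
      std::printf("%d\n", std::signbit(-0.0 + 0.0));  // 0
      // x - y is -0 only when x is -0 and y is +0, matching the SUB rule.
      std::printf("%d\n", std::signbit(-0.0 - 0.0));  // 1
      return 0;
    }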
diff --git a/src/rewriter.h b/src/rewriter.h
index 62e1b7f..59914d9 100644
--- a/src/rewriter.h
+++ b/src/rewriter.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,15 +42,6 @@
   // Assumes code has been parsed and scopes have been analyzed.  Mutates the
   // AST, so the AST should not continue to be used in the case of failure.
   static bool Rewrite(CompilationInfo* info);
-
-  // Perform a suite of simple non-iterative analyses of the AST.  Mark
-  // expressions that are likely smis, expressions without side effects,
-  // expressions whose value will be converted to Int32, and expressions in a
-  // context where +0 and -0 are treated the same.
-  //
-  // Assumes code has been parsed and scopes have been analyzed.  Mutates the
-  // AST, so the AST should not continue to be used in the case of failure.
-  static bool Analyze(CompilationInfo* info);
 };
 
 
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index c6e2b46..8d258ac 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -107,12 +107,6 @@
 }
 
 
-static bool IsOptimizable(JSFunction* function) {
-  Code* code = function->code();
-  return code->kind() == Code::FUNCTION && code->optimizable();
-}
-
-
 Atomic32 RuntimeProfiler::state_ = 0;
 // TODO(isolates): Create the semaphore lazily and clean it up when no
 // longer required.
@@ -120,6 +114,11 @@
 Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
 #endif
 
+#ifdef DEBUG
+bool RuntimeProfiler::has_been_globally_setup_ = false;
+#endif
+bool RuntimeProfiler::enabled_ = false;
+
 
 RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
     : isolate_(isolate),
@@ -130,24 +129,31 @@
       js_ratio_(0),
       sampler_window_position_(0),
       optimize_soon_list_(NULL),
-      state_window_position_(0) {
-  state_counts_[0] = kStateWindowSize;
-  state_counts_[1] = 0;
+      state_window_position_(0),
+      state_window_ticks_(0) {
+  state_counts_[IN_NON_JS_STATE] = kStateWindowSize;
+  state_counts_[IN_JS_STATE] = 0;
+  STATIC_ASSERT(IN_NON_JS_STATE == 0);
   memset(state_window_, 0, sizeof(state_window_));
   ClearSampleBuffer();
 }
 
 
-bool RuntimeProfiler::IsEnabled() {
-  return V8::UseCrankshaft() && FLAG_opt;
+void RuntimeProfiler::GlobalSetup() {
+  ASSERT(!has_been_globally_setup_);
+  enabled_ = V8::UseCrankshaft() && FLAG_opt;
+#ifdef DEBUG
+  has_been_globally_setup_ = true;
+#endif
 }
 
 
 void RuntimeProfiler::Optimize(JSFunction* function, bool eager, int delay) {
-  ASSERT(IsOptimizable(function));
+  ASSERT(function->IsOptimizable());
   if (FLAG_trace_opt) {
     PrintF("[marking (%s) ", eager ? "eagerly" : "lazily");
     function->PrintName();
+    PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
     PrintF(" for recompilation");
     if (delay > 0) {
       PrintF(" (delayed %0.3f ms)", static_cast<double>(delay) / 1000);
@@ -243,7 +249,7 @@
     if (current->IsValid()) {
       Handle<JSFunction> function = current->function();
       int delay = current->Delay();
-      if (IsOptimizable(*function)) {
+      if (function->IsOptimizable()) {
         Optimize(*function, true, delay);
       }
     }
@@ -258,7 +264,7 @@
   JSFunction* samples[kSamplerFrameCount];
   int sample_count = 0;
   int frame_count = 0;
-  for (JavaScriptFrameIterator it;
+  for (JavaScriptFrameIterator it(isolate_);
        frame_count++ < kSamplerFrameCount && !it.done();
        it.Advance()) {
     JavaScriptFrame* frame = it.frame();
@@ -288,7 +294,7 @@
     }
 
     // Do not record non-optimizable functions.
-    if (!IsOptimizable(function)) continue;
+    if (!function->IsOptimizable()) continue;
     samples[sample_count++] = function;
 
     int function_size = function->shared()->SourceSize();
@@ -328,7 +334,7 @@
 
 
 void RuntimeProfiler::OptimizeSoon(JSFunction* function) {
-  if (!IsOptimizable(function)) return;
+  if (!function->IsOptimizable()) return;
   PendingListNode* node = new PendingListNode(function);
   node->set_next(optimize_soon_list_);
   optimize_soon_list_ = node;
@@ -344,8 +350,12 @@
   ASSERT(IsPowerOf2(kStateWindowSize));
   state_window_position_ = (state_window_position_ + 1) &
       (kStateWindowSize - 1);
+  // Note: to calculate the correct ratio we have to track how many valid
+  // ticks are actually in the state window, because on profiler
+  // startup this number can be less than the window size.
+  state_window_ticks_ = Min(kStateWindowSize, state_window_ticks_ + 1);
   NoBarrier_Store(&js_ratio_, state_counts_[IN_JS_STATE] * 100 /
-                  kStateWindowSize);
+                  state_window_ticks_);
 }
 #endif
 
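
The new state_window_ticks_ counter matters right after profiler startup, while the 128-slot window is still mostly unfilled. A worked example of the ratio before and after this change (numbers chosen for illustration):

    #include <cstdio>

    int main() {
      const int kStateWindowSize = 128;
      int js_ticks = 24;    // ticks observed in JS state so far
      int ticks_seen = 32;  // valid ticks currently in the window
      int old_ratio = js_ticks * 100 / kStateWindowSize;  // 18, under-reports
      int new_ratio = js_ticks * 100 / ticks_seen;        // 75, actual share
      std::printf("old=%d%% new=%d%%\n", old_ratio, new_ratio);
      return 0;
    }

Once state_window_ticks_ saturates at kStateWindowSize the two formulas agree; the divergence is purely a startup effect.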
@@ -363,6 +373,7 @@
 
 
 void RuntimeProfiler::Setup() {
+  ASSERT(has_been_globally_setup_);
   ClearSampleBuffer();
   // If the ticker hasn't already started, make sure to do so to get
   // the ticks for the runtime profiler.
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index 8074035..692b4ff 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -40,18 +40,16 @@
 class PendingListNode;
 class Semaphore;
 
-
-enum SamplerState {
-  IN_NON_JS_STATE = 0,
-  IN_JS_STATE = 1
-};
-
-
 class RuntimeProfiler {
  public:
   explicit RuntimeProfiler(Isolate* isolate);
 
-  static bool IsEnabled();
+  static void GlobalSetup();
+
+  static inline bool IsEnabled() {
+    ASSERT(has_been_globally_setup_);
+    return enabled_;
+  }
 
   void OptimizeNow();
   void OptimizeSoon(JSFunction* function);
@@ -101,6 +99,11 @@
   static const int kSamplerWindowSize = 16;
   static const int kStateWindowSize = 128;
 
+  enum SamplerState {
+    IN_NON_JS_STATE = 0,
+    IN_JS_STATE = 1
+  };
+
   static void HandleWakeUp(Isolate* isolate);
 
   void Optimize(JSFunction* function, bool eager, int delay);
@@ -137,6 +140,7 @@
 
   SamplerState state_window_[kStateWindowSize];
   int state_window_position_;
+  int state_window_ticks_;
   int state_counts_[2];
 
   // Possible state values:
@@ -144,6 +148,11 @@
   //   0 or positive => the number of isolates running JavaScript code.
   static Atomic32 state_;
   static Semaphore* semaphore_;
+
+#ifdef DEBUG
+  static bool has_been_globally_setup_;
+#endif
+  static bool enabled_;
 };
 
 
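
The header change above replaces a computed IsEnabled() with a static flag written once by GlobalSetup() and asserted-before-read in debug builds. The essential shape of that pattern, reduced to a sketch (simplified: V8 compiles the bookkeeping flag out of release builds):

    #include <cassert>

    class Profiler {
     public:
      static void GlobalSetup() {
        assert(!setup_done_);
        enabled_ = ComputeEnabled();  // flag checks run exactly once
        setup_done_ = true;
      }
      static bool IsEnabled() {
        assert(setup_done_);  // catches any call before GlobalSetup()
        return enabled_;      // plain load, cheap on the hot path
      }
     private:
      static bool ComputeEnabled() { return true; }  // stand-in for real check
      static bool enabled_;
      static bool setup_done_;
    };

    bool Profiler::enabled_ = false;
    bool Profiler::setup_done_ = false;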
diff --git a/src/runtime.cc b/src/runtime.cc
index c979849..53c048e 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -224,17 +224,13 @@
 }
 
 
-static MaybeObject* Runtime_CloneLiteralBoilerplate(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneLiteralBoilerplate) {
   CONVERT_CHECKED(JSObject, boilerplate, args[0]);
   return DeepCopyBoilerplate(isolate, boilerplate);
 }
 
 
-static MaybeObject* Runtime_CloneShallowLiteralBoilerplate(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneShallowLiteralBoilerplate) {
   CONVERT_CHECKED(JSObject, boilerplate, args[0]);
   return isolate->heap()->CopyJSObject(boilerplate);
 }
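
Every runtime.cc hunk below is the same mechanical rewrite: the old prologue pair, a RUNTIME_CALLING_CONVENTION parameter list plus a RUNTIME_GET_ISOLATE statement, collapses into a single RUNTIME_FUNCTION declaration macro. The macro's definition is not part of this excerpt; a plausible sketch of its shape (an assumption, for illustration only):

    class Arguments;   // stand-ins so the sketch is self-contained
    class Isolate;
    class MaybeObject;

    // Hypothetical expansion; the real definition lives in a header
    // outside this patch.
    #define RUNTIME_FUNCTION(Type, Name) \
      static Type Name(Arguments args, Isolate* isolate)

    // Runtime_Example is an invented name; the rewritten functions get the
    // same signature the two old macros produced, with `isolate` arriving
    // as a parameter rather than being fetched inside the body.
    RUNTIME_FUNCTION(MaybeObject*, Runtime_Example);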
@@ -475,9 +471,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateArrayLiteralBoilerplate(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralBoilerplate) {
   // Takes a FixedArray of elements containing the literal elements of
   // the array literal and produces JSArray with those elements.
   // Additionally takes the literals array of the surrounding function
@@ -499,8 +493,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateObjectLiteral(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
@@ -526,9 +519,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateObjectLiteralShallow(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
@@ -554,8 +545,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateArrayLiteral(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
@@ -574,9 +564,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateArrayLiteralShallow(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
@@ -599,9 +587,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateCatchExtensionObject(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateCatchExtensionObject) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, key, args[0]);
   Object* value = args[1];
@@ -625,8 +611,7 @@
 }
 
 
-static MaybeObject* Runtime_ClassOf(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   Object* obj = args[0];
@@ -635,8 +620,7 @@
 }
 
 
-static MaybeObject* Runtime_IsInPrototypeChain(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   // See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
@@ -652,8 +636,7 @@
 
 
 // Inserts an object as the hidden prototype of another object.
-static MaybeObject* Runtime_SetHiddenPrototype(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHiddenPrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSObject, jsobject, args[0]);
@@ -695,11 +678,10 @@
 }
 
 
-static MaybeObject* Runtime_IsConstructCall(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConstructCall) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 0);
-  JavaScriptFrameIterator it;
+  JavaScriptFrameIterator it(isolate);
   return isolate->heap()->ToBoolean(it.frame()->IsConstructor());
 }
 
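
The small-looking change to JavaScriptFrameIterator here, and in runtime-profiler.cc above, is part of the isolates work: iterators that used to locate the current isolate through thread-local storage now receive it explicitly. A sketch of the pattern (names simplified, not the actual class):

    class Isolate {
     public:
      static Isolate* Current();  // TLS lookup, relatively expensive
    };

    class FrameIterator {
     public:
      // New style: the caller threads the isolate through explicitly.
      explicit FrameIterator(Isolate* isolate) : isolate_(isolate) {}
      // Old style, for contrast: a hidden TLS read per iterator.
      // FrameIterator() : isolate_(Isolate::Current()) {}
     private:
      Isolate* isolate_;
    };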
@@ -824,8 +806,7 @@
 //         [false, value, Writable, Enumerable, Configurable]
 //  if args[1] is an accessor on args[0]
 //         [true, GetFunction, SetFunction, Enumerable, Configurable]
-static MaybeObject* Runtime_GetOwnProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
   ASSERT(args.length() == 2);
   Heap* heap = isolate->heap();
   HandleScope scope(isolate);
@@ -962,16 +943,14 @@
 }
 
 
-static MaybeObject* Runtime_PreventExtensions(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(JSObject, obj, args[0]);
   return obj->PreventExtensions();
 }
 
 
-static MaybeObject* Runtime_IsExtensible(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(JSObject, obj, args[0]);
   if (obj->IsJSGlobalProxy()) {
@@ -985,8 +964,7 @@
 }
 
 
-static MaybeObject* Runtime_RegExpCompile(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(JSRegExp, re, 0);
@@ -998,8 +976,7 @@
 }
 
 
-static MaybeObject* Runtime_CreateApiFunction(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateApiFunction) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(FunctionTemplateInfo, data, 0);
@@ -1007,8 +984,7 @@
 }
 
 
-static MaybeObject* Runtime_IsTemplate(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) {
   ASSERT(args.length() == 1);
   Object* arg = args[0];
   bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo();
@@ -1016,8 +992,7 @@
 }
 
 
-static MaybeObject* Runtime_GetTemplateField(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(HeapObject, templ, args[0]);
   CONVERT_CHECKED(Smi, field, args[1]);
@@ -1036,8 +1011,7 @@
 }
 
 
-static MaybeObject* Runtime_DisableAccessChecks(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(HeapObject, object, args[0]);
   Map* old_map = object->map();
@@ -1057,8 +1031,7 @@
 }
 
 
-static MaybeObject* Runtime_EnableAccessChecks(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(HeapObject, object, args[0]);
   Map* old_map = object->map();
@@ -1089,8 +1062,7 @@
 }
 
 
-static MaybeObject* Runtime_DeclareGlobals(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
   ASSERT(args.length() == 4);
   HandleScope scope(isolate);
   Handle<GlobalObject> global = Handle<GlobalObject>(
@@ -1233,8 +1205,7 @@
 }
 
 
-static MaybeObject* Runtime_DeclareContextSlot(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
 
@@ -1340,8 +1311,7 @@
 }
 
 
-static MaybeObject* Runtime_InitializeVarGlobal(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
   NoHandleAllocation nha;
   // args[0] == name
   // args[1] == strict_mode
@@ -1436,8 +1406,7 @@
 }
 
 
-static MaybeObject* Runtime_InitializeConstGlobal(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
   // All constants are declared with an initial value. The name
   // of the constant is the first argument and the initial value
   // is the second.
@@ -1527,9 +1496,7 @@
 }
 
 
-static MaybeObject* Runtime_InitializeConstContextSlot(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
 
@@ -1636,9 +1603,8 @@
 }
 
 
-static MaybeObject* Runtime_OptimizeObjectForAddingMultipleProperties(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*,
+                 Runtime_OptimizeObjectForAddingMultipleProperties) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(JSObject, object, 0);
@@ -1650,8 +1616,7 @@
 }
 
 
-static MaybeObject* Runtime_RegExpExec(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
   CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
@@ -1673,8 +1638,7 @@
 }
 
 
-static MaybeObject* Runtime_RegExpConstructResult(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) {
   ASSERT(args.length() == 3);
   CONVERT_SMI_CHECKED(elements_count, args[0]);
   if (elements_count > JSArray::kMaxFastElementsLength) {
@@ -1707,8 +1671,7 @@
 }
 
 
-static MaybeObject* Runtime_RegExpInitializeObject(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
   AssertNoAllocation no_alloc;
   ASSERT(args.length() == 5);
   CONVERT_CHECKED(JSRegExp, regexp, args[0]);
@@ -1774,9 +1737,7 @@
 }
 
 
-static MaybeObject* Runtime_FinishArrayPrototypeSetup(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FinishArrayPrototypeSetup) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSArray, prototype, 0);
@@ -1805,8 +1766,7 @@
 }
 
 
-static MaybeObject* Runtime_SpecialArrayFunctions(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSObject, holder, 0);
@@ -1823,8 +1783,7 @@
 }
 
 
-static MaybeObject* Runtime_GetGlobalReceiver(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetGlobalReceiver) {
   // Returns a real global receiver, not the builtins object.
   Context* global_context =
       isolate->context()->global()->global_context();
@@ -1832,9 +1791,7 @@
 }
 
 
-static MaybeObject* Runtime_MaterializeRegExpLiteral(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
   CONVERT_ARG_CHECKED(FixedArray, literals, 0);
@@ -1864,8 +1821,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionGetName(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -1874,8 +1830,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionSetName(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -1886,9 +1841,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionRemovePrototype(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -1900,8 +1853,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionGetScript(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScript) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -1913,8 +1865,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionGetSourceCode(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -1923,9 +1874,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionGetScriptSourcePosition(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -1935,9 +1884,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionGetPositionForOffset(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
   ASSERT(args.length() == 2);
 
   CONVERT_CHECKED(Code, code, args[0]);
@@ -1950,9 +1897,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionSetInstanceClassName(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -1963,8 +1908,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionSetLength(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -1975,8 +1919,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionSetPrototype(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -1991,8 +1934,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionIsAPIFunction(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -2002,8 +1944,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionIsBuiltin(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -2013,8 +1954,7 @@
 }
 
 
-static MaybeObject* Runtime_SetCode(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
@@ -2077,9 +2017,7 @@
 }
 
 
-static MaybeObject* Runtime_SetExpectedNumberOfProperties(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -2102,8 +2040,7 @@
 }
 
 
-static MaybeObject* Runtime_StringCharCodeAt(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -2139,8 +2076,7 @@
 }
 
 
-static MaybeObject* Runtime_CharFromCode(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CharFromCode) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   return CharFromCode(isolate, args[0]);
@@ -2874,9 +2810,7 @@
 }
 
 
-static MaybeObject* Runtime_StringReplaceRegExpWithString(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
   ASSERT(args.length() == 4);
 
   CONVERT_CHECKED(String, subject, args[0]);
@@ -2978,8 +2912,7 @@
 }
 
 
-static MaybeObject* Runtime_StringIndexOf(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringIndexOf) {
   HandleScope scope(isolate);  // create a new handle scope
   ASSERT(args.length() == 3);
 
@@ -3031,8 +2964,7 @@
   return -1;
 }
 
-static MaybeObject* Runtime_StringLastIndexOf(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
   HandleScope scope(isolate);  // create a new handle scope
   ASSERT(args.length() == 3);
 
@@ -3089,8 +3021,7 @@
 }
 
 
-static MaybeObject* Runtime_StringLocaleCompare(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3138,8 +3069,7 @@
 }
 
 
-static MaybeObject* Runtime_SubString(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
@@ -3166,8 +3096,7 @@
 }
 
 
-static MaybeObject* Runtime_StringMatch(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
   ASSERT_EQ(3, args.length());
 
   CONVERT_ARG_CHECKED(String, subject, 0);
@@ -3533,8 +3462,7 @@
 }
 
 
-static MaybeObject* Runtime_RegExpExecMultiple(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
   ASSERT(args.length() == 4);
   HandleScope handles(isolate);
 
@@ -3589,8 +3517,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToRadixString(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3629,8 +3556,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToFixed(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3655,8 +3581,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToExponential(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3681,8 +3606,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToPrecision(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3792,8 +3716,7 @@
 }
 
 
-static MaybeObject* Runtime_GetProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3805,8 +3728,7 @@
 
 
 // KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
-static MaybeObject* Runtime_KeyedGetProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -3880,9 +3802,7 @@
 // Steps 9c & 12 - replace an existing data property with an accessor property.
 // Step 12 - update an existing accessor property with an accessor or generic
 //           descriptor.
-static MaybeObject* Runtime_DefineOrRedefineAccessorProperty(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
   ASSERT(args.length() == 5);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -3919,9 +3839,7 @@
 // Steps 9b & 12 - replace an existing accessor property with a data property.
 // Step 12 - update an existing data property with a data or generic
 //           descriptor.
-static MaybeObject* Runtime_DefineOrRedefineDataProperty(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
   ASSERT(args.length() == 4);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSObject, js_object, 0);
@@ -4157,8 +4075,7 @@
 }
 
 
-static MaybeObject* Runtime_SetProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
   NoHandleAllocation ha;
   RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
 
@@ -4191,9 +4108,7 @@
 
 // Set a local property, even if it is READ_ONLY.  If the property does not
 // exist, it will be added with attributes NONE.
-static MaybeObject* Runtime_IgnoreAttributesAndSetProperty(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
   NoHandleAllocation ha;
   RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
   CONVERT_CHECKED(JSObject, object, args[0]);
@@ -4214,8 +4129,7 @@
 }
 
 
-static MaybeObject* Runtime_DeleteProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
@@ -4246,8 +4160,7 @@
 }
 
 
-static MaybeObject* Runtime_HasLocalProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, key, args[1]);
@@ -4277,8 +4190,7 @@
 }
 
 
-static MaybeObject* Runtime_HasProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
 
@@ -4292,8 +4204,7 @@
 }
 
 
-static MaybeObject* Runtime_HasElement(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
 
@@ -4308,8 +4219,7 @@
 }
 
 
-static MaybeObject* Runtime_IsPropertyEnumerable(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -4326,8 +4236,7 @@
 }
 
 
-static MaybeObject* Runtime_GetPropertyNames(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSObject, object, 0);
@@ -4340,8 +4249,7 @@
 // all enumerable properties of the object and its prototypes
 // have none, the map of the object. This is used to speed up
 // the check for deletions during a for-in.
-static MaybeObject* Runtime_GetPropertyNamesFast(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
   ASSERT(args.length() == 1);
 
   CONVERT_CHECKED(JSObject, raw_object, args[0]);
@@ -4377,8 +4285,7 @@
 
 // Return the names of the local named properties.
 // args[0]: object
-static MaybeObject* Runtime_GetLocalPropertyNames(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   if (!args[0]->IsJSObject()) {
@@ -4464,8 +4371,7 @@
 
 // Return the names of the local indexed properties.
 // args[0]: object
-static MaybeObject* Runtime_GetLocalElementNames(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalElementNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   if (!args[0]->IsJSObject()) {
@@ -4482,8 +4388,7 @@
 
 // Return information on whether an object has a named or indexed interceptor.
 // args[0]: object
-static MaybeObject* Runtime_GetInterceptorInfo(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetInterceptorInfo) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   if (!args[0]->IsJSObject()) {
@@ -4501,9 +4406,7 @@
 
 // Return property names from named interceptor.
 // args[0]: object
-static MaybeObject* Runtime_GetNamedInterceptorPropertyNames(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetNamedInterceptorPropertyNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -4518,9 +4421,7 @@
 
 // Return element names from indexed interceptor.
 // args[0]: object
-static MaybeObject* Runtime_GetIndexedInterceptorElementNames(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetIndexedInterceptorElementNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -4533,8 +4434,7 @@
 }
 
 
-static MaybeObject* Runtime_LocalKeys(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
   ASSERT_EQ(args.length(), 1);
   CONVERT_CHECKED(JSObject, raw_object, args[0]);
   HandleScope scope(isolate);
@@ -4579,13 +4479,12 @@
 }
 
 
-static MaybeObject* Runtime_GetArgumentsProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
   // Compute the frame holding the arguments.
-  JavaScriptFrameIterator it;
+  JavaScriptFrameIterator it(isolate);
   it.AdvanceToArgumentsFrame();
   JavaScriptFrame* frame = it.frame();
 
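 
A companion change rides along with the macro rewrite: JavaScriptFrameIterator
now takes the isolate explicitly instead of reaching for ambient per-thread
state, and the same threading shows up below where StringToInt/StringToDouble
gain an isolate->unicode_cache() argument and DateParser::Parse gains one as
well. A minimal standalone sketch of the pattern, with invented names (this is
not V8 API):

    #include <cstdio>

    struct Isolate { int top_frame; };  // stand-in for v8::internal::Isolate

    // Before the change an iterator like this found its data through a
    // global/TLS lookup on construction; passing the Isolate in makes the
    // dependency explicit and lets several isolates coexist in one process.
    class FrameIteratorSketch {
     public:
      explicit FrameIteratorSketch(Isolate* isolate)
          : frame_(isolate->top_frame) {}
      int frame() const { return frame_; }
     private:
      int frame_;
    };

    int main() {
      Isolate isolate = { 42 };
      FrameIteratorSketch it(&isolate);
      std::printf("%d\n", it.frame());  // prints 42
      return 0;
    }
 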
@@ -4633,8 +4532,7 @@
 }
 
 
-static MaybeObject* Runtime_ToFastProperties(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
   HandleScope scope(isolate);
 
   ASSERT(args.length() == 1);
@@ -4650,8 +4548,7 @@
 }
 
 
-static MaybeObject* Runtime_ToSlowProperties(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ToSlowProperties) {
   HandleScope scope(isolate);
 
   ASSERT(args.length() == 1);
@@ -4664,8 +4561,7 @@
 }
 
 
-static MaybeObject* Runtime_ToBool(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -4675,8 +4571,7 @@
 
 // Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
 // Possible optimizations: put the type string into the oddballs.
-static MaybeObject* Runtime_Typeof(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Typeof) {
   NoHandleAllocation ha;
 
   Object* obj = args[0];
@@ -4735,8 +4630,7 @@
 }
 
 
-static MaybeObject* Runtime_StringToNumber(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(String, subject, args[0]);
@@ -4786,13 +4680,12 @@
   }
 
   // Slower case.
-  return isolate->heap()->NumberFromDouble(StringToDouble(subject, ALLOW_HEX));
+  return isolate->heap()->NumberFromDouble(
+      StringToDouble(isolate->unicode_cache(), subject, ALLOW_HEX));
 }
 
 
-static MaybeObject* Runtime_StringFromCharCodeArray(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringFromCharCodeArray) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -4872,8 +4765,7 @@
 }
 
 
-static MaybeObject* Runtime_URIEscape(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) {
   const char hex_chars[] = "0123456789ABCDEF";
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
@@ -4992,8 +4884,7 @@
 }
 
 
-static MaybeObject* Runtime_URIUnescape(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(String, source, args[0]);
@@ -5237,8 +5128,7 @@
 }
 
 
-static MaybeObject* Runtime_QuoteJSONString(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
   NoHandleAllocation ha;
   CONVERT_CHECKED(String, str, args[0]);
   if (!str->IsFlat()) {
@@ -5260,8 +5150,7 @@
 }
 
 
-static MaybeObject* Runtime_QuoteJSONStringComma(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) {
   NoHandleAllocation ha;
   CONVERT_CHECKED(String, str, args[0]);
   if (!str->IsFlat()) {
@@ -5282,8 +5171,7 @@
   }
 }
 
-static MaybeObject* Runtime_StringParseInt(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
   NoHandleAllocation ha;
 
   CONVERT_CHECKED(String, s, args[0]);
@@ -5292,18 +5180,18 @@
   s->TryFlatten();
 
   RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
-  double value = StringToInt(s, radix);
+  double value = StringToInt(isolate->unicode_cache(), s, radix);
   return isolate->heap()->NumberFromDouble(value);
 }
 
 
-static MaybeObject* Runtime_StringParseFloat(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
   NoHandleAllocation ha;
   CONVERT_CHECKED(String, str, args[0]);
 
   // ECMA-262 section 15.1.2.3, empty string is NaN
-  double value = StringToDouble(str, ALLOW_TRAILING_JUNK, OS::nan_value());
+  double value = StringToDouble(isolate->unicode_cache(),
+                                str, ALLOW_TRAILING_JUNK, OS::nan_value());
 
   // Create a number object from the value.
   return isolate->heap()->NumberFromDouble(value);
@@ -5589,15 +5477,13 @@
 }
 
 
-static MaybeObject* Runtime_StringToLowerCase(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToLowerCase) {
   return ConvertCase<ToLowerTraits>(
       args, isolate, isolate->runtime_state()->to_lower_mapping());
 }
 
 
-static MaybeObject* Runtime_StringToUpperCase(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToUpperCase) {
   return ConvertCase<ToUpperTraits>(
       args, isolate, isolate->runtime_state()->to_upper_mapping());
 }
@@ -5608,8 +5494,7 @@
 }
 
 
-static MaybeObject* Runtime_StringTrim(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
@@ -5659,8 +5544,7 @@
 }
 
 
-static MaybeObject* Runtime_StringSplit(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
   ASSERT(args.length() == 3);
   HandleScope handle_scope(isolate);
   CONVERT_ARG_CHECKED(String, subject, 0);
@@ -5791,8 +5675,7 @@
 
 // Converts a String to JSArray.
 // For example, "foo" => ["f", "o", "o"].
-static MaybeObject* Runtime_StringToArray(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(String, s, 0);
@@ -5840,8 +5723,7 @@
 }
 
 
-static MaybeObject* Runtime_NewStringWrapper(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(String, value, args[0]);
@@ -5856,8 +5738,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToString(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5868,9 +5749,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToStringSkipCache(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5881,8 +5760,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToInteger(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5896,9 +5774,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToIntegerMapMinusZero(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5917,8 +5793,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToJSUint32(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5927,8 +5802,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberToJSInt32(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5944,8 +5818,7 @@
 
 // Converts a Number to a Smi, if possible. Returns NaN if the number is not
 // a small integer.
-static MaybeObject* Runtime_NumberToSmi(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -5964,16 +5837,14 @@
 }
 
 
-static MaybeObject* Runtime_AllocateHeapNumber(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateHeapNumber) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 0);
   return isolate->heap()->AllocateHeapNumber(0);
 }
 
 
-static MaybeObject* Runtime_NumberAdd(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -5983,8 +5854,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberSub(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -5994,8 +5864,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberMul(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6005,8 +5874,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberUnaryMinus(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -6015,8 +5883,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberAlloc(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 0);
 
@@ -6024,8 +5891,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberDiv(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6035,8 +5901,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberMod(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6049,8 +5914,7 @@
 }
 
 
-static MaybeObject* Runtime_StringAdd(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, str1, args[0]);
@@ -6099,8 +5963,7 @@
 }
 
 
-static MaybeObject* Runtime_StringBuilderConcat(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
   CONVERT_CHECKED(JSArray, array, args[0]);
@@ -6213,8 +6076,7 @@
 }
 
 
-static MaybeObject* Runtime_StringBuilderJoin(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
   CONVERT_CHECKED(JSArray, array, args[0]);
@@ -6298,8 +6160,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberOr(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6309,8 +6170,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberAnd(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6320,8 +6180,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberXor(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6331,8 +6190,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberNot(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -6341,8 +6199,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberShl(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6352,8 +6209,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberShr(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6363,8 +6219,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberSar(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6374,8 +6229,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberEquals(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6394,8 +6248,7 @@
 }
 
 
-static MaybeObject* Runtime_StringEquals(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6413,8 +6266,7 @@
 }
 
 
-static MaybeObject* Runtime_NumberCompare(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
@@ -6429,9 +6281,7 @@
 
 // Compare two Smis as if they were converted to strings and then
 // compared lexicographically.
-static MaybeObject* Runtime_SmiLexicographicCompare(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
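 
The comment above fixes the contract but not the trick: the ordering it
describes is ordinary string comparison applied to decimal renderings. A
throwaway C++ reference implementation (deliberately slow; the runtime
function computes the same answer with Smi arithmetic and no allocation):

    #include <string>

    // Lexicographic compare of two small integers "as strings":
    // e.g. 10 orders before 9, because "10" < "9" character-wise,
    // even though 10 > 9 numerically.
    int SmiLexicographicCompareReference(int x, int y) {
      std::string a = std::to_string(x);
      std::string b = std::to_string(y);
      if (a == b) return 0;
      return a < b ? -1 : 1;
    }
 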
@@ -6554,8 +6404,7 @@
 }
 
 
-static MaybeObject* Runtime_StringCompare(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -6590,8 +6439,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_acos(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_acos()->Increment();
@@ -6601,8 +6449,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_asin(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_asin()->Increment();
@@ -6612,8 +6459,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_atan(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_atan()->Increment();
@@ -6626,8 +6472,7 @@
 static const double kPiDividedBy4 = 0.78539816339744830962;
 
 
-static MaybeObject* Runtime_Math_atan2(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   isolate->counters()->math_atan2()->Increment();
@@ -6650,8 +6495,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_ceil(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_ceil()->Increment();
@@ -6661,8 +6505,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_cos(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_cos()->Increment();
@@ -6672,8 +6515,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_exp(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_exp()->Increment();
@@ -6683,8 +6525,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_floor(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_floor()->Increment();
@@ -6694,8 +6535,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_log(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_log()->Increment();
@@ -6705,8 +6545,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_pow(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   isolate->counters()->math_pow()->Increment();
@@ -6726,8 +6565,7 @@
 
 // Fast version of Math.pow if we know that y is not an integer and
 // y is not -0.5 or 0.5. Used as the slow case from codegen.
-static MaybeObject* Runtime_Math_pow_cfunction(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   CONVERT_DOUBLE_CHECKED(x, args[0]);
@@ -6742,8 +6580,7 @@
 }
 
 
-static MaybeObject* Runtime_RoundNumber(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_round()->Increment();
@@ -6760,9 +6597,16 @@
   int exponent = number->get_exponent();
   int sign = number->get_sign();
 
-  // We compare with kSmiValueSize - 3 because (2^30 - 0.1) has exponent 29 and
-  // should be rounded to 2^30, which is not smi.
-  if (!sign && exponent <= kSmiValueSize - 3) {
+  if (exponent < -1) {
+    // Numbers in the range ]-0.5..0.5[ always round to +/-zero.
+    if (sign) return isolate->heap()->minus_zero_value();
+    return Smi::FromInt(0);
+  }
+
+  // We compare with kSmiValueSize - 2 because (2^30 - 0.1) has exponent 29 and
+  // should be rounded to 2^30, which is not a smi (for 31-bit smis; a similar
+  // argument holds for 32-bit smis).
+  if (!sign && exponent < kSmiValueSize - 2) {
     return Smi::FromInt(static_cast<int>(value + 0.5));
   }
 
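 
The bound in the new comment can be checked by hand: a positive double with
unbiased binary exponent e lies in [2^e, 2^(e+1)), so value + 0.5 can round as
high as 2^(e+1), and with 31-bit smis the largest representable value is
2^30 - 1. Requiring 2^(e+1) <= 2^30 - 1 gives e <= 28, i.e.
exponent < kSmiValueSize - 2, which is exactly the guard. A standalone sketch
of that test for positive inputs (kSmiValueSize fixed at 31 here by
assumption; the sign and the ]-0.5..0.5[ range are handled separately above):

    #include <cmath>
    #include <cstdio>

    // frexp() normalizes value = m * 2^k with m in [0.5, 1), so the
    // unbiased IEEE exponent the runtime reads off the HeapNumber is k - 1.
    bool RoundsWithinSmiRange(double value) {
      const int kSmiValueSize = 31;  // assumption: 32-bit tagged smis
      int k = 0;
      std::frexp(value, &k);
      return (k - 1) < kSmiValueSize - 2;
    }

    int main() {
      std::printf("%d\n", RoundsWithinSmiRange(1073741824.0 - 0.1));  // 0
      std::printf("%d\n", RoundsWithinSmiRange(12345.6));             // 1
      return 0;
    }
 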
@@ -6779,8 +6623,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_sin(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_sin()->Increment();
@@ -6790,8 +6633,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_sqrt(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_sqrt()->Increment();
@@ -6801,8 +6643,7 @@
 }
 
 
-static MaybeObject* Runtime_Math_tan(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   isolate->counters()->math_tan()->Increment();
@@ -6857,8 +6698,7 @@
 }
 
 
-static MaybeObject* Runtime_DateMakeDay(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
@@ -7157,8 +6997,7 @@
 }
 
 
-static MaybeObject* Runtime_DateYMDFromTime(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateYMDFromTime) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -7181,8 +7020,7 @@
 }
 
 
-static MaybeObject* Runtime_NewArgumentsFast(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
@@ -7218,8 +7056,7 @@
 }
 
 
-static MaybeObject* Runtime_NewClosure(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(Context, context, 0);
@@ -7238,42 +7075,69 @@
   return *result;
 }
 
-static MaybeObject* Runtime_NewObjectFromBound(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+
+static SmartPointer<Object**> GetNonBoundArguments(int bound_argc,
+                                                   int* total_argc) {
+  // Find frame containing arguments passed to the caller.
+  JavaScriptFrameIterator it;
+  JavaScriptFrame* frame = it.frame();
+  List<JSFunction*> functions(2);
+  frame->GetFunctions(&functions);
+  if (functions.length() > 1) {
+    int inlined_frame_index = functions.length() - 1;
+    JSFunction* inlined_function = functions[inlined_frame_index];
+    int args_count = inlined_function->shared()->formal_parameter_count();
+    ScopedVector<SlotRef> args_slots(args_count);
+    SlotRef::ComputeSlotMappingForArguments(frame,
+                                            inlined_frame_index,
+                                            &args_slots);
+
+    *total_argc = bound_argc + args_count;
+    SmartPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+    for (int i = 0; i < args_count; i++) {
+      Handle<Object> val = args_slots[i].GetValue();
+      param_data[bound_argc + i] = val.location();
+    }
+    return param_data;
+  } else {
+    it.AdvanceToArgumentsFrame();
+    frame = it.frame();
+    int args_count = frame->ComputeParametersCount();
+
+    *total_argc = bound_argc + args_count;
+    SmartPointer<Object**> param_data(NewArray<Object**>(*total_argc));
+    for (int i = 0; i < args_count; i++) {
+      Handle<Object> val = Handle<Object>(frame->GetParameter(i));
+      param_data[bound_argc + i] = val.location();
+    }
+    return param_data;
+  }
+}
+
+
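+ The new GetNonBoundArguments helper exists because the caller's frame may be
+ an optimized frame with inlined functions, in which case the actual arguments
+ have to be reconstructed from slot mappings rather than read off a real
+ arguments frame. Either way its output feeds the same concatenation in
+ Runtime_NewObjectFromBound below: bound arguments first, then the caller's.
+ Reduced to that step alone, the layout being built is (plain C++, invented
+ names, not the V8 types):
+
+     #include <vector>
+
+     typedef void* ObjectRef;  // stand-in for the handle locations collected
+
+     // Builds [bound..., caller...] -- the argument vector handed to the
+     // constructor when a bound function is used with 'new'.
+     std::vector<ObjectRef> PrependBoundArguments(
+         const std::vector<ObjectRef>& bound,
+         const std::vector<ObjectRef>& caller) {
+       std::vector<ObjectRef> all;
+       all.reserve(bound.size() + caller.size());
+       all.insert(all.end(), bound.begin(), bound.end());
+       all.insert(all.end(), caller.begin(), caller.end());
+       return all;
+     }
+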
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   // First argument is a function to use as a constructor.
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
 
   // Second argument is either null or an array of bound arguments.
-  FixedArray* bound_args = NULL;
+  Handle<FixedArray> bound_args;
   int bound_argc = 0;
   if (!args[1]->IsNull()) {
     CONVERT_ARG_CHECKED(JSArray, params, 1);
     RUNTIME_ASSERT(params->HasFastElements());
-    bound_args = FixedArray::cast(params->elements());
+    bound_args = Handle<FixedArray>(FixedArray::cast(params->elements()));
     bound_argc = Smi::cast(params->length())->value();
   }
 
-  // Find frame containing arguments passed to the caller.
-  JavaScriptFrameIterator it;
-  JavaScriptFrame* frame = it.frame();
-  ASSERT(!frame->is_optimized());
-  it.AdvanceToArgumentsFrame();
-  frame = it.frame();
-  int argc = frame->ComputeParametersCount();
-
-  // Prepend bound arguments to caller's arguments.
-  int total_argc = bound_argc + argc;
-  SmartPointer<Object**> param_data(NewArray<Object**>(total_argc));
+  int total_argc = 0;
+  SmartPointer<Object**> param_data =
+      GetNonBoundArguments(bound_argc, &total_argc);
   for (int i = 0; i < bound_argc; i++) {
     Handle<Object> val = Handle<Object>(bound_args->get(i));
     param_data[i] = val.location();
   }
-  for (int i = 0; i < argc; i++) {
-    Handle<Object> val = Handle<Object>(frame->GetParameter(i));
-    param_data[bound_argc + i] = val.location();
-  }
 
   bool exception = false;
   Handle<Object> result =
@@ -7304,8 +7168,7 @@
 }
 
 
-static MaybeObject* Runtime_NewObject(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -7385,8 +7248,7 @@
 }
 
 
-static MaybeObject* Runtime_FinalizeInstanceSize(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FinalizeInstanceSize) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -7398,8 +7260,7 @@
 }
 
 
-static MaybeObject* Runtime_LazyCompile(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyCompile) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -7430,8 +7291,7 @@
 }
 
 
-static MaybeObject* Runtime_LazyRecompile(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   Handle<JSFunction> function = args.at<JSFunction>(0);
@@ -7462,8 +7322,7 @@
 }
 
 
-static MaybeObject* Runtime_NotifyDeoptimized(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   RUNTIME_ASSERT(args[0]->IsSmi());
@@ -7473,15 +7332,14 @@
   ASSERT(isolate->heap()->IsAllocationAllowed());
   int frames = deoptimizer->output_count();
 
-  JavaScriptFrameIterator it;
-  JavaScriptFrame* frame = NULL;
-  for (int i = 0; i < frames; i++) {
-    if (i != 0) it.Advance();
-    frame = it.frame();
-    deoptimizer->InsertHeapNumberValues(frames - i - 1, frame);
-  }
+  deoptimizer->MaterializeHeapNumbers();
   delete deoptimizer;
 
+  JavaScriptFrameIterator it(isolate);
+  JavaScriptFrame* frame = NULL;
+  for (int i = 0; i < frames - 1; i++) it.Advance();
+  frame = it.frame();
+
   RUNTIME_ASSERT(frame->function()->IsJSFunction());
   Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
   Handle<Object> arguments;
@@ -7537,16 +7395,14 @@
 }
 
 
-static MaybeObject* Runtime_NotifyOSR(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) {
   Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
   delete deoptimizer;
   return isolate->heap()->undefined_value();
 }
 
 
-static MaybeObject* Runtime_DeoptimizeFunction(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -7558,9 +7414,17 @@
 }
 
 
-static MaybeObject* Runtime_CompileForOnStackReplacement(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  if (!function->IsOptimizable()) return isolate->heap()->undefined_value();
+  function->MarkForLazyRecompilation();
+  return isolate->heap()->undefined_value();
+}
+
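+ Runtime_OptimizeFunctionOnNextCall is new in this merge: it marks an
+ optimizable function for lazy recompilation so its next invocation goes
+ through the optimizing compiler. Runtime entries like this are normally
+ reached from script through %-natives syntax when V8 runs with
+ --allow-natives-syntax; a sketch using the embedder API of this era (treat
+ the exact API shape as an assumption):
+
+     #include <v8.h>
+     using namespace v8;
+
+     int main() {
+       const char flags[] = "--allow-natives-syntax";
+       V8::SetFlagsFromString(flags, sizeof(flags) - 1);
+
+       HandleScope scope;
+       Persistent<Context> context = Context::New();
+       Context::Scope context_scope(context);
+
+       const char* source =
+           "function f(x) { return x + 1; }"
+           "f(1); f(2);"                      // warm up, collect type info
+           "%OptimizeFunctionOnNextCall(f);"  // mark for recompilation
+           "f(3);";                           // runs the optimized code
+       Script::Compile(String::New(source))->Run();
+
+       context.Dispose();
+       return 0;
+     }
+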
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -7579,7 +7443,7 @@
     // indirectly recursive and (b) an optimized invocation has been
     // deoptimized so that we are currently in an unoptimized activation.
     // Check for optimized activations of this function.
-    JavaScriptFrameIterator it;
+    JavaScriptFrameIterator it(isolate);
     while (succeeded && !it.done()) {
       JavaScriptFrame* frame = it.frame();
       succeeded = !frame->is_optimized() || frame->function() != *function;
@@ -7591,10 +7455,10 @@
   if (succeeded) {
     // The top JS function is this one, the PC is somewhere in the
     // unoptimized code.
-    JavaScriptFrameIterator it;
+    JavaScriptFrameIterator it(isolate);
     JavaScriptFrame* frame = it.frame();
     ASSERT(frame->function() == *function);
-    ASSERT(frame->LookupCode(isolate) == *unoptimized);
+    ASSERT(frame->LookupCode() == *unoptimized);
     ASSERT(unoptimized->contains(frame->pc()));
 
     // Use linear search of the unoptimized code's stack check table to find
@@ -7674,8 +7538,7 @@
 }
 
 
-static MaybeObject* Runtime_GetFunctionDelegate(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionDelegate) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   RUNTIME_ASSERT(!args[0]->IsJSFunction());
@@ -7683,8 +7546,7 @@
 }
 
 
-static MaybeObject* Runtime_GetConstructorDelegate(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   RUNTIME_ASSERT(!args[0]->IsJSFunction());
@@ -7692,8 +7554,7 @@
 }
 
 
-static MaybeObject* Runtime_NewContext(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewContext) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -7744,24 +7605,21 @@
 }
 
 
-static MaybeObject* Runtime_PushContext(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PushContext) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   return PushContextHelper(isolate, args[0], false);
 }
 
 
-static MaybeObject* Runtime_PushCatchContext(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
   return PushContextHelper(isolate, args[0], true);
 }
 
 
-static MaybeObject* Runtime_DeleteContextSlot(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteContextSlot) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
@@ -7921,21 +7779,17 @@
 }
 
 
-static ObjectPair Runtime_LoadContextSlot(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlot) {
   return LoadContextSlotHelper(args, isolate, true);
 }
 
 
-static ObjectPair Runtime_LoadContextSlotNoReferenceError(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(ObjectPair, Runtime_LoadContextSlotNoReferenceError) {
   return LoadContextSlotHelper(args, isolate, false);
 }
 
 
-static MaybeObject* Runtime_StoreContextSlot(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreContextSlot) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
 
@@ -7985,8 +7839,17 @@
     // The property exists in the extension context.
     context_ext = Handle<JSObject>::cast(holder);
   } else {
-    // The property was not found. It needs to be stored in the global context.
+    // The property was not found.
     ASSERT(attributes == ABSENT);
+
+    if (strict_mode == kStrictMode) {
+      // Throw in strict mode (assignment to undefined variable).
+      Handle<Object> error =
+        isolate->factory()->NewReferenceError(
+            "not_defined", HandleVector(&name, 1));
+      return isolate->Throw(*error);
+    }
+    // In non-strict mode, the property is stored in the global context.
     attributes = NONE;
     context_ext = Handle<JSObject>(isolate->context()->global());
   }
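 
This hunk is a behavioral change, not just plumbing: an assignment whose
target resolves nowhere used to be stored on the global context
unconditionally, but ES5 strict mode requires a ReferenceError instead. A
minimal model of the fallback logic, with invented names standing in for the
context machinery:

    #include <map>
    #include <stdexcept>
    #include <string>

    enum StrictModeFlag { kNonStrictMode, kStrictMode };

    // Sketch of the absent-property branch above: throw under strict mode,
    // otherwise silently create the binding on the global object.
    void StoreUnresolvedSketch(std::map<std::string, int>* global,
                               const std::string& name,
                               int value,
                               StrictModeFlag strict_mode) {
      if (strict_mode == kStrictMode) {
        // models isolate->Throw(NewReferenceError("not_defined", ...))
        throw std::runtime_error(name + " is not defined");
      }
      (*global)[name] = value;
    }
 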
@@ -8009,8 +7872,7 @@
 }
 
 
-static MaybeObject* Runtime_Throw(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Throw) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -8018,8 +7880,7 @@
 }
 
 
-static MaybeObject* Runtime_ReThrow(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -8027,16 +7888,13 @@
 }
 
 
-static MaybeObject* Runtime_PromoteScheduledException(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PromoteScheduledException) {
   ASSERT_EQ(0, args.length());
   return isolate->PromoteScheduledException();
 }
 
 
-static MaybeObject* Runtime_ThrowReferenceError(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowReferenceError) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -8048,8 +7906,7 @@
 }
 
 
-static MaybeObject* Runtime_StackGuard(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
   ASSERT(args.length() == 0);
 
   // First check if this is a real stack overflow.
@@ -8148,8 +8005,7 @@
 }
 
 
-static MaybeObject* Runtime_TraceEnter(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
   ASSERT(args.length() == 0);
   NoHandleAllocation ha;
   PrintTransition(NULL);
@@ -8157,16 +8013,14 @@
 }
 
 
-static MaybeObject* Runtime_TraceExit(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceExit) {
   NoHandleAllocation ha;
   PrintTransition(args[0]);
   return args[0];  // return TOS
 }
 
 
-static MaybeObject* Runtime_DebugPrint(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -8174,7 +8028,7 @@
   if (args[0]->IsString()) {
     // If we have a string, assume it's a code "marker"
     // and print some interesting cpu debugging info.
-    JavaScriptFrameIterator it;
+    JavaScriptFrameIterator it(isolate);
     JavaScriptFrame* frame = it.frame();
     PrintF("fp = %p, sp = %p, caller_sp = %p: ",
            frame->fp(), frame->sp(), frame->caller_sp());
@@ -8197,8 +8051,7 @@
 }
 
 
-static MaybeObject* Runtime_DebugTrace(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) {
   ASSERT(args.length() == 0);
   NoHandleAllocation ha;
   isolate->PrintStack();
@@ -8206,8 +8059,7 @@
 }
 
 
-static MaybeObject* Runtime_DateCurrentTime(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCurrentTime) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 0);
 
@@ -8220,8 +8072,7 @@
 }
 
 
-static MaybeObject* Runtime_DateParseString(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
@@ -8237,10 +8088,14 @@
   RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
   bool result;
   if (str->IsAsciiRepresentation()) {
-    result = DateParser::Parse(str->ToAsciiVector(), output_array);
+    result = DateParser::Parse(str->ToAsciiVector(),
+                               output_array,
+                               isolate->unicode_cache());
   } else {
     ASSERT(str->IsTwoByteRepresentation());
-    result = DateParser::Parse(str->ToUC16Vector(), output_array);
+    result = DateParser::Parse(str->ToUC16Vector(),
+                               output_array,
+                               isolate->unicode_cache());
   }
 
   if (result) {
@@ -8251,8 +8106,7 @@
 }
 
 
-static MaybeObject* Runtime_DateLocalTimezone(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -8262,8 +8116,7 @@
 }
 
 
-static MaybeObject* Runtime_DateLocalTimeOffset(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimeOffset) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 0);
 
@@ -8271,9 +8124,7 @@
 }
 
 
-static MaybeObject* Runtime_DateDaylightSavingsOffset(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateDaylightSavingsOffset) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -8282,8 +8133,7 @@
 }
 
 
-static MaybeObject* Runtime_GlobalReceiver(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) {
   ASSERT(args.length() == 1);
   Object* global = args[0];
   if (!global->IsJSGlobalObject()) return isolate->heap()->null_value();
@@ -8291,7 +8141,7 @@
 }
 
 
-static MaybeObject* Runtime_ParseJson(RUNTIME_CALLING_CONVENTION) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
   HandleScope scope(isolate);
   ASSERT_EQ(1, args.length());
   CONVERT_ARG_CHECKED(String, source, 0);
@@ -8306,8 +8156,7 @@
 }
 
 
-static MaybeObject* Runtime_CompileString(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
   HandleScope scope(isolate);
   ASSERT_EQ(1, args.length());
   CONVERT_ARG_CHECKED(String, source, 0);
@@ -8346,9 +8195,7 @@
 }
 
 
-static ObjectPair Runtime_ResolvePossiblyDirectEval(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
   ASSERT(args.length() == 4);
 
   HandleScope scope(isolate);
@@ -8424,9 +8271,7 @@
 }
 
 
-static ObjectPair Runtime_ResolvePossiblyDirectEvalNoLookup(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEvalNoLookup) {
   ASSERT(args.length() == 4);
 
   HandleScope scope(isolate);
@@ -8449,9 +8294,7 @@
 }
 
 
-static MaybeObject* Runtime_SetNewFunctionAttributes(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNewFunctionAttributes) {
   // This utility adjusts the property attributes for newly created Function
   // object ("new Function(...)") by changing the map.
   // All it does is changing the prototype property to enumerable
@@ -8471,8 +8314,7 @@
 }
 
 
-static MaybeObject* Runtime_AllocateInNewSpace(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
   // Allocate a block of memory in NewSpace (filled with a filler).
   // Use as fallback for allocation in generated code when NewSpace
   // is full.
@@ -8497,8 +8339,7 @@
 // Push an object onto an array of objects if it is not already in the
 // array.  Returns true if the element was pushed onto the array and
 // false otherwise.
-static MaybeObject* Runtime_PushIfAbsent(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSArray, array, args[0]);
   CONVERT_CHECKED(JSObject, element, args[1]);
@@ -8947,8 +8788,7 @@
  * TODO(581): Fix non-compliance for very large concatenations and update to
  * follow the ECMAScript 5 specification.
  */
-static MaybeObject* Runtime_ArrayConcat(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
   ASSERT(args.length() == 1);
   HandleScope handle_scope(isolate);
 
@@ -9036,8 +8876,7 @@
 
 // This will not allocate (flatten the string), but it may run
 // very slowly for very deeply nested ConsStrings.  For debugging use only.
-static MaybeObject* Runtime_GlobalPrint(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -9055,8 +8894,7 @@
 // and are followed by a non-existing element. Does not change the length
 // property.
 // Returns the number of non-undefined elements collected.
-static MaybeObject* Runtime_RemoveArrayHoles(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSObject, object, args[0]);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
@@ -9065,8 +8903,7 @@
 
 
 // Move contents of argument 0 (an array) to argument 1 (an array)
-static MaybeObject* Runtime_MoveArrayContents(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSArray, from, args[0]);
   CONVERT_CHECKED(JSArray, to, args[1]);
@@ -9093,9 +8930,7 @@
 
 
 // How many elements does this object/array have?
-static MaybeObject* Runtime_EstimateNumberOfElements(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(JSObject, object, args[0]);
   HeapObject* elements = object->elements();
@@ -9109,8 +8944,7 @@
 }
 
 
-static MaybeObject* Runtime_SwapElements(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SwapElements) {
   HandleScope handle_scope(isolate);
 
   ASSERT_EQ(3, args.length());
@@ -9145,8 +8979,7 @@
 // intervals (pair of a negative integer (-start-1) followed by a
 // positive (length)) or undefined values.
 // Intervals can span over some keys that are not in the object.
-static MaybeObject* Runtime_GetArrayKeys(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSObject, array, 0);
@@ -9186,8 +9019,7 @@
 // to the way accessors are implemented, it is set for both the getter
 // and setter on the first call to DefineAccessor and ignored on
 // subsequent calls.
-static MaybeObject* Runtime_DefineAccessor(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineAccessor) {
   RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
   // Compute attributes.
   PropertyAttributes attributes = NONE;
@@ -9207,8 +9039,7 @@
 }
 
 
-static MaybeObject* Runtime_LookupAccessor(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
   ASSERT(args.length() == 3);
   CONVERT_CHECKED(JSObject, obj, args[0]);
   CONVERT_CHECKED(String, name, args[1]);
@@ -9218,8 +9049,7 @@
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-static MaybeObject* Runtime_DebugBreak(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugBreak) {
   ASSERT(args.length() == 0);
   return Execution::DebugBreakHelper();
 }
@@ -9241,8 +9071,7 @@
 // args[0]: debug event listener function to set or null or undefined for
 //          clearing the event listener function
 // args[1]: object supplied during callback
-static MaybeObject* Runtime_SetDebugEventListener(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) {
   ASSERT(args.length() == 2);
   RUNTIME_ASSERT(args[0]->IsJSFunction() ||
                  args[0]->IsUndefined() ||
@@ -9255,8 +9084,7 @@
 }
 
 
-static MaybeObject* Runtime_Break(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Break) {
   ASSERT(args.length() == 0);
   isolate->stack_guard()->DebugBreak();
   return isolate->heap()->undefined_value();
@@ -9332,9 +9160,7 @@
 // 4: Setter function if defined
 // Items 2-4 are only filled if the property has either a getter or a setter
 // defined through __defineGetter__ and/or __defineSetter__.
-static MaybeObject* Runtime_DebugGetPropertyDetails(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) {
   HandleScope scope(isolate);
 
   ASSERT(args.length() == 2);
@@ -9434,8 +9260,7 @@
 }
 
 
-static MaybeObject* Runtime_DebugGetProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) {
   HandleScope scope(isolate);
 
   ASSERT(args.length() == 2);
@@ -9454,9 +9279,7 @@
 
 // Return the property type calculated from the property details.
 // args[0]: smi with property details.
-static MaybeObject* Runtime_DebugPropertyTypeFromDetails(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(Smi, details, args[0]);
   PropertyType type = PropertyDetails(details).type();
@@ -9466,9 +9289,7 @@
 
 // Return the property attribute calculated from the property details.
 // args[0]: smi with property details.
-static MaybeObject* Runtime_DebugPropertyAttributesFromDetails(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(Smi, details, args[0]);
   PropertyAttributes attributes = PropertyDetails(details).attributes();
@@ -9478,9 +9299,7 @@
 
 // Return the property insertion index calculated from the property details.
 // args[0]: smi with property details.
-static MaybeObject* Runtime_DebugPropertyIndexFromDetails(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(Smi, details, args[0]);
   int index = PropertyDetails(details).index();
@@ -9491,9 +9310,7 @@
 // Return property value from named interceptor.
 // args[0]: object
 // args[1]: property name
-static MaybeObject* Runtime_DebugNamedInterceptorPropertyValue(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -9508,9 +9325,7 @@
 // Return element value from indexed interceptor.
 // args[0]: object
 // args[1]: index
-static MaybeObject* Runtime_DebugIndexedInterceptorElementValue(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
@@ -9521,8 +9336,7 @@
 }
 
 
-static MaybeObject* Runtime_CheckExecutionState(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckExecutionState) {
   ASSERT(args.length() >= 1);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   // Check that the break id is valid.
@@ -9536,14 +9350,14 @@
 }
 
 
-static MaybeObject* Runtime_GetFrameCount(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameCount) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
   // Check arguments.
   Object* result;
-  { MaybeObject* maybe_result = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_result = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
@@ -9554,7 +9368,7 @@
     // If there is no JavaScript stack frame, the count is 0.
     return Smi::FromInt(0);
   }
-  for (JavaScriptFrameIterator it(id); !it.done(); it.Advance()) n++;
+  for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) n++;
   return Smi::FromInt(n);
 }
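
A second pattern starts here: JavaScriptFrameIterator now takes the isolate
explicitly instead of fetching it from thread-local storage on every
construction. A hedged usage sketch, with names assumed from the surrounding
code:

// Count the JavaScript frames for the current break frame id (sketch).
StackFrame::Id id = isolate->debug()->break_frame_id();
int frame_count = 0;
for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) {
  frame_count++;  // one visible JavaScript frame per iteration
}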
 
@@ -9587,14 +9401,14 @@
 // Arguments name, value
 // Locals name, value
 // Return value if any
-static MaybeObject* Runtime_GetFrameDetails(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
   // Check arguments.
   Object* check;
-  { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
   CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
@@ -9607,7 +9421,7 @@
     return heap->undefined_value();
   }
   int count = 0;
-  JavaScriptFrameIterator it(id);
+  JavaScriptFrameIterator it(isolate, id);
   for (; !it.done(); it.Advance()) {
     if (count == index) break;
     count++;
@@ -9615,7 +9429,7 @@
   if (it.done()) return heap->undefined_value();
 
   bool is_optimized_frame =
-      it.frame()->LookupCode(isolate)->kind() == Code::OPTIMIZED_FUNCTION;
+      it.frame()->LookupCode()->kind() == Code::OPTIMIZED_FUNCTION;
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
@@ -9630,7 +9444,7 @@
 
   // Find source position.
   int position =
-      it.frame()->LookupCode(isolate)->SourcePosition(it.frame()->pc());
+      it.frame()->LookupCode()->SourcePosition(it.frame()->pc());
 
   // Check for constructor frame.
   bool constructor = it.frame()->IsConstructor();
@@ -9692,7 +9506,7 @@
   // to the frame information.
   Handle<Object> return_value = isolate->factory()->undefined_value();
   if (at_return) {
-    StackFrameIterator it2;
+    StackFrameIterator it2(isolate);
     Address internal_frame_sp = NULL;
     while (!it2.done()) {
       if (it2.frame()->is_internal()) {
@@ -10038,6 +9852,10 @@
       at_local_ = index < 0;
     } else if (context_->is_function_context()) {
       at_local_ = true;
+    } else if (context_->closure() != *function_) {
+      // The context_ is a with block from the outer function.
+      ASSERT(context_->has_extension());
+      at_local_ = true;
     }
   }
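
The new branch covers the case where context_ is a with-context created by an
enclosing function, so its closure() is not the function being inspected. An
illustrative shape of that chain, written as a comment because it is an
assumption about the intended case rather than code from the patch:

// function outer() {
//   var obj = { x: 1 };
//   with (obj) {
//     function inner() { return x; }  // inner's context_ is outer's
//   }                                 // with-context: closure() == outer,
// }                                   // hence the has_extension() assert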
 
@@ -10211,21 +10029,21 @@
 };
 
 
-static MaybeObject* Runtime_GetScopeCount(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeCount) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
   // Check arguments.
   Object* check;
-  { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
   CONVERT_CHECKED(Smi, wrapped_id, args[1]);
 
   // Get the frame where the debugging is performed.
   StackFrame::Id id = UnwrapFrameId(wrapped_id);
-  JavaScriptFrameIterator it(id);
+  JavaScriptFrameIterator it(isolate, id);
   JavaScriptFrame* frame = it.frame();
 
   // Count the visible scopes.
@@ -10250,14 +10068,14 @@
 // The array returned contains the following information:
 // 0: Scope type
 // 1: Scope object
-static MaybeObject* Runtime_GetScopeDetails(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScopeDetails) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
 
   // Check arguments.
   Object* check;
-  { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
   CONVERT_CHECKED(Smi, wrapped_id, args[1]);
@@ -10265,7 +10083,7 @@
 
   // Get the frame where the debugging is performed.
   StackFrame::Id id = UnwrapFrameId(wrapped_id);
-  JavaScriptFrameIterator frame_it(id);
+  JavaScriptFrameIterator frame_it(isolate, id);
   JavaScriptFrame* frame = frame_it.frame();
 
   // Find the requested scope.
@@ -10292,8 +10110,7 @@
 }
 
 
-static MaybeObject* Runtime_DebugPrintScopes(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrintScopes) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 0);
 
@@ -10309,14 +10126,14 @@
 }
 
 
-static MaybeObject* Runtime_GetThreadCount(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadCount) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
   // Check arguments.
   Object* result;
-  { MaybeObject* maybe_result = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_result = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
@@ -10345,14 +10162,14 @@
 // The array returned contains the following information:
 // 0: Is current thread?
 // 1: Thread id
-static MaybeObject* Runtime_GetThreadDetails(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetThreadDetails) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
   // Check arguments.
   Object* check;
-  { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
   CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
@@ -10367,8 +10184,7 @@
     details->set(kThreadDetailsCurrentThreadIndex,
                  isolate->heap()->true_value());
     details->set(kThreadDetailsThreadIdIndex,
-                 Smi::FromInt(
-                     isolate->thread_manager()->CurrentId()));
+                 Smi::FromInt(ThreadId::Current().ToInteger()));
   } else {
     // Find the thread with the requested index.
     int n = 1;
@@ -10385,7 +10201,8 @@
     // Fill the details.
     details->set(kThreadDetailsCurrentThreadIndex,
                  isolate->heap()->false_value());
-    details->set(kThreadDetailsThreadIdIndex, Smi::FromInt(thread->id()));
+    details->set(kThreadDetailsThreadIdIndex,
+                 Smi::FromInt(thread->id().ToInteger()));
   }
 
   // Convert to JS array and return.
@@ -10395,8 +10212,7 @@
 
 // Sets the disable break state
 // args[0]: disable break state
-static MaybeObject* Runtime_SetDisableBreak(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDisableBreak) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_BOOLEAN_CHECKED(disable_break, args[0]);
@@ -10405,8 +10221,7 @@
 }
 
 
-static MaybeObject* Runtime_GetBreakLocations(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetBreakLocations) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
@@ -10425,8 +10240,7 @@
 // args[0]: function
 // args[1]: number: break source position (within the function source)
 // args[2]: number: break point object
-static MaybeObject* Runtime_SetFunctionBreakPoint(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(JSFunction, fun, 0);
@@ -10527,8 +10341,7 @@
 // args[0]: script to set break point in
 // args[1]: number: break source position (within the script source)
 // args[2]: number: break point object
-static MaybeObject* Runtime_SetScriptBreakPoint(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(JSValue, wrapper, 0);
@@ -10562,8 +10375,7 @@
 
 // Clear a break point
 // args[0]: number: break point object
-static MaybeObject* Runtime_ClearBreakPoint(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearBreakPoint) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   Handle<Object> break_point_object_arg = args.at<Object>(0);
@@ -10578,8 +10390,7 @@
 // Change the state of break on exceptions.
 // args[0]: Enum value indicating whether to affect caught/uncaught exceptions.
 // args[1]: Boolean indicating on/off.
-static MaybeObject* Runtime_ChangeBreakOnException(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ChangeBreakOnException) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   RUNTIME_ASSERT(args[0]->IsNumber());
@@ -10597,8 +10408,7 @@
 
 // Returns the state of break on exceptions
 // args[0]: boolean indicating uncaught exceptions
-static MaybeObject* Runtime_IsBreakOnException(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsBreakOnException) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   RUNTIME_ASSERT(args[0]->IsNumber());
@@ -10615,13 +10425,13 @@
 // args[1]: step action from the enumeration StepAction
 // args[2]: number of times to perform the step; for step out, it is the
 //          number of frames to step down.
-static MaybeObject* Runtime_PrepareStep(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   // Check arguments.
   Object* check;
-  { MaybeObject* maybe_check = Runtime_CheckExecutionState(args, isolate);
+  { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
   if (!args[1]->IsNumber() || !args[2]->IsNumber()) {
@@ -10655,8 +10465,7 @@
 
 
 // Clear all stepping set by PrepareStep.
-static MaybeObject* Runtime_ClearStepping(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearStepping) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 0);
   isolate->debug()->ClearStepping();
@@ -10676,7 +10485,7 @@
   // Recursively copy the with contexts.
   Handle<Context> previous(context_chain->previous());
   Handle<JSObject> extension(JSObject::cast(context_chain->extension()));
-  Handle<Context> context = CopyWithContextChain(function_context, previous);
+  Handle<Context> context = CopyWithContextChain(previous, function_context);
   return context->GetIsolate()->factory()->NewWithContext(
       context, extension, context_chain->IsCatchContext());
 }
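
The fix above swaps the recursion's arguments so the chain link, not the
function context, is walked. For orientation, a sketch of the whole helper
with an assumed base case; only the recursive half appears in the hunk:

static Handle<Context> CopyWithContextChain(Handle<Context> context_chain,
                                            Handle<Context> function_context) {
  // Assumed base case: at the end of the chain, return the function context.
  if (context_chain->is_function_context()) return function_context;

  // Recursively copy the with contexts (from the hunk above).
  Handle<Context> previous(context_chain->previous());
  Handle<JSObject> extension(JSObject::cast(context_chain->extension()));
  Handle<Context> context = CopyWithContextChain(previous, function_context);
  return context->GetIsolate()->factory()->NewWithContext(
      context, extension, context_chain->IsCatchContext());
}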
@@ -10739,16 +10548,15 @@
 // stack frame presenting the same view of the values of parameters and
 // local variables as if the piece of JavaScript was evaluated at the point
 // where the function on the stack frame is currently stopped.
-static MaybeObject* Runtime_DebugEvaluate(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
   HandleScope scope(isolate);
 
   // Check the execution state and decode arguments frame and source to be
   // evaluated.
   ASSERT(args.length() == 5);
   Object* check_result;
-  { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args,
-                                                                  isolate);
+  { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check_result->ToObject(&check_result)) {
       return maybe_check_result;
     }
@@ -10763,7 +10571,7 @@
 
   // Get the frame where the debugging is performed.
   StackFrame::Id id = UnwrapFrameId(wrapped_id);
-  JavaScriptFrameIterator it(id);
+  JavaScriptFrameIterator it(isolate, id);
   JavaScriptFrame* frame = it.frame();
   Handle<JSFunction> function(JSFunction::cast(frame->function()));
   Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
@@ -10867,16 +10675,15 @@
 }
 
 
-static MaybeObject* Runtime_DebugEvaluateGlobal(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
   HandleScope scope(isolate);
 
   // Check the execution state and decode arguments frame and source to be
   // evaluated.
   ASSERT(args.length() == 4);
   Object* check_result;
-  { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(args,
-                                                                  isolate);
+  { MaybeObject* maybe_check_result = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check_result->ToObject(&check_result)) {
       return maybe_check_result;
     }
@@ -10939,8 +10746,7 @@
 }
 
 
-static MaybeObject* Runtime_DebugGetLoadedScripts(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetLoadedScripts) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 0);
 
@@ -11041,8 +10847,7 @@
 // args[0]: the object to find references to
 // args[1]: constructor function for instances to exclude (Mirror)
 // args[2]: the maximum number of objects to return
-static MaybeObject* Runtime_DebugReferencedBy(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
   ASSERT(args.length() == 3);
 
   // First perform a full GC in order to avoid references from dead objects.
@@ -11122,8 +10927,7 @@
 // Scan the heap for objects constructed by a specific function.
 // args[0]: the constructor to find instances of
 // args[1]: the maximum number of objects to return
-static MaybeObject* Runtime_DebugConstructedBy(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
   ASSERT(args.length() == 2);
 
   // First perform a full GC in order to avoid dead objects.
@@ -11161,8 +10965,7 @@
 
 // Find the effective prototype object as returned by __proto__.
 // args[0]: the object to find the prototype for.
-static MaybeObject* Runtime_DebugGetPrototype(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
   ASSERT(args.length() == 1);
 
   CONVERT_CHECKED(JSObject, obj, args[0]);
@@ -11172,17 +10975,14 @@
 }
 
 
-static MaybeObject* Runtime_SystemBreak(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
   ASSERT(args.length() == 0);
   CPU::DebugBreak();
   return isolate->heap()->undefined_value();
 }
 
 
-static MaybeObject* Runtime_DebugDisassembleFunction(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) {
 #ifdef DEBUG
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -11198,9 +10998,7 @@
 }
 
 
-static MaybeObject* Runtime_DebugDisassembleConstructor(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
 #ifdef DEBUG
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
@@ -11216,9 +11014,7 @@
 }
 
 
-static MaybeObject* Runtime_FunctionGetInferredName(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
@@ -11254,9 +11050,8 @@
 // For a script, finds all SharedFunctionInfos in the heap that point
 // to this script. Returns a JSArray of SharedFunctionInfos wrapped
 // in OpaqueReferences.
-static MaybeObject* Runtime_LiveEditFindSharedFunctionInfosForScript(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*,
+                 Runtime_LiveEditFindSharedFunctionInfosForScript) {
   ASSERT(args.length() == 1);
   HandleScope scope(isolate);
   CONVERT_CHECKED(JSValue, script_value, args[0]);
@@ -11288,9 +11083,7 @@
 // Returns a JSArray of compilation infos. The array is ordered so that
 // each function with all its descendants is always stored in a contiguous
 // range, with the function itself going first. The root function is a script
 // function.
-static MaybeObject* Runtime_LiveEditGatherCompileInfo(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_CHECKED(JSValue, script, args[0]);
@@ -11309,8 +11102,7 @@
 // Changes the source of the script to new_source.
 // If old_script_name is provided (i.e. is a String), also creates a copy of
 // the script with its original source and sends a notification to the
 // debugger.
-static MaybeObject* Runtime_LiveEditReplaceScript(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
   ASSERT(args.length() == 3);
   HandleScope scope(isolate);
   CONVERT_CHECKED(JSValue, original_script_value, args[0]);
@@ -11334,9 +11126,7 @@
 }
 
 
-static MaybeObject* Runtime_LiveEditFunctionSourceUpdated(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
   ASSERT(args.length() == 1);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSArray, shared_info, 0);
@@ -11345,9 +11135,7 @@
 
 
 // Replaces code of SharedFunctionInfo with a new one.
-static MaybeObject* Runtime_LiveEditReplaceFunctionCode(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSArray, new_compile_info, 0);
@@ -11357,9 +11145,7 @@
 }
 
 // Connects SharedFunctionInfo to another script.
-static MaybeObject* Runtime_LiveEditFunctionSetScript(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   Handle<Object> function_object(args[0], isolate);
@@ -11384,9 +11170,7 @@
 
 // In the code of a parent function, replaces the original function (as an
 // embedded object) with a substitute one.
-static MaybeObject* Runtime_LiveEditReplaceRefToNestedFunction(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) {
   ASSERT(args.length() == 3);
   HandleScope scope(isolate);
 
@@ -11406,9 +11190,7 @@
 // array of groups of 3 numbers:
 // (change_begin, change_end, change_end_new_position).
 // Each group describes a change in text; groups are sorted by change_begin.
-static MaybeObject* Runtime_LiveEditPatchFunctionPositions(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
@@ -11422,9 +11204,7 @@
 // checks that none of them have activations on stacks (of any thread).
 // Returns an array of the same length with corresponding results of
 // LiveEdit::FunctionPatchabilityStatus type.
-static MaybeObject* Runtime_LiveEditCheckAndDropActivations(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
@@ -11436,8 +11216,7 @@
 // Compares two strings line-by-line, then token-wise, and returns the diff
 // in the form of a JSArray of triplets (pos1, pos1_end, pos2_end) describing
 // the list of diff chunks.
-static MaybeObject* Runtime_LiveEditCompareStrings(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(String, s1, 0);
@@ -11449,9 +11228,7 @@
 
 // A testing entry. Returns the statement position closest to
 // source_position.
-static MaybeObject* Runtime_GetFunctionCodePositionFromSource(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -11488,8 +11265,7 @@
 // Calls the specified function with or without entering the debugger.
 // This is used in unit tests to run code as if the debugger is entered, or
 // simply to have a stack with a C++ frame in the middle.
-static MaybeObject* Runtime_ExecuteInDebugContext(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -11516,8 +11292,7 @@
 
 
 // Sets a v8 flag.
-static MaybeObject* Runtime_SetFlags(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
   CONVERT_CHECKED(String, arg, args[0]);
   SmartPointer<char> flags =
       arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -11528,16 +11303,14 @@
 
 // Performs a GC.
 // Presently, it only does a full GC.
-static MaybeObject* Runtime_CollectGarbage(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
   isolate->heap()->CollectAllGarbage(true);
   return isolate->heap()->undefined_value();
 }
 
 
 // Gets the current heap usage.
-static MaybeObject* Runtime_GetHeapUsage(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
   int usage = static_cast<int>(isolate->heap()->SizeOfObjects());
   if (!Smi::IsValid(usage)) {
     return *isolate->factory()->NewNumberFromInt(usage);
@@ -11547,8 +11320,7 @@
 
 
 // Returns whether the live object list is enabled.
-static MaybeObject* Runtime_HasLOLEnabled(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLOLEnabled) {
 #ifdef LIVE_OBJECT_LIST
   return isolate->heap()->true_value();
 #else
@@ -11558,8 +11330,7 @@
 
 
 // Captures a live object list from the present heap.
-static MaybeObject* Runtime_CaptureLOL(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CaptureLOL) {
 #ifdef LIVE_OBJECT_LIST
   return LiveObjectList::Capture();
 #else
@@ -11569,8 +11340,7 @@
 
 
 // Deletes the specified live object list.
-static MaybeObject* Runtime_DeleteLOL(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteLOL) {
 #ifdef LIVE_OBJECT_LIST
   CONVERT_SMI_CHECKED(id, args[0]);
   bool success = LiveObjectList::Delete(id);
@@ -11587,8 +11357,7 @@
 // specified by id1 and id2.
 // If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
 // dumped.
-static MaybeObject* Runtime_DumpLOL(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DumpLOL) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
   CONVERT_SMI_CHECKED(id1, args[0]);
@@ -11606,8 +11375,7 @@
 
 // Gets the specified object as requested by the debugger.
 // This is only used for obj ids shown in live object lists.
-static MaybeObject* Runtime_GetLOLObj(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObj) {
 #ifdef LIVE_OBJECT_LIST
   CONVERT_SMI_CHECKED(obj_id, args[0]);
   Object* result = LiveObjectList::GetObj(obj_id);
@@ -11620,8 +11388,7 @@
 
 // Gets the obj id for the specified address if valid.
 // This is only used for obj ids shown in live object lists.
-static MaybeObject* Runtime_GetLOLObjId(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjId) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
   CONVERT_ARG_CHECKED(String, address, 0);
@@ -11634,8 +11401,7 @@
 
 
 // Gets the retainers that keep the specified object alive.
-static MaybeObject* Runtime_GetLOLObjRetainers(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjRetainers) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
   CONVERT_SMI_CHECKED(obj_id, args[0]);
@@ -11675,8 +11441,7 @@
 
 
 // Gets the reference path between 2 objects.
-static MaybeObject* Runtime_GetLOLPath(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLPath) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
   CONVERT_SMI_CHECKED(obj_id1, args[0]);
@@ -11699,8 +11464,7 @@
 
 // Generates the response to a debugger request for a list of all
 // previously captured live object lists.
-static MaybeObject* Runtime_InfoLOL(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InfoLOL) {
 #ifdef LIVE_OBJECT_LIST
   CONVERT_SMI_CHECKED(start, args[0]);
   CONVERT_SMI_CHECKED(count, args[1]);
@@ -11713,8 +11477,7 @@
 
 // Gets a dump of the specified object as requested by the debugger.
 // This is only used for obj ids shown in live object lists.
-static MaybeObject* Runtime_PrintLOLObj(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_PrintLOLObj) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
   CONVERT_SMI_CHECKED(obj_id, args[0]);
@@ -11727,8 +11490,7 @@
 
 
 // Resets and releases all previously captured live object lists.
-static MaybeObject* Runtime_ResetLOL(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ResetLOL) {
 #ifdef LIVE_OBJECT_LIST
   LiveObjectList::Reset();
   return isolate->heap()->undefined_value();
@@ -11743,8 +11505,7 @@
 // specified by id1 and id2.
 // If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
 // summarized.
-static MaybeObject* Runtime_SummarizeLOL(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SummarizeLOL) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
   CONVERT_SMI_CHECKED(id1, args[0]);
@@ -11762,8 +11523,7 @@
 
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
-static MaybeObject* Runtime_ProfilerResume(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -11774,8 +11534,7 @@
 }
 
 
-static MaybeObject* Runtime_ProfilerPause(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
@@ -11822,8 +11581,7 @@
 // Get the script object from script data. NOTE: Regarding performance
 // see the NOTE for GetScriptFromScriptData.
 // args[0]: script data for the script to find the source for
-static MaybeObject* Runtime_GetScript(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScript) {
   HandleScope scope(isolate);
 
   ASSERT(args.length() == 1);
@@ -11868,8 +11626,7 @@
 // Collect the raw data for a stack trace.  Returns an array of
 // four-element segments, each containing a receiver, function, code and
 // native code offset.
-static MaybeObject* Runtime_CollectStackTrace(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
   ASSERT_EQ(args.length(), 2);
   Handle<Object> caller = args.at<Object>(0);
   CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]);
@@ -11882,7 +11639,7 @@
   Handle<FixedArray> elements =
       factory->NewFixedArrayWithHoles(initial_size * 4);
 
-  StackFrameIterator iter;
+  StackFrameIterator iter(isolate);
   // If the caller parameter is a function we skip frames until we're
   // under it before starting to collect.
   bool seen_caller = !caller->IsJSFunction();
@@ -11893,7 +11650,9 @@
     if (ShowFrameInStackTrace(raw_frame, *caller, &seen_caller)) {
       frames_seen++;
       JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
-      List<FrameSummary> frames(3);  // Max 2 levels of inlining.
+      // Set initial size to the maximum inlining level + 1 for the outermost
+      // function.
+      List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
       frame->Summarize(&frames);
       for (int i = frames.length() - 1; i >= 0; i--) {
         if (cursor + 4 > elements->length()) {
@@ -11926,8 +11685,7 @@
 
 
 // Returns V8 version as a string.
-static MaybeObject* Runtime_GetV8Version(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
   ASSERT_EQ(args.length(), 0);
 
   NoHandleAllocation ha;
@@ -11939,8 +11697,7 @@
 }
 
 
-static MaybeObject* Runtime_Abort(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
   ASSERT(args.length() == 2);
   OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +
                                     Smi::cast(args[1])->value());
@@ -11951,8 +11708,7 @@
 }
 
 
-static MaybeObject* Runtime_GetFromCache(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
   // This is only called from codegen, so checks might be more lax.
   CONVERT_CHECKED(JSFunctionResultCache, cache, args[0]);
   Object* key = args[1];
@@ -12044,8 +11800,7 @@
 }
 
 
-static MaybeObject* Runtime_NewMessageObject(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewMessageObject) {
   HandleScope scope(isolate);
   CONVERT_ARG_CHECKED(String, type, 0);
   CONVERT_ARG_CHECKED(JSArray, arguments, 1);
@@ -12060,30 +11815,25 @@
 }
 
 
-static MaybeObject* Runtime_MessageGetType(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetType) {
   CONVERT_CHECKED(JSMessageObject, message, args[0]);
   return message->type();
 }
 
 
-static MaybeObject* Runtime_MessageGetArguments(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetArguments) {
   CONVERT_CHECKED(JSMessageObject, message, args[0]);
   return message->arguments();
 }
 
 
-static MaybeObject* Runtime_MessageGetStartPosition(
-    RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) {
   CONVERT_CHECKED(JSMessageObject, message, args[0]);
   return Smi::FromInt(message->start_position());
 }
 
 
-static MaybeObject* Runtime_MessageGetScript(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
   CONVERT_CHECKED(JSMessageObject, message, args[0]);
   return message->script();
 }
@@ -12092,8 +11842,7 @@
 #ifdef DEBUG
 // ListNatives is ONLY used by fuzz-natives.js in debug mode
 // Exclude the code in release mode.
-static MaybeObject* Runtime_ListNatives(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
   ASSERT(args.length() == 0);
   HandleScope scope;
 #define COUNT_ENTRY(Name, argc, ressize) + 1
@@ -12137,8 +11886,7 @@
 #endif
 
 
-static MaybeObject* Runtime_Log(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, format, args[0]);
   CONVERT_CHECKED(JSArray, elms, args[1]);
@@ -12148,7 +11896,7 @@
 }
 
 
-static MaybeObject* Runtime_IS_VAR(RUNTIME_CALLING_CONVENTION) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IS_VAR) {
   UNREACHABLE();  // implemented as macro in the parser
   return NULL;
 }
diff --git a/src/runtime.h b/src/runtime.h
index 58062ca..bf1ba68 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -84,7 +84,8 @@
   F(LazyRecompile, 1, 1) \
   F(NotifyDeoptimized, 1, 1) \
   F(NotifyOSR, 0, 1) \
-  F(DeoptimizeFunction, 1, 1)             \
+  F(DeoptimizeFunction, 1, 1) \
+  F(OptimizeFunctionOnNextCall, 1, 1) \
   F(CompileForOnStackReplacement, 1, 1) \
   F(SetNewFunctionAttributes, 1, 1) \
   F(AllocateInNewSpace, 1, 1) \
diff --git a/src/scanner-base.cc b/src/scanner-base.cc
index 2066b5a..9715ca9 100644
--- a/src/scanner-base.cc
+++ b/src/scanner-base.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,29 +35,11 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// Compound predicates.
-
-bool ScannerConstants::IsIdentifier(unibrow::CharacterStream* buffer) {
-  // Checks whether the buffer contains an identifier (no escape).
-  if (!buffer->has_more()) return false;
-  if (!kIsIdentifierStart.get(buffer->GetNext())) {
-    return false;
-  }
-  while (buffer->has_more()) {
-    if (!kIsIdentifierPart.get(buffer->GetNext())) {
-      return false;
-    }
-  }
-  return true;
-}
-
-// ----------------------------------------------------------------------------
 // Scanner
 
-Scanner::Scanner(ScannerConstants* scanner_constants)
-    : scanner_constants_(scanner_constants),
-      octal_pos_(kNoOctalLocation) {
-}
+Scanner::Scanner(UnicodeCache* unicode_cache)
+    : unicode_cache_(unicode_cache),
+      octal_pos_(kNoOctalLocation) { }
 
 
 uc32 Scanner::ScanHexEscape(uc32 c, int length) {
@@ -114,7 +96,7 @@
 // ----------------------------------------------------------------------------
 // JavaScriptScanner
 
-JavaScriptScanner::JavaScriptScanner(ScannerConstants* scanner_contants)
+JavaScriptScanner::JavaScriptScanner(UnicodeCache* unicode_cache)
     : Scanner(unicode_cache) { }
 
 
@@ -144,9 +126,9 @@
   while (true) {
     // We treat byte-order marks (BOMs) as whitespace for better
     // compatibility with SpiderMonkey and other JavaScript engines.
-    while (scanner_constants_->IsWhiteSpace(c0_) || IsByteOrderMark(c0_)) {
+    while (unicode_cache_->IsWhiteSpace(c0_) || IsByteOrderMark(c0_)) {
       // IsWhiteSpace() includes line terminators!
-      if (scanner_constants_->IsLineTerminator(c0_)) {
+      if (unicode_cache_->IsLineTerminator(c0_)) {
         // Ignore line terminators, but remember them. This is necessary
         // for automatic semicolon insertion.
         has_line_terminator_before_next_ = true;
@@ -186,7 +168,7 @@
   // separately by the lexical grammar and becomes part of the
   // stream of input elements for the syntactic grammar (see
   // ECMA-262, section 7.4, page 12).
-  while (c0_ >= 0 && !scanner_constants_->IsLineTerminator(c0_)) {
+  while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
     Advance();
   }
 
@@ -451,7 +433,7 @@
         break;
 
       default:
-        if (scanner_constants_->IsIdentifierStart(c0_)) {
+        if (unicode_cache_->IsIdentifierStart(c0_)) {
           token = ScanIdentifierOrKeyword();
         } else if (IsDecimalDigit(c0_)) {
           token = ScanNumber(false);
@@ -499,7 +481,7 @@
   Advance();
 
   // Skip escaped newlines.
-  if (scanner_constants_->IsLineTerminator(c)) {
+  if (unicode_cache_->IsLineTerminator(c)) {
     // Allow CR+LF newlines in multiline string literals.
     if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance();
     // Allow LF+CR newlines in multiline string literals.
@@ -542,7 +524,7 @@
 
   LiteralScope literal(this);
   while (c0_ != quote && c0_ >= 0
-         && !scanner_constants_->IsLineTerminator(c0_)) {
+         && !unicode_cache_->IsLineTerminator(c0_)) {
     uc32 c = c0_;
     Advance();
     if (c == '\\') {
@@ -641,7 +623,7 @@
   // not be an identifier start or a decimal digit; see ECMA-262
   // section 7.8.3, page 17 (note that we read only one decimal digit
   // if the value is 0).
-  if (IsDecimalDigit(c0_) || scanner_constants_->IsIdentifierStart(c0_))
+  if (IsDecimalDigit(c0_) || unicode_cache_->IsIdentifierStart(c0_))
     return Token::ILLEGAL;
 
   literal.Complete();
@@ -663,14 +645,14 @@
 
 
 Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
-  ASSERT(scanner_constants_->IsIdentifierStart(c0_));
+  ASSERT(unicode_cache_->IsIdentifierStart(c0_));
   LiteralScope literal(this);
   KeywordMatcher keyword_match;
   // Scan identifier start character.
   if (c0_ == '\\') {
     uc32 c = ScanIdentifierUnicodeEscape();
     // Only allow legal identifier start characters.
-    if (!scanner_constants_->IsIdentifierStart(c)) return Token::ILLEGAL;
+    if (!unicode_cache_->IsIdentifierStart(c)) return Token::ILLEGAL;
     AddLiteralChar(c);
     return ScanIdentifierSuffix(&literal);
   }
@@ -683,7 +665,7 @@
   }
 
   // Scan the rest of the identifier characters.
-  while (scanner_constants_->IsIdentifierPart(c0_)) {
+  while (unicode_cache_->IsIdentifierPart(c0_)) {
     if (c0_ != '\\') {
       uc32 next_char = c0_;
       Advance();
@@ -701,11 +683,11 @@
 
 Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
   // Scan the rest of the identifier characters.
-  while (scanner_constants_->IsIdentifierPart(c0_)) {
+  while (unicode_cache_->IsIdentifierPart(c0_)) {
     if (c0_ == '\\') {
       uc32 c = ScanIdentifierUnicodeEscape();
       // Only allow legal identifier part characters.
-      if (!scanner_constants_->IsIdentifierPart(c)) return Token::ILLEGAL;
+      if (!unicode_cache_->IsIdentifierPart(c)) return Token::ILLEGAL;
       AddLiteralChar(c);
     } else {
       AddLiteralChar(c0_);
@@ -735,10 +717,10 @@
     AddLiteralChar('=');
 
   while (c0_ != '/' || in_character_class) {
-    if (scanner_constants_->IsLineTerminator(c0_) || c0_ < 0) return false;
+    if (unicode_cache_->IsLineTerminator(c0_) || c0_ < 0) return false;
     if (c0_ == '\\') {  // Escape sequence.
       AddLiteralCharAdvance();
-      if (scanner_constants_->IsLineTerminator(c0_) || c0_ < 0) return false;
+      if (unicode_cache_->IsLineTerminator(c0_) || c0_ < 0) return false;
       AddLiteralCharAdvance();
       // If the escape allows more characters, i.e., \x??, \u????, or \c?,
       // only "safe" characters are allowed (letters, digits, underscore),
@@ -764,7 +746,7 @@
 bool JavaScriptScanner::ScanRegExpFlags() {
   // Scan regular expression flags.
   LiteralScope literal(this);
-  while (scanner_constants_->IsIdentifierPart(c0_)) {
+  while (unicode_cache_->IsIdentifierPart(c0_)) {
     if (c0_ == '\\') {
       uc32 c = ScanIdentifierUnicodeEscape();
       if (c != static_cast<uc32>(unibrow::Utf8::kBadChar)) {
diff --git a/src/scanner-base.h b/src/scanner-base.h
index 552f387..60b97d2 100644
--- a/src/scanner-base.h
+++ b/src/scanner-base.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -119,11 +119,11 @@
 };
 
 
-class ScannerConstants {
+class UnicodeCache {
 // ---------------------------------------------------------------------
-// Constants used by scanners.
+// Caching predicates used by scanners.
  public:
-  ScannerConstants() {}
+  UnicodeCache() {}
   typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
 
   StaticResource<Utf8Decoder>* utf8_decoder() {
@@ -135,8 +135,6 @@
   bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
   bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
 
-  bool IsIdentifier(unibrow::CharacterStream* buffer);
-
  private:
 
   unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
@@ -145,9 +143,10 @@
   unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
   StaticResource<Utf8Decoder> utf8_decoder_;
 
-  DISALLOW_COPY_AND_ASSIGN(ScannerConstants);
+  DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
 };
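
The rename says what the class is: a cache of unibrow predicate tables, not a
bag of constants. A hypothetical usage sketch of the predicate accessors:

UnicodeCache cache;
bool a = cache.IsIdentifierStart('a');    // true
bool b = cache.IsIdentifierPart('1');     // true: digits may continue a name
bool c = cache.IsLineTerminator(0x2028);  // true: U+2028 LINE SEPARATOR
bool d = cache.IsWhiteSpace(0x2028);      // also true; the scanner notes that
                                          // IsWhiteSpace() includes
                                          // line terminators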
 
+
 // ----------------------------------------------------------------------------
 // LiteralBuffer -  Collector of chars of literals.
 
@@ -272,7 +271,7 @@
     bool complete_;
   };
 
-  explicit Scanner(ScannerConstants* scanner_contants);
+  explicit Scanner(UnicodeCache* unicode_cache);
 
   // Returns the current token again.
   Token::Value current_token() { return current_.token; }
@@ -427,7 +426,7 @@
     return source_->pos() - kCharacterLookaheadBufferSize;
   }
 
-  ScannerConstants* scanner_constants_;
+  UnicodeCache* unicode_cache_;
 
   // Buffers collecting literal strings, numbers, etc.
   LiteralBuffer literal_buffer1_;
@@ -473,7 +472,7 @@
     bool complete_;
   };
 
-  explicit JavaScriptScanner(ScannerConstants* scanner_contants);
+  explicit JavaScriptScanner(UnicodeCache* unicode_cache);
 
   // Returns the next token.
   Token::Value Next();
diff --git a/src/scanner.cc b/src/scanner.cc
index d9c2188..666818e 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -345,8 +345,8 @@
 // ----------------------------------------------------------------------------
 // JsonScanner
 
-JsonScanner::JsonScanner(ScannerConstants* scanner_constants)
-    : Scanner(scanner_constants) { }
+JsonScanner::JsonScanner(UnicodeCache* unicode_cache)
+    : Scanner(unicode_cache) { }
 
 
 void JsonScanner::Initialize(UC16CharacterStream* source) {
@@ -560,7 +560,8 @@
   }
   literal.Complete();
   ASSERT_NOT_NULL(next_.literal_chars);
-  number_ = StringToDouble(next_.literal_chars->ascii_literal(),
+  number_ = StringToDouble(unicode_cache_,
+                           next_.literal_chars->ascii_literal(),
                            NO_FLAGS,  // Hex, octal or trailing junk.
                            OS::nan_value());
   return Token::NUMBER;
@@ -575,7 +576,7 @@
     Advance();
     text++;
   }
-  if (scanner_constants_->IsIdentifierPart(c0_)) return Token::ILLEGAL;
+  if (unicode_cache_->IsIdentifierPart(c0_)) return Token::ILLEGAL;
   literal.Complete();
   return token;
 }
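
StringToDouble now receives the UnicodeCache as its first argument,
presumably so number parsing can classify characters without global state.
An assumed shape for the overload used above; the real declaration lives in
conversions.h and may differ:

// Assumed declaration (sketch).
double StringToDouble(UnicodeCache* unicode_cache,
                      Vector<const char> str,
                      int flags,                 // e.g. NO_FLAGS
                      double empty_string_val);  // returned for empty input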
diff --git a/src/scanner.h b/src/scanner.h
index 776ba53..871c69b 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -134,8 +134,8 @@
 
 class V8JavaScriptScanner : public JavaScriptScanner {
  public:
-  explicit V8JavaScriptScanner(ScannerConstants* scanner_constants)
-      : JavaScriptScanner(scanner_constants) {}
+  explicit V8JavaScriptScanner(UnicodeCache* unicode_cache)
+      : JavaScriptScanner(unicode_cache) {}
 
   void Initialize(UC16CharacterStream* source);
 };
@@ -143,7 +143,7 @@
 
 class JsonScanner : public Scanner {
  public:
-  explicit JsonScanner(ScannerConstants* scanner_constants);
+  explicit JsonScanner(UnicodeCache* unicode_cache);
 
   void Initialize(UC16CharacterStream* source);
 
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index cc9f816..2552af2 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -220,7 +220,7 @@
       ASSERT(index == this->index());
     }
 
-    inline Value(uint32_t value) : value_(value) {}
+    explicit inline Value(uint32_t value) : value_(value) {}
 
     uint32_t raw() { return value_; }
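
Making this constructor explicit closes off accidental implicit conversions
from raw integers. What it forbids and what still works, illustratively:

// Value v = 42u;   // no longer compiles: implicit uint32_t -> Value
// Value v(42u);    // still fine: the conversion is spelled out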
 
diff --git a/src/scopes.cc b/src/scopes.cc
index f4bcaa8..7d9bce5 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -120,7 +120,7 @@
     params_(0),
     unresolved_(0),
     decls_(0) {
-  SetDefaults(type, NULL, NULL);
+  SetDefaults(type, NULL, Handle<SerializedScopeInfo>::null());
   ASSERT(!resolved());
 }
 
@@ -132,7 +132,7 @@
     params_(4),
     unresolved_(16),
     decls_(4) {
-  SetDefaults(type, outer_scope, NULL);
+  SetDefaults(type, outer_scope, Handle<SerializedScopeInfo>::null());
   // At some point we might want to provide outer scopes to
   // eval scopes (by walking the stack and reading the scope info).
   // In that case, the ASSERT below needs to be adjusted.
@@ -142,14 +142,14 @@
 }
 
 
-Scope::Scope(Scope* inner_scope, SerializedScopeInfo* scope_info)
+Scope::Scope(Scope* inner_scope, Handle<SerializedScopeInfo> scope_info)
   : inner_scopes_(4),
     variables_(),
     temps_(4),
     params_(4),
     unresolved_(16),
     decls_(4) {
-  ASSERT(scope_info != NULL);
+  ASSERT(!scope_info.is_null());
   SetDefaults(FUNCTION_SCOPE, NULL, scope_info);
   ASSERT(resolved());
   if (scope_info->HasHeapAllocatedLocals()) {
@@ -181,6 +181,33 @@
 }
 
 
+void Scope::SetDefaults(Type type,
+                        Scope* outer_scope,
+                        Handle<SerializedScopeInfo> scope_info) {
+  outer_scope_ = outer_scope;
+  type_ = type;
+  scope_name_ = FACTORY->empty_symbol();
+  dynamics_ = NULL;
+  receiver_ = NULL;
+  function_ = NULL;
+  arguments_ = NULL;
+  arguments_shadow_ = NULL;
+  illegal_redecl_ = NULL;
+  scope_inside_with_ = false;
+  scope_contains_with_ = false;
+  scope_calls_eval_ = false;
+  // Inherit the strict mode from the parent scope.
+  strict_mode_ = (outer_scope != NULL) && outer_scope->strict_mode_;
+  outer_scope_calls_eval_ = false;
+  inner_scope_calls_eval_ = false;
+  outer_scope_is_eval_scope_ = false;
+  force_eager_compilation_ = false;
+  num_stack_slots_ = 0;
+  num_heap_slots_ = 0;
+  scope_info_ = scope_info;
+}
+
+
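
Storing the scope info as a Handle matters because a raw pointer into the
heap is silently invalidated when the GC moves the object, while the handle's
indirection is updated by the GC. The null handle takes over as the
"unresolved" sentinel; a minimal sketch of the new idiom:

Handle<SerializedScopeInfo> info = Handle<SerializedScopeInfo>::null();
ASSERT(info.is_null());  // plays the role the old NULL check played
// After deserialization the handle wraps the heap object, and resolved()
// is simply !scope_info_.is_null().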
 Scope* Scope::DeserializeScopeChain(CompilationInfo* info,
                                     Scope* global_scope) {
   ASSERT(!info->closure().is_null());
@@ -193,8 +220,8 @@
     JSFunction* current = *info->closure();
     do {
       current = current->context()->closure();
-      SerializedScopeInfo* scope_info = current->shared()->scope_info();
-      if (scope_info != SerializedScopeInfo::Empty()) {
+      Handle<SerializedScopeInfo> scope_info(current->shared()->scope_info());
+      if (*scope_info != SerializedScopeInfo::Empty()) {
         scope = new Scope(scope, scope_info);
         if (innermost_scope == NULL) innermost_scope = scope;
       } else {
@@ -361,12 +388,14 @@
 }
 
 
-VariableProxy* Scope::NewUnresolved(Handle<String> name, bool inside_with) {
+VariableProxy* Scope::NewUnresolved(Handle<String> name,
+                                    bool inside_with,
+                                    int position) {
   // Note that we must not share the unresolved variables with
   // the same name because they may be removed selectively via
   // RemoveUnresolved().
   ASSERT(!resolved());
-  VariableProxy* proxy = new VariableProxy(name, false, inside_with);
+  VariableProxy* proxy = new VariableProxy(name, false, inside_with, position);
   unresolved_.Add(proxy);
   return proxy;
 }
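
Existing callers keep compiling thanks to the defaulted position parameter
(see the scopes.h change below); new call sites can pin the source position
of the reference. Hypothetical call sites, with scanner_position standing in
for whatever position the parser has at hand:

// Old form: position defaults to RelocInfo::kNoPosition.
VariableProxy* a = scope->NewUnresolved(name, inside_with);
// New form: records where the reference occurred in the source.
VariableProxy* b = scope->NewUnresolved(name, inside_with, scanner_position);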
diff --git a/src/scopes.h b/src/scopes.h
index 24622b4..18db0cd 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -149,7 +149,9 @@
   void AddParameter(Variable* var);
 
   // Create a new unresolved variable.
-  virtual VariableProxy* NewUnresolved(Handle<String> name, bool inside_with);
+  virtual VariableProxy* NewUnresolved(Handle<String> name,
+                                       bool inside_with,
+                                       int position = RelocInfo::kNoPosition);
 
   // Remove an unresolved variable. During parsing, an unresolved variable
   // may have been added optimistically, but then only the variable name
@@ -376,8 +378,8 @@
   int num_heap_slots_;
 
   // Serialized scopes support.
-  SerializedScopeInfo* scope_info_;
-  bool resolved() { return scope_info_ != NULL; }
+  Handle<SerializedScopeInfo> scope_info_;
+  bool resolved() { return !scope_info_.is_null(); }
 
   // Create a non-local variable with a given name.
   // These variables are looked up dynamically at runtime.
@@ -412,7 +414,7 @@
   void AllocateVariablesRecursively();
 
  private:
-  Scope(Scope* inner_scope, SerializedScopeInfo* scope_info);
+  Scope(Scope* inner_scope, Handle<SerializedScopeInfo> scope_info);
 
   void AddInnerScope(Scope* inner_scope) {
     if (inner_scope != NULL) {
@@ -423,29 +425,7 @@
 
   void SetDefaults(Type type,
                    Scope* outer_scope,
-                   SerializedScopeInfo* scope_info) {
-    outer_scope_ = outer_scope;
-    type_ = type;
-    scope_name_ = FACTORY->empty_symbol();
-    dynamics_ = NULL;
-    receiver_ = NULL;
-    function_ = NULL;
-    arguments_ = NULL;
-    arguments_shadow_ = NULL;
-    illegal_redecl_ = NULL;
-    scope_inside_with_ = false;
-    scope_contains_with_ = false;
-    scope_calls_eval_ = false;
-    // Inherit the strict mode from the parent scope.
-    strict_mode_ = (outer_scope != NULL) && outer_scope->strict_mode_;
-    outer_scope_calls_eval_ = false;
-    inner_scope_calls_eval_ = false;
-    outer_scope_is_eval_scope_ = false;
-    force_eager_compilation_ = false;
-    num_stack_slots_ = 0;
-    num_heap_slots_ = 0;
-    scope_info_ = scope_info;
-  }
+                   Handle<SerializedScopeInfo> scope_info);
 };
 
 
@@ -479,7 +459,9 @@
 
   virtual Variable* Lookup(Handle<String> name)  { return NULL; }
 
-  virtual VariableProxy* NewUnresolved(Handle<String> name, bool inside_with) {
+  virtual VariableProxy* NewUnresolved(Handle<String> name,
+                                       bool inside_with,
+                                       int position = RelocInfo::kNoPosition) {
     return NULL;
   }
 
diff --git a/src/spaces.cc b/src/spaces.cc
index 20700e1..674078c 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1570,7 +1570,6 @@
       CASE(KEYED_EXTERNAL_ARRAY_STORE_IC);
       CASE(CALL_IC);
       CASE(KEYED_CALL_IC);
-      CASE(BINARY_OP_IC);
       CASE(TYPE_RECORDING_BINARY_OP_IC);
       CASE(COMPARE_IC);
     }
@@ -3014,7 +3013,8 @@
       }
 
       // Free the chunk.
-      heap()->mark_compact_collector()->ReportDeleteIfNeeded(object);
+      heap()->mark_compact_collector()->ReportDeleteIfNeeded(
+          object, heap()->isolate());
       LiveObjectList::ProcessNonLive(object);
 
       size_ -= static_cast<int>(chunk_size);
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 435e71d..0c6a7f7 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1278,8 +1278,7 @@
 // StubCompiler implementation.
 
 
-MaybeObject* LoadCallbackProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty) {
   ASSERT(args[0]->IsJSObject());
   ASSERT(args[1]->IsJSObject());
   AccessorInfo* callback = AccessorInfo::cast(args[3]);
@@ -1301,8 +1300,7 @@
 }
 
 
-MaybeObject* StoreCallbackProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
   JSObject* recv = JSObject::cast(args[0]);
   AccessorInfo* callback = AccessorInfo::cast(args[1]);
   Address setter_address = v8::ToCData<Address>(callback->setter());
@@ -1335,8 +1333,7 @@
  * Returns |Heap::no_interceptor_result_sentinel()| if the interceptor doesn't
  * provide any value for the given name.
  */
-MaybeObject* LoadPropertyWithInterceptorOnly(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
   Handle<String> name_handle = args.at<String>(0);
   Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
   ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
@@ -1435,8 +1432,7 @@
  * Loads a property with an interceptor performing post interceptor
  * lookup if interceptor failed.
  */
-MaybeObject* LoadPropertyWithInterceptorForLoad(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad) {
   PropertyAttributes attr = NONE;
   Object* result;
   { MaybeObject* maybe_result = LoadWithInterceptor(&args, &attr);
@@ -1449,8 +1445,7 @@
 }
 
 
-MaybeObject* LoadPropertyWithInterceptorForCall(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall) {
   PropertyAttributes attr;
   MaybeObject* result = LoadWithInterceptor(&args, &attr);
   RETURN_IF_SCHEDULED_EXCEPTION(isolate);
@@ -1461,8 +1456,7 @@
 }
 
 
-MaybeObject* StoreInterceptorProperty(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) {
   ASSERT(args.length() == 4);
   JSObject* recv = JSObject::cast(args[0]);
   String* name = String::cast(args[1]);
@@ -1478,8 +1472,7 @@
 }
 
 
-MaybeObject* KeyedLoadPropertyWithInterceptor(RUNTIME_CALLING_CONVENTION) {
-  RUNTIME_GET_ISOLATE;
+RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor) {
   JSObject* receiver = JSObject::cast(args[0]);
   ASSERT(Smi::cast(args[1])->value() >= 0);
   uint32_t index = Smi::cast(args[1])->value();
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 793f581..c5dcf36 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -392,23 +392,24 @@
 
 
 // Support functions for IC stubs for callbacks.
-MaybeObject* LoadCallbackProperty(RUNTIME_CALLING_CONVENTION);
-MaybeObject* StoreCallbackProperty(RUNTIME_CALLING_CONVENTION);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty);
 
 
 // Support functions for IC stubs for interceptors.
-MaybeObject* LoadPropertyWithInterceptorOnly(RUNTIME_CALLING_CONVENTION);
-MaybeObject* LoadPropertyWithInterceptorForLoad(RUNTIME_CALLING_CONVENTION);
-MaybeObject* LoadPropertyWithInterceptorForCall(RUNTIME_CALLING_CONVENTION);
-MaybeObject* StoreInterceptorProperty(RUNTIME_CALLING_CONVENTION);
-MaybeObject* CallInterceptorProperty(RUNTIME_CALLING_CONVENTION);
-MaybeObject* KeyedLoadPropertyWithInterceptor(RUNTIME_CALLING_CONVENTION);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, CallInterceptorProperty);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor);
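
Under the macro expansions assumed earlier, each declaration here pairs with
the RUNTIME_FUNCTION definition in stub-cache.cc to yield one signature:

// DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty);
//   plausibly expands to:
// MaybeObject* LoadCallbackProperty(Arguments args, Isolate* isolate);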
 
 
 // The stub compiler compiles stubs for the stub cache.
 class StubCompiler BASE_EMBEDDED {
  public:
-  StubCompiler() : scope_(), masm_(NULL, 256), failure_(NULL) { }
+  StubCompiler()
+      : scope_(), masm_(Isolate::Current(), NULL, 256), failure_(NULL) { }
 
   MUST_USE_RESULT MaybeObject* CompileCallInitialize(Code::Flags flags);
   MUST_USE_RESULT MaybeObject* CompileCallPreMonomorphic(Code::Flags flags);
diff --git a/src/top.cc b/src/top.cc
index 4a2a00b..a8dba71 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -29,6 +29,7 @@
 
 #include "api.h"
 #include "bootstrapper.h"
+#include "compiler.h"
 #include "debug.h"
 #include "execution.h"
 #include "messages.h"
@@ -37,26 +38,23 @@
 #include "string-stream.h"
 #include "vm-state-inl.h"
 
+
 // TODO(isolates): move to isolate.cc. This stuff is kept here to
 // simplify merging.
 
 namespace v8 {
 namespace internal {
 
-v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
-  return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
+ThreadLocalTop::ThreadLocalTop() {
+  InitializeInternal();
 }
 
 
-void ThreadLocalTop::Initialize() {
+void ThreadLocalTop::InitializeInternal() {
   c_entry_fp_ = 0;
   handler_ = 0;
 #ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
-  simulator_ = Simulator::current(Isolate::Current());
-#elif V8_TARGET_ARCH_MIPS
-  simulator_ = Simulator::current(Isolate::Current());
-#endif
+  simulator_ = NULL;
 #endif
 #ifdef ENABLE_LOGGING_AND_PROFILING
   js_entry_sp_ = NULL;
@@ -67,8 +65,7 @@
 #endif
   try_catch_handler_address_ = NULL;
   context_ = NULL;
-  int id = Isolate::Current()->thread_manager()->CurrentId();
-  thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id;
+  thread_id_ = ThreadId::Invalid();
   external_caught_exception_ = false;
   failed_access_check_callback_ = NULL;
   save_context_ = NULL;
@@ -76,6 +73,24 @@
 }
 
 
+void ThreadLocalTop::Initialize() {
+  InitializeInternal();
+#ifdef USE_SIMULATOR
+#ifdef V8_TARGET_ARCH_ARM
+  simulator_ = Simulator::current(Isolate::Current());
+#elif V8_TARGET_ARCH_MIPS
+  simulator_ = Simulator::current(Isolate::Current());
+#endif
+#endif
+  thread_id_ = ThreadId::Current();
+}
+
+
+v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
+  return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address());
+}
+
+
 Address Isolate::get_address_from_id(Isolate::AddressId id) {
   return isolate_addresses_[id];
 }
@@ -89,13 +104,13 @@
 
 
 void Isolate::IterateThread(ThreadVisitor* v) {
-  v->VisitThread(thread_local_top());
+  v->VisitThread(this, thread_local_top());
 }
 
 
 void Isolate::IterateThread(ThreadVisitor* v, char* t) {
   ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
-  v->VisitThread(thread);
+  v->VisitThread(this, thread);
 }
 
 
@@ -125,7 +140,7 @@
   }
 
   // Iterate over pointers on native execution stack.
-  for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
+  for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
     it.frame()->Iterate(v);
   }
 }
@@ -204,12 +219,13 @@
   Handle<String> constructor_key =
       factory()->LookupAsciiSymbol("isConstructor");
 
-  StackTraceFrameIterator it;
+  StackTraceFrameIterator it(this);
   int frames_seen = 0;
   while (!it.done() && (frames_seen < limit)) {
     JavaScriptFrame* frame = it.frame();
-
-    List<FrameSummary> frames(3);  // Max 2 levels of inlining.
+    // Set initial size to the maximum inlining level + 1 for the outermost
+    // function.
+    List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
     frame->Summarize(&frames);
     for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
       // Create a JSObject to hold the information for the StackFrame.
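
The list above is now pre-sized from Compiler::kMaxInliningLevels instead of
a magic 3, so one optimized frame with the deepest possible inlining never
forces a reallocation mid-summarize. The sizing rule in standalone form (the
constant's value here is illustrative):

#include <vector>

struct FrameSummary {};
static const int kMaxInliningLevels = 3;  // illustrative value

int main() {
  std::vector<FrameSummary> frames;
  frames.reserve(kMaxInliningLevels + 1);  // inlinees plus outermost function
  return 0;
}
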
@@ -530,19 +546,19 @@
   // the message for stack overflow exceptions which is very likely to
   // double fault with another stack overflow exception, we use a
   // precomputed message.
-  DoThrow(*exception, NULL, kStackOverflowMessage);
+  DoThrow(*exception, NULL);
   return Failure::Exception();
 }
 
 
 Failure* Isolate::TerminateExecution() {
-  DoThrow(heap_.termination_exception(), NULL, NULL);
+  DoThrow(heap_.termination_exception(), NULL);
   return Failure::Exception();
 }
 
 
 Failure* Isolate::Throw(Object* exception, MessageLocation* location) {
-  DoThrow(exception, location, NULL);
+  DoThrow(exception, location);
   return Failure::Exception();
 }
 
@@ -584,12 +600,12 @@
 
 
 void Isolate::PrintCurrentStackTrace(FILE* out) {
-  StackTraceFrameIterator it;
+  StackTraceFrameIterator it(this);
   while (!it.done()) {
     HandleScope scope;
     // Find code position if recorded in relocation info.
     JavaScriptFrame* frame = it.frame();
-    int pos = frame->LookupCode(this)->SourcePosition(frame->pc());
+    int pos = frame->LookupCode()->SourcePosition(frame->pc());
     Handle<Object> pos_obj(Smi::FromInt(pos));
     // Fetch function and receiver.
     Handle<JSFunction> fun(JSFunction::cast(frame->function()));
@@ -613,14 +629,14 @@
 
 void Isolate::ComputeLocation(MessageLocation* target) {
   *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
-  StackTraceFrameIterator it;
+  StackTraceFrameIterator it(this);
   if (!it.done()) {
     JavaScriptFrame* frame = it.frame();
     JSFunction* fun = JSFunction::cast(frame->function());
     Object* script = fun->shared()->script();
     if (script->IsScript() &&
         !(Script::cast(script)->source()->IsUndefined())) {
-      int pos = frame->LookupCode(this)->SourcePosition(frame->pc());
+      int pos = frame->LookupCode()->SourcePosition(frame->pc());
       // Compute the location from the function and the reloc info.
       Handle<Script> casted_script(Script::cast(script));
       *target = MessageLocation(casted_script, pos, pos + 1);
@@ -660,9 +676,7 @@
 }
 
 
-void Isolate::DoThrow(MaybeObject* exception,
-                      MessageLocation* location,
-                      const char* message) {
+void Isolate::DoThrow(MaybeObject* exception, MessageLocation* location) {
   ASSERT(!has_pending_exception());
 
   HandleScope scope;
@@ -719,7 +733,6 @@
 
   // Save the message for reporting if the exception remains uncaught.
   thread_local_top()->has_pending_message_ = report_exception;
-  thread_local_top()->pending_message_ = message;
   if (!message_obj.is_null()) {
     thread_local_top()->pending_message_obj_ = *message_obj;
     if (location != NULL) {
@@ -791,55 +804,36 @@
 
 void Isolate::ReportPendingMessages() {
   ASSERT(has_pending_exception());
+  PropagatePendingExceptionToExternalTryCatch();
+
   // If the pending exception is OutOfMemoryException set out_of_memory in
   // the global context.  Note: We have to mark the global context here
   // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
   // set it.
-  bool external_caught = IsExternallyCaught();
-  thread_local_top()->external_caught_exception_ = external_caught;
-  HandleScope scope(this);
-  if (thread_local_top()->pending_exception_ ==
-      Failure::OutOfMemoryException()) {
+  HandleScope scope;
+  if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
     context()->mark_out_of_memory();
-  } else if (thread_local_top()->pending_exception_ ==
-             heap_.termination_exception()) {
-    if (external_caught) {
-      try_catch_handler()->can_continue_ = false;
-      try_catch_handler()->exception_ = heap_.null_value();
-    }
+  } else if (thread_local_top_.pending_exception_ ==
+             heap()->termination_exception()) {
+    // Do nothing: if needed, the exception has already been propagated to
+    // v8::TryCatch.
   } else {
-    // At this point all non-object (failure) exceptions have
-    // been dealt with so this shouldn't fail.
-    Object* pending_exception_object = pending_exception()->ToObjectUnchecked();
-    Handle<Object> exception(pending_exception_object);
-    thread_local_top()->external_caught_exception_ = false;
-    if (external_caught) {
-      try_catch_handler()->can_continue_ = true;
-      try_catch_handler()->exception_ = thread_local_top()->pending_exception_;
-      if (!thread_local_top()->pending_message_obj_->IsTheHole()) {
-        try_catch_handler()->message_ =
-            thread_local_top()->pending_message_obj_;
-      }
-    }
-    if (thread_local_top()->has_pending_message_) {
-      thread_local_top()->has_pending_message_ = false;
-      if (thread_local_top()->pending_message_ != NULL) {
-        MessageHandler::ReportMessage(thread_local_top()->pending_message_);
-      } else if (!thread_local_top()->pending_message_obj_->IsTheHole()) {
-        Handle<Object> message_obj(thread_local_top()->pending_message_obj_);
-        if (thread_local_top()->pending_message_script_ != NULL) {
-          Handle<Script> script(thread_local_top()->pending_message_script_);
-          int start_pos = thread_local_top()->pending_message_start_pos_;
-          int end_pos = thread_local_top()->pending_message_end_pos_;
+    if (thread_local_top_.has_pending_message_) {
+      thread_local_top_.has_pending_message_ = false;
+      if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
+        HandleScope scope;
+        Handle<Object> message_obj(thread_local_top_.pending_message_obj_);
+        if (thread_local_top_.pending_message_script_ != NULL) {
+          Handle<Script> script(thread_local_top_.pending_message_script_);
+          int start_pos = thread_local_top_.pending_message_start_pos_;
+          int end_pos = thread_local_top_.pending_message_end_pos_;
           MessageLocation location(script, start_pos, end_pos);
-          MessageHandler::ReportMessage(&location, message_obj);
+          MessageHandler::ReportMessage(this, &location, message_obj);
         } else {
-          MessageHandler::ReportMessage(NULL, message_obj);
+          MessageHandler::ReportMessage(this, NULL, message_obj);
         }
       }
     }
-    thread_local_top()->external_caught_exception_ = external_caught;
-    set_pending_exception(*exception);
   }
   clear_pending_message();
 }
@@ -851,6 +845,9 @@
 
 
 bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
+  ASSERT(has_pending_exception());
+  PropagatePendingExceptionToExternalTryCatch();
+
   // Always reschedule out of memory exceptions.
   if (!is_out_of_memory()) {
     bool is_termination_exception =
@@ -965,7 +962,7 @@
   memcpy(reinterpret_cast<char*>(thread_local_top()), from,
          sizeof(ThreadLocalTop));
   // This might be just paranoia, but it seems to be needed in case a
-  // thread_local_ is restored on a separate OS thread.
+  // thread_local_top_ is restored on a separate OS thread.
 #ifdef USE_SIMULATOR
 #ifdef V8_TARGET_ARCH_ARM
   thread_local_top()->simulator_ = Simulator::current(this);
diff --git a/src/type-info.cc b/src/type-info.cc
index 78f693c..1940601 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -89,7 +89,7 @@
 }
 
 
-bool TypeFeedbackOracle::StoreIsMonomorphic(Assignment* expr) {
+bool TypeFeedbackOracle::StoreIsMonomorphic(Expression* expr) {
   Handle<Object> map_or_code(GetInfo(expr->position()));
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
@@ -119,7 +119,7 @@
 }
 
 
-Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Assignment* expr) {
+Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
   ASSERT(StoreIsMonomorphic(expr));
   Handle<HeapObject> map_or_code(
       Handle<HeapObject>::cast(GetInfo(expr->position())));
@@ -178,7 +178,7 @@
 }
 
 ExternalArrayType TypeFeedbackOracle::GetKeyedStoreExternalArrayType(
-    Assignment* expr) {
+    Expression* expr) {
   Handle<Object> stub = GetInfo(expr->position());
   ASSERT(stub->IsCode());
   return Code::cast(*stub)->external_array_type();
@@ -244,22 +244,7 @@
   TypeInfo unknown = TypeInfo::Unknown();
   if (!object->IsCode()) return unknown;
   Handle<Code> code = Handle<Code>::cast(object);
-  if (code->is_binary_op_stub()) {
-    BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
-        code->binary_op_type());
-    switch (type) {
-      case BinaryOpIC::UNINIT_OR_SMI:
-        return TypeInfo::Smi();
-      case BinaryOpIC::DEFAULT:
-        return (expr->op() == Token::DIV || expr->op() == Token::MUL)
-            ? TypeInfo::Double()
-            : TypeInfo::Integer32();
-      case BinaryOpIC::HEAP_NUMBERS:
-        return TypeInfo::Double();
-      default:
-        return unknown;
-    }
-  } else if (code->is_type_recording_binary_op_stub()) {
+  if (code->is_type_recording_binary_op_stub()) {
     TRBinaryOpIC::TypeInfo type = static_cast<TRBinaryOpIC::TypeInfo>(
         code->type_recording_binary_op_type());
     TRBinaryOpIC::TypeInfo result_type = static_cast<TRBinaryOpIC::TypeInfo>(
@@ -290,6 +275,8 @@
         return TypeInfo::Integer32();
       case TRBinaryOpIC::HEAP_NUMBER:
         return TypeInfo::Double();
+      case TRBinaryOpIC::BOTH_STRING:
+        return TypeInfo::String();
       case TRBinaryOpIC::STRING:
       case TRBinaryOpIC::GENERIC:
         return unknown;
@@ -355,6 +342,18 @@
 }
 
 
+void TypeFeedbackOracle::SetInfo(int position, Object* target) {
+  MaybeObject* maybe_result = dictionary_->AtNumberPut(position, target);
+  USE(maybe_result);
+#ifdef DEBUG
+  Object* result;
+  // Dictionary has been allocated with sufficient size for all elements.
+  ASSERT(maybe_result->ToObject(&result));
+  ASSERT(*dictionary_ == result);
+#endif
+}
+
+
 void TypeFeedbackOracle::PopulateMap(Handle<Code> code) {
   Isolate* isolate = Isolate::Current();
   HandleScope scope(isolate);
@@ -371,51 +370,43 @@
   int length = code_positions.length();
   ASSERT(source_positions.length() == length);
   for (int i = 0; i < length; i++) {
-    HandleScope loop_scope(isolate);
+    AssertNoAllocation no_allocation;
     RelocInfo info(code->instruction_start() + code_positions[i],
                    RelocInfo::CODE_TARGET, 0);
-    Handle<Code> target(Code::GetCodeFromTargetAddress(info.target_address()));
+    Code* target = Code::GetCodeFromTargetAddress(info.target_address());
     int position = source_positions[i];
     InlineCacheState state = target->ic_state();
     Code::Kind kind = target->kind();
-    Handle<Object> value;
-    if (kind == Code::BINARY_OP_IC ||
-        kind == Code::TYPE_RECORDING_BINARY_OP_IC ||
+
+    if (kind == Code::TYPE_RECORDING_BINARY_OP_IC ||
         kind == Code::COMPARE_IC) {
       // TODO(kasperl): Avoid having multiple ICs with the same
       // position by making sure that we have position information
       // recorded for all binary ICs.
       int entry = dictionary_->FindEntry(position);
       if (entry == NumberDictionary::kNotFound) {
-        value = target;
+        SetInfo(position, target);
       }
     } else if (state == MONOMORPHIC) {
       if (kind == Code::KEYED_EXTERNAL_ARRAY_LOAD_IC ||
           kind == Code::KEYED_EXTERNAL_ARRAY_STORE_IC) {
-        value = target;
+        SetInfo(position, target);
       } else if (target->kind() != Code::CALL_IC ||
           target->check_type() == RECEIVER_MAP_CHECK) {
         Map* map = target->FindFirstMap();
         if (map == NULL) {
-          value = target;
+          SetInfo(position, target);
         } else {
-          value = Handle<Map>(map);
+          SetInfo(position, map);
         }
       } else {
         ASSERT(target->kind() == Code::CALL_IC);
         CheckType check = target->check_type();
         ASSERT(check != RECEIVER_MAP_CHECK);
-        value = Handle<Object>(Smi::FromInt(check));
+        SetInfo(position, Smi::FromInt(check));
       }
     } else if (state == MEGAMORPHIC) {
-      value = target;
-    }
-
-    if (!value.is_null()) {
-      Handle<NumberDictionary> new_dict =
-          isolate->factory()->DictionaryAtNumberPut(
-              dictionary_, position, value);
-      dictionary_ = loop_scope.CloseAndEscape(new_dict);
+      SetInfo(position, target);
     }
   }
   // Allocate handle in the parent scope.
@@ -441,9 +432,7 @@
       if (target->is_inline_cache_stub()) {
         InlineCacheState state = target->ic_state();
         Code::Kind kind = target->kind();
-        if (kind == Code::BINARY_OP_IC) {
-          if (target->binary_op_type() == BinaryOpIC::GENERIC) continue;
-        } else if (kind == Code::TYPE_RECORDING_BINARY_OP_IC) {
+        if (kind == Code::TYPE_RECORDING_BINARY_OP_IC) {
           if (target->type_recording_binary_op_type() ==
               TRBinaryOpIC::GENERIC) {
             continue;
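
The PopulateMap rewrite above works because the dictionary is allocated with
sufficient capacity before the loop, so SetInfo's AtNumberPut can never
allocate and the whole walk can run under AssertNoAllocation with raw Code*
pointers. The same "reserve, then assert in place" idiom in standalone form:

#include <cassert>
#include <vector>

int main() {
  std::vector<int> dict;
  dict.reserve(64);                 // sized for all insertions up front
  const int* backing = dict.data();
  for (int i = 0; i < 64; ++i) {
    dict.push_back(i);              // stays within the reserved capacity
  }
  assert(dict.data() == backing);   // backing store never moved
  return 0;
}
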
diff --git a/src/type-info.h b/src/type-info.h
index c068489..f6e6729 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -239,18 +239,18 @@
   TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
 
   bool LoadIsMonomorphic(Property* expr);
-  bool StoreIsMonomorphic(Assignment* expr);
+  bool StoreIsMonomorphic(Expression* expr);
   bool CallIsMonomorphic(Call* expr);
 
   Handle<Map> LoadMonomorphicReceiverType(Property* expr);
-  Handle<Map> StoreMonomorphicReceiverType(Assignment* expr);
+  Handle<Map> StoreMonomorphicReceiverType(Expression* expr);
 
   ZoneMapList* LoadReceiverTypes(Property* expr, Handle<String> name);
   ZoneMapList* StoreReceiverTypes(Assignment* expr, Handle<String> name);
   ZoneMapList* CallReceiverTypes(Call* expr, Handle<String> name);
 
   ExternalArrayType GetKeyedLoadExternalArrayType(Property* expr);
-  ExternalArrayType GetKeyedStoreExternalArrayType(Assignment* expr);
+  ExternalArrayType GetKeyedStoreExternalArrayType(Expression* expr);
 
   CheckType GetCallCheckType(Call* expr);
   Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
@@ -267,6 +267,8 @@
                                     Handle<String> name,
                                     Code::Flags flags);
 
+  void SetInfo(int position, Object* target);
+
   void PopulateMap(Handle<Code> code);
 
   void CollectPositions(Code* code,
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 04482e8..5e765b2 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -202,9 +202,6 @@
   SC(array_function_runtime, V8.ArrayFunctionRuntime)                 \
   SC(array_function_native, V8.ArrayFunctionNative)                   \
   SC(for_in, V8.ForIn)                                                \
-  SC(memcopy_aligned, V8.MemCopyAligned)                              \
-  SC(memcopy_unaligned, V8.MemCopyUnaligned)                          \
-  SC(memcopy_noxmm, V8.MemCopyNoXMM)                                  \
   SC(enum_cache_hits, V8.EnumCacheHits)                               \
   SC(enum_cache_misses, V8.EnumCacheMisses)                           \
   SC(zone_segment_bytes, V8.ZoneSegmentBytes)                         \
diff --git a/src/v8.cc b/src/v8.cc
index 8153372..0b562fc 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,6 +41,9 @@
 namespace v8 {
 namespace internal {
 
+static Mutex* init_once_mutex = OS::CreateMutex();
+static bool init_once_called = false;
+
 bool V8::is_running_ = false;
 bool V8::has_been_setup_ = false;
 bool V8::has_been_disposed_ = false;
@@ -49,6 +52,8 @@
 
 
 bool V8::Initialize(Deserializer* des) {
+  InitializeOncePerProcess();
+
   // The current thread may not yet have entered an isolate to run.
   // Note that Isolate::Current() may be non-null because for various
   // initialization purposes an initializing thread may be assigned an isolate
@@ -58,8 +63,8 @@
   }
 
   ASSERT(i::Isolate::CurrentPerIsolateThreadData() != NULL);
-  ASSERT(i::Isolate::CurrentPerIsolateThreadData()->thread_id() ==
-         i::Thread::GetThreadLocalInt(i::Isolate::thread_id_key()));
+  ASSERT(i::Isolate::CurrentPerIsolateThreadData()->thread_id().Equals(
+           i::ThreadId::Current()));
   ASSERT(i::Isolate::CurrentPerIsolateThreadData()->isolate() ==
          i::Isolate::Current());
 
@@ -68,15 +73,6 @@
   Isolate* isolate = Isolate::Current();
   if (isolate->IsInitialized()) return true;
 
-#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI)
-  use_crankshaft_ = false;
-#else
-  use_crankshaft_ = FLAG_crankshaft;
-#endif
-
-  // Peephole optimization might interfere with deoptimization.
-  FLAG_peephole_optimization = !use_crankshaft_;
-
   is_running_ = true;
   has_been_setup_ = true;
   has_fatal_error_ = false;
@@ -171,8 +167,8 @@
 } double_int_union;
 
 
-Object* V8::FillHeapNumberWithRandom(Object* heap_number) {
-  uint64_t random_bits = Random(Isolate::Current());
+Object* V8::FillHeapNumberWithRandom(Object* heap_number, Isolate* isolate) {
+  uint64_t random_bits = Random(isolate);
   // Make a double* from address (heap_number + sizeof(double)).
   double_int_union* r = reinterpret_cast<double_int_union*>(
       reinterpret_cast<char*>(heap_number) +
@@ -188,4 +184,30 @@
   return heap_number;
 }
 
+
+void V8::InitializeOncePerProcess() {
+  ScopedLock lock(init_once_mutex);
+  if (init_once_called) return;
+  init_once_called = true;
+
+  // Setup the platform OS support.
+  OS::Setup();
+
+  use_crankshaft_ = FLAG_crankshaft;
+
+  if (Serializer::enabled()) {
+    use_crankshaft_ = false;
+  }
+
+  CPU::Setup();
+  if (!CPU::SupportsCrankshaft()) {
+    use_crankshaft_ = false;
+  }
+
+  RuntimeProfiler::GlobalSetup();
+
+  // Peephole optimization might interfere with deoptimization.
+  FLAG_peephole_optimization = !use_crankshaft_;
+}
+
 } }  // namespace v8::internal
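
InitializeOncePerProcess() above is the classic mutex-guarded run-once
idiom (this code predates std::call_once). Its skeleton in standalone form,
using the standard mutex for brevity:

#include <mutex>

static std::mutex init_once_mutex;
static bool init_once_called = false;

static void InitializeOncePerProcess() {
  std::lock_guard<std::mutex> lock(init_once_mutex);
  if (init_once_called) return;  // every later caller bails out here
  init_once_called = true;
  // ... process-wide setup (OS, CPU feature detection, flags) runs once ...
}

int main() {
  InitializeOncePerProcess();
  InitializeOncePerProcess();    // second call is a cheap no-op
  return 0;
}
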
diff --git a/src/v8.h b/src/v8.h
index e7ca0d2..776fa9c 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -84,7 +84,6 @@
   static void TearDown();
   static bool IsRunning() { return is_running_; }
   static bool UseCrankshaft() { return use_crankshaft_; }
-  static void DisableCrankshaft() { use_crankshaft_ = false; }
   // To be dead you have to have lived
   // TODO(isolates): move IsDead to Isolate.
   static bool IsDead() { return has_fatal_error_ || has_been_disposed_; }
@@ -101,12 +100,15 @@
   // use a separate random state for internal random number
   // generation.
   static uint32_t RandomPrivate(Isolate* isolate);
-  static Object* FillHeapNumberWithRandom(Object* heap_number);
+  static Object* FillHeapNumberWithRandom(Object* heap_number,
+                                          Isolate* isolate);
 
   // Idle notification directly from the API.
   static bool IdleNotification();
 
  private:
+  static void InitializeOncePerProcess();
+
   // True if engine is currently running
   static bool is_running_;
   // True if V8 has ever been run
diff --git a/src/v8threads.cc b/src/v8threads.cc
index cecafaa..4b033fc 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -147,11 +147,11 @@
   // First check whether the current thread has been 'lazily archived', ie
   // not archived at all.  If that is the case we put the state storage we
   // had prepared back in the free list, since we didn't need it after all.
-  if (lazily_archived_thread_.IsSelf()) {
-    lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
+  if (lazily_archived_thread_.Equals(ThreadId::Current())) {
+    lazily_archived_thread_ = ThreadId::Invalid();
     ASSERT(Isolate::CurrentPerIsolateThreadData()->thread_state() ==
            lazily_archived_thread_state_);
-    lazily_archived_thread_state_->set_id(kInvalidId);
+    lazily_archived_thread_state_->set_id(ThreadId::Invalid());
     lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
     lazily_archived_thread_state_ = NULL;
     Isolate::CurrentPerIsolateThreadData()->set_thread_state(NULL);
@@ -190,7 +190,7 @@
     isolate_->stack_guard()->TerminateExecution();
     state->set_terminate_on_restore(false);
   }
-  state->set_id(kInvalidId);
+  state->set_id(ThreadId::Invalid());
   state->Unlink();
   state->LinkInto(ThreadState::FREE_LIST);
   return true;
@@ -199,13 +199,13 @@
 
 void ThreadManager::Lock() {
   mutex_->Lock();
-  mutex_owner_.Initialize(ThreadHandle::SELF);
+  mutex_owner_ = ThreadId::Current();
   ASSERT(IsLockedByCurrentThread());
 }
 
 
 void ThreadManager::Unlock() {
-  mutex_owner_.Initialize(ThreadHandle::INVALID);
+  mutex_owner_ = ThreadId::Invalid();
   mutex_->Unlock();
 }
 
@@ -224,7 +224,7 @@
 
 
 ThreadState::ThreadState(ThreadManager* thread_manager)
-    : id_(ThreadManager::kInvalidId),
+    : id_(ThreadId::Invalid()),
       terminate_on_restore_(false),
       next_(this),
       previous_(this),
@@ -282,8 +282,8 @@
 // defined as 0.)
 ThreadManager::ThreadManager()
     : mutex_(OS::CreateMutex()),
-      mutex_owner_(ThreadHandle::INVALID),
-      lazily_archived_thread_(ThreadHandle::INVALID),
+      mutex_owner_(ThreadId::Invalid()),
+      lazily_archived_thread_(ThreadId::Invalid()),
       lazily_archived_thread_state_(NULL),
       free_anchor_(NULL),
       in_use_anchor_(NULL) {
@@ -298,16 +298,16 @@
 
 
 void ThreadManager::ArchiveThread() {
-  ASSERT(!lazily_archived_thread_.IsValid());
+  ASSERT(lazily_archived_thread_.Equals(ThreadId::Invalid()));
   ASSERT(!IsArchived());
   ThreadState* state = GetFreeThreadState();
   state->Unlink();
   Isolate::CurrentPerIsolateThreadData()->set_thread_state(state);
-  lazily_archived_thread_.Initialize(ThreadHandle::SELF);
+  lazily_archived_thread_ = ThreadId::Current();
   lazily_archived_thread_state_ = state;
-  ASSERT(state->id() == kInvalidId);
+  ASSERT(state->id().Equals(ThreadId::Invalid()));
   state->set_id(CurrentId());
-  ASSERT(state->id() != kInvalidId);
+  ASSERT(!state->id().Equals(ThreadId::Invalid()));
 }
 
 
@@ -326,7 +326,7 @@
   to = isolate_->stack_guard()->ArchiveStackGuard(to);
   to = isolate_->regexp_stack()->ArchiveStack(to);
   to = isolate_->bootstrapper()->ArchiveState(to);
-  lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
+  lazily_archived_thread_ = ThreadId::Invalid();
   lazily_archived_thread_state_ = NULL;
 }
 
@@ -373,16 +373,16 @@
 }
 
 
-int ThreadManager::CurrentId() {
-  return Thread::GetThreadLocalInt(Isolate::thread_id_key());
+ThreadId ThreadManager::CurrentId() {
+  return ThreadId::Current();
 }
 
 
-void ThreadManager::TerminateExecution(int thread_id) {
+void ThreadManager::TerminateExecution(ThreadId thread_id) {
   for (ThreadState* state = FirstThreadStateInUse();
        state != NULL;
        state = state->Next()) {
-    if (thread_id == state->id()) {
+    if (thread_id.Equals(state->id())) {
       state->set_terminate_on_restore(true);
     }
   }
diff --git a/src/v8threads.h b/src/v8threads.h
index f1992ad..d8a923e 100644
--- a/src/v8threads.h
+++ b/src/v8threads.h
@@ -43,8 +43,8 @@
   void Unlink();
 
   // Id of thread.
-  void set_id(int id) { id_ = id; }
-  int id() { return id_; }
+  void set_id(ThreadId id) { id_ = id; }
+  ThreadId id() { return id_; }
 
   // Should the thread be terminated when it is restored?
   bool terminate_on_restore() { return terminate_on_restore_; }
@@ -59,7 +59,7 @@
 
   void AllocateSpace();
 
-  int id_;
+  ThreadId id_;
   bool terminate_on_restore_;
   char* data_;
   ThreadState* next_;
@@ -78,7 +78,7 @@
 class ThreadVisitor {
  public:
   // ThreadLocalTop may be only available during this call.
-  virtual void VisitThread(ThreadLocalTop* top) = 0;
+  virtual void VisitThread(Isolate* isolate, ThreadLocalTop* top) = 0;
 
  protected:
   virtual ~ThreadVisitor() {}
@@ -97,17 +97,18 @@
 
   void Iterate(ObjectVisitor* v);
   void IterateArchivedThreads(ThreadVisitor* v);
-  bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }
+  bool IsLockedByCurrentThread() {
+    return mutex_owner_.Equals(ThreadId::Current());
+  }
 
-  int CurrentId();
+  ThreadId CurrentId();
 
-  void TerminateExecution(int thread_id);
+  void TerminateExecution(ThreadId thread_id);
 
   // Iterate over in-use states.
   ThreadState* FirstThreadStateInUse();
   ThreadState* GetFreeThreadState();
 
-  static const int kInvalidId = -1;
  private:
   ThreadManager();
   ~ThreadManager();
@@ -115,8 +116,8 @@
   void EagerlyArchiveThread();
 
   Mutex* mutex_;
-  ThreadHandle mutex_owner_;
-  ThreadHandle lazily_archived_thread_;
+  ThreadId mutex_owner_;
+  ThreadId lazily_archived_thread_;
   ThreadState* lazily_archived_thread_state_;
 
   // In the following two lists there is always at least one object on the list.
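
The ThreadHandle/int pair is replaced throughout by a small ThreadId value
type. A sketch consistent with the operations used above (Current(),
Invalid(), Equals()); the representation, and the C++11 used for brevity,
are assumptions:

#include <atomic>
#include <cassert>

class ThreadId {
 public:
  // Id of the calling thread; each thread gets a distinct positive value.
  static ThreadId Current() {
    static std::atomic<int> next_id(1);
    static thread_local int my_id = next_id.fetch_add(1);
    return ThreadId(my_id);
  }
  static ThreadId Invalid() { return ThreadId(0); }
  bool Equals(const ThreadId& other) const { return id_ == other.id_; }

 private:
  explicit ThreadId(int id) : id_(id) {}
  int id_;
};

int main() {
  assert(ThreadId::Current().Equals(ThreadId::Current()));
  assert(!ThreadId::Current().Equals(ThreadId::Invalid()));
  return 0;
}
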
diff --git a/src/v8utils.h b/src/v8utils.h
index 0aa53ca..93fc1fd 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -120,7 +120,9 @@
 // Memory
 
 // Copies data from |src| to |dst|.  The data spans MUST not overlap.
-inline void CopyWords(Object** dst, Object** src, int num_words) {
+template <typename T>
+inline void CopyWords(T* dst, T* src, int num_words) {
+  STATIC_ASSERT(sizeof(T) == kPointerSize);
   ASSERT(Min(dst, src) + num_words <= Max(dst, src));
   ASSERT(num_words > 0);
 
@@ -254,51 +256,14 @@
 };
 
 
-// Custom memcpy implementation for platforms where the standard version
-// may not be good enough.
-#if defined(V8_TARGET_ARCH_IA32)
-
-// The default memcpy on ia32 architectures is generally not as efficient
-// as possible. (If any further ia32 platforms are introduced where the
-// memcpy function is efficient, exclude them from this branch).
-
-typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
-
-// Implemented in codegen-<arch>.cc.
-MemCopyFunction CreateMemCopyFunction();
-
-// Copy memory area to disjoint memory area.
-static inline void MemCopy(void* dest, const void* src, size_t size) {
-  static MemCopyFunction memcopy = CreateMemCopyFunction();
-  (*memcopy)(dest, src, size);
-#ifdef DEBUG
-  CHECK_EQ(0, memcmp(dest, src, size));
-#endif
-}
-
-// Limit below which the extra overhead of the MemCopy function is likely
-// to outweigh the benefits of faster copying.
-static const int kMinComplexMemCopy = 64;
-
-#else  // V8_TARGET_ARCH_IA32
-
-static inline void MemCopy(void* dest, const void* src, size_t size) {
-  memcpy(dest, src, size);
-}
-
-static const int kMinComplexMemCopy = 256;
-
-#endif  // V8_TARGET_ARCH_IA32
-
-
 // Copy from ASCII/16bit chars to ASCII/16bit chars.
 template <typename sourcechar, typename sinkchar>
 static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
   sinkchar* limit = dest + chars;
 #ifdef V8_HOST_CAN_READ_UNALIGNED
   if (sizeof(*dest) == sizeof(*src)) {
-    if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) {
-      MemCopy(dest, src, chars * sizeof(*dest));
+    if (chars >= static_cast<int>(OS::kMinComplexMemCopy / sizeof(*dest))) {
+      OS::MemCopy(dest, src, chars * sizeof(*dest));
       return;
     }
     // Number of characters in a uintptr_t.
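
The custom copy routine and its size threshold move from free functions here
into the OS class, as the updated CopyChars call sites above show. A
fallback-shaped sketch of the new home; the names follow the hunk, the
bodies are assumptions:

#include <cstddef>
#include <cstring>

struct OS {
  // Below this size, call overhead outweighs any clever copying.
  static const int kMinComplexMemCopy = 64;  // illustrative threshold
  static void MemCopy(void* dest, const void* src, std::size_t size) {
    std::memcpy(dest, src, size);            // portable fallback body
  }
};

int main() {
  char src[8] = "abcdefg";
  char dst[8];
  OS::MemCopy(dst, src, sizeof(src));
  return 0;
}
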
diff --git a/src/variables.cc b/src/variables.cc
index fa7ce1b..0502722 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,26 +35,8 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// Implementation StaticType.
-
-
-const char* StaticType::Type2String(StaticType* type) {
-  switch (type->kind_) {
-    case UNKNOWN:
-      return "UNKNOWN";
-    case LIKELY_SMI:
-      return "LIKELY_SMI";
-    default:
-      UNREACHABLE();
-  }
-  return "UNREACHABLE";
-}
-
-
-// ----------------------------------------------------------------------------
 // Implementation Variable.
 
-
 const char* Variable::Mode2String(Mode mode) {
   switch (mode) {
     case VAR: return "VAR";
diff --git a/src/variables.h b/src/variables.h
index 67e1a18..b1ff0db 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,46 +33,6 @@
 namespace v8 {
 namespace internal {
 
-// Variables and AST expression nodes can track their "type" to enable
-// optimizations and removal of redundant checks when generating code.
-
-class StaticType {
- public:
-  enum Kind {
-    UNKNOWN,
-    LIKELY_SMI
-  };
-
-  StaticType() : kind_(UNKNOWN) {}
-
-  bool Is(Kind kind) const { return kind_ == kind; }
-
-  bool IsKnown() const { return !Is(UNKNOWN); }
-  bool IsUnknown() const { return Is(UNKNOWN); }
-  bool IsLikelySmi() const { return Is(LIKELY_SMI); }
-
-  void CopyFrom(StaticType* other) {
-    kind_ = other->kind_;
-  }
-
-  static const char* Type2String(StaticType* type);
-
-  // LIKELY_SMI accessors
-  void SetAsLikelySmi() {
-    kind_ = LIKELY_SMI;
-  }
-
-  void SetAsLikelySmiIfUnknown() {
-    if (IsUnknown()) {
-      SetAsLikelySmi();
-    }
-  }
-
- private:
-  Kind kind_;
-};
-
-
 // The AST refers to variables via VariableProxies - placeholders for the actual
 // variables. Variables themselves are never directly referred to from the AST,
 // they are maintained by scopes, and referred to from VariableProxies and Slots
@@ -181,8 +141,6 @@
   Expression* rewrite() const { return rewrite_; }
   void set_rewrite(Expression* expr) { rewrite_ = expr; }
 
-  StaticType* type() { return &type_; }
-
  private:
   Scope* scope_;
   Handle<String> name_;
@@ -191,9 +149,6 @@
 
   Variable* local_if_not_shadowed_;
 
-  // Static type information
-  StaticType type_;
-
   // Code generation.
   // rewrite_ is usually a Slot or a Property, but may be any expression.
   Expression* rewrite_;
diff --git a/src/version.cc b/src/version.cc
index 6104dac..25939c2 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     2
-#define BUILD_NUMBER      6
-#define PATCH_LEVEL       0
+#define MINOR_VERSION     3
+#define BUILD_NUMBER      0
+#define PATCH_LEVEL       1
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/virtual-frame-heavy-inl.h b/src/virtual-frame-heavy-inl.h
deleted file mode 100644
index cf12eca..0000000
--- a/src/virtual-frame-heavy-inl.h
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_HEAVY_INL_H_
-#define V8_VIRTUAL_FRAME_HEAVY_INL_H_
-
-#include "type-info.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "register-allocator-inl.h"
-#include "codegen-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// On entry to a function, the virtual frame already contains the receiver,
-// the parameters, and a return address.  All frame elements are in memory.
-VirtualFrame::VirtualFrame()
-    : elements_(parameter_count() + local_count() + kPreallocatedElements),
-      stack_pointer_(parameter_count() + 1) {  // 0-based index of TOS.
-  for (int i = 0; i <= stack_pointer_; i++) {
-    elements_.Add(FrameElement::MemoryElement(TypeInfo::Unknown()));
-  }
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    register_locations_[i] = kIllegalIndex;
-  }
-}
-
-
-// When cloned, a frame is a deep copy of the original.
-VirtualFrame::VirtualFrame(VirtualFrame* original)
-    : elements_(original->element_count()),
-      stack_pointer_(original->stack_pointer_) {
-  elements_.AddAll(original->elements_);
-  // Copy register locations from original.
-  memcpy(&register_locations_,
-         original->register_locations_,
-         sizeof(register_locations_));
-}
-
-
-void VirtualFrame::PushFrameSlotAt(int index) {
-  elements_.Add(CopyElementAt(index));
-}
-
-
-void VirtualFrame::Push(Register reg, TypeInfo info) {
-  if (is_used(reg)) {
-    int index = register_location(reg);
-    FrameElement element = CopyElementAt(index, info);
-    elements_.Add(element);
-  } else {
-    Use(reg, element_count());
-    FrameElement element =
-        FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED, info);
-    elements_.Add(element);
-  }
-}
-
-
-bool VirtualFrame::ConstantPoolOverflowed() {
-  return FrameElement::ConstantPoolOverflowed();
-}
-
-
-bool VirtualFrame::Equals(VirtualFrame* other) {
-#ifdef DEBUG
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    if (register_location(i) != other->register_location(i)) {
-      return false;
-    }
-  }
-  if (element_count() != other->element_count()) return false;
-#endif
-  if (stack_pointer_ != other->stack_pointer_) return false;
-  for (int i = 0; i < element_count(); i++) {
-    if (!elements_[i].Equals(other->elements_[i])) return false;
-  }
-
-  return true;
-}
-
-
-void VirtualFrame::SetTypeForLocalAt(int index, TypeInfo info) {
-  elements_[local0_index() + index].set_type_info(info);
-}
-
-
-// Make the type of all elements be MEMORY.
-void VirtualFrame::SpillAll() {
-  for (int i = 0; i < element_count(); i++) {
-    SpillElementAt(i);
-  }
-}
-
-
-void VirtualFrame::PrepareForReturn() {
-  // Spill all locals. This is necessary to make sure all locals have
-  // the right value when breaking at the return site in the debugger.
-  for (int i = 0; i < expression_base_index(); i++) {
-    SpillElementAt(i);
-  }
-}
-
-
-void VirtualFrame::SetTypeForParamAt(int index, TypeInfo info) {
-  elements_[param0_index() + index].set_type_info(info);
-}
-
-
-void VirtualFrame::Nip(int num_dropped) {
-  ASSERT(num_dropped >= 0);
-  if (num_dropped == 0) return;
-  Result tos = Pop();
-  if (num_dropped > 1) {
-    Drop(num_dropped - 1);
-  }
-  SetElementAt(0, &tos);
-}
-
-
-void VirtualFrame::Push(Smi* value) {
-  Push(Handle<Object> (value));
-}
-
-
-int VirtualFrame::register_location(Register reg) {
-  return register_locations_[RegisterAllocator::ToNumber(reg)];
-}
-
-
-void VirtualFrame::set_register_location(Register reg, int index) {
-  register_locations_[RegisterAllocator::ToNumber(reg)] = index;
-}
-
-
-bool VirtualFrame::is_used(Register reg) {
-  return register_locations_[RegisterAllocator::ToNumber(reg)]
-      != kIllegalIndex;
-}
-
-
-void VirtualFrame::SetElementAt(int index, Handle<Object> value) {
-  Result temp(value);
-  SetElementAt(index, &temp);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  return RawCallStub(stub);
-}
-
-
-int VirtualFrame::parameter_count() {
-  return cgen()->scope()->num_parameters();
-}
-
-
-int VirtualFrame::local_count() {
-  return cgen()->scope()->num_stack_slots();
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_VIRTUAL_FRAME_HEAVY_INL_H_
diff --git a/src/virtual-frame-heavy.cc b/src/virtual-frame-heavy.cc
deleted file mode 100644
index 7270280..0000000
--- a/src/virtual-frame-heavy.cc
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void VirtualFrame::SetElementAt(int index, Result* value) {
-  int frame_index = element_count() - index - 1;
-  ASSERT(frame_index >= 0);
-  ASSERT(frame_index < element_count());
-  ASSERT(value->is_valid());
-  FrameElement original = elements_[frame_index];
-
-  // Early exit if the element is the same as the one being set.
-  bool same_register = original.is_register()
-      && value->is_register()
-      && original.reg().is(value->reg());
-  bool same_constant = original.is_constant()
-      && value->is_constant()
-      && original.handle().is_identical_to(value->handle());
-  if (same_register || same_constant) {
-    value->Unuse();
-    return;
-  }
-
-  InvalidateFrameSlotAt(frame_index);
-
-  if (value->is_register()) {
-    if (is_used(value->reg())) {
-      // The register already appears on the frame.  Either the existing
-      // register element, or the new element at frame_index, must be made
-      // a copy.
-      int i = register_location(value->reg());
-
-      if (i < frame_index) {
-        // The register FrameElement is lower in the frame than the new copy.
-        elements_[frame_index] = CopyElementAt(i);
-      } else {
-        // There was an early bailout for the case of setting a
-        // register element to itself.
-        ASSERT(i != frame_index);
-        elements_[frame_index] = elements_[i];
-        elements_[i] = CopyElementAt(frame_index);
-        if (elements_[frame_index].is_synced()) {
-          elements_[i].set_sync();
-        }
-        elements_[frame_index].clear_sync();
-        set_register_location(value->reg(), frame_index);
-        for (int j = i + 1; j < element_count(); j++) {
-          if (elements_[j].is_copy() && elements_[j].index() == i) {
-            elements_[j].set_index(frame_index);
-          }
-        }
-      }
-    } else {
-      // The register value->reg() was not already used on the frame.
-      Use(value->reg(), frame_index);
-      elements_[frame_index] =
-          FrameElement::RegisterElement(value->reg(),
-                                        FrameElement::NOT_SYNCED,
-                                        value->type_info());
-    }
-  } else {
-    ASSERT(value->is_constant());
-    elements_[frame_index] =
-        FrameElement::ConstantElement(value->handle(),
-                                      FrameElement::NOT_SYNCED);
-  }
-  value->Unuse();
-}
-
-
-// Create a duplicate of an existing valid frame element.
-// We can pass an optional number type information that will override the
-// existing information about the backing element. The new information must
-// not conflict with the existing type information and must be equally or
-// more precise. The default parameter value kUninitialized means that there
-// is no additional information.
-FrameElement VirtualFrame::CopyElementAt(int index, TypeInfo info) {
-  ASSERT(index >= 0);
-  ASSERT(index < element_count());
-
-  FrameElement target = elements_[index];
-  FrameElement result;
-
-  switch (target.type()) {
-    case FrameElement::CONSTANT:
-      // We do not copy constants and instead return a fresh unsynced
-      // constant.
-      result = FrameElement::ConstantElement(target.handle(),
-                                             FrameElement::NOT_SYNCED);
-      break;
-
-    case FrameElement::COPY:
-      // We do not allow copies of copies, so we follow one link to
-      // the actual backing store of a copy before making a copy.
-      index = target.index();
-      ASSERT(elements_[index].is_memory() || elements_[index].is_register());
-      // Fall through.
-
-    case FrameElement::MEMORY:  // Fall through.
-    case FrameElement::REGISTER: {
-      // All copies are backed by memory or register locations.
-      result.set_type(FrameElement::COPY);
-      result.clear_copied();
-      result.clear_sync();
-      result.set_index(index);
-      elements_[index].set_copied();
-      // Update backing element's number information.
-      TypeInfo existing = elements_[index].type_info();
-      ASSERT(!existing.IsUninitialized());
-      // Assert that the new type information (a) does not conflict with the
-      // existing one and (b) is equally or more precise.
-      ASSERT((info.ToInt() & existing.ToInt()) == existing.ToInt());
-      ASSERT((info.ToInt() | existing.ToInt()) == info.ToInt());
-
-      elements_[index].set_type_info(!info.IsUninitialized()
-                                       ? info
-                                       : existing);
-      break;
-    }
-    case FrameElement::INVALID:
-      // We should not try to copy invalid elements.
-      UNREACHABLE();
-      break;
-  }
-  return result;
-}
-
-
-// Modify the state of the virtual frame to match the actual frame by adding
-// extra in-memory elements to the top of the virtual frame.  The extra
-// elements will be externally materialized on the actual frame (eg, by
-// pushing an exception handler).  No code is emitted.
-void VirtualFrame::Adjust(int count) {
-  ASSERT(count >= 0);
-  ASSERT(stack_pointer_ == element_count() - 1);
-
-  for (int i = 0; i < count; i++) {
-    elements_.Add(FrameElement::MemoryElement(TypeInfo::Unknown()));
-  }
-  stack_pointer_ += count;
-}
-
-
-void VirtualFrame::ForgetElements(int count) {
-  ASSERT(count >= 0);
-  ASSERT(element_count() >= count);
-
-  for (int i = 0; i < count; i++) {
-    FrameElement last = elements_.RemoveLast();
-    if (last.is_register()) {
-      // A hack to properly count register references for the code
-      // generator's current frame and also for other frames.  The
-      // same code appears in PrepareMergeTo.
-      if (cgen()->frame() == this) {
-        Unuse(last.reg());
-      } else {
-        set_register_location(last.reg(), kIllegalIndex);
-      }
-    }
-  }
-}
-
-
-// Make the type of the element at a given index be MEMORY.
-void VirtualFrame::SpillElementAt(int index) {
-  if (!elements_[index].is_valid()) return;
-
-  SyncElementAt(index);
-  // Number type information is preserved.
-  // Copies get their number information from their backing element.
-  TypeInfo info;
-  if (!elements_[index].is_copy()) {
-    info = elements_[index].type_info();
-  } else {
-    info = elements_[elements_[index].index()].type_info();
-  }
-  // The element is now in memory.  Its copied flag is preserved.
-  FrameElement new_element = FrameElement::MemoryElement(info);
-  if (elements_[index].is_copied()) {
-    new_element.set_copied();
-  }
-  if (elements_[index].is_untagged_int32()) {
-    new_element.set_untagged_int32(true);
-  }
-  if (elements_[index].is_register()) {
-    Unuse(elements_[index].reg());
-  }
-  elements_[index] = new_element;
-}
-
-
-// Clear the dirty bit for the element at a given index.
-void VirtualFrame::SyncElementAt(int index) {
-  if (index <= stack_pointer_) {
-    if (!elements_[index].is_synced()) SyncElementBelowStackPointer(index);
-  } else if (index == stack_pointer_ + 1) {
-    SyncElementByPushing(index);
-  } else {
-    SyncRange(stack_pointer_ + 1, index);
-  }
-}
-
-
-void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) {
-  // Perform state changes on this frame that will make merge to the
-  // expected frame simpler or else increase the likelihood that this
-  // frame will match another.
-  for (int i = 0; i < element_count(); i++) {
-    FrameElement source = elements_[i];
-    FrameElement target = expected->elements_[i];
-
-    if (!target.is_valid() ||
-        (target.is_memory() && !source.is_memory() && source.is_synced())) {
-      // No code needs to be generated to invalidate valid elements.
-      // No code needs to be generated to move values to memory if
-      // they are already synced.  We perform those moves here, before
-      // merging.
-      if (source.is_register()) {
-        // If the frame is the code generator's current frame, we have
-        // to decrement both the frame-internal and global register
-        // counts.
-        if (cgen()->frame() == this) {
-          Unuse(source.reg());
-        } else {
-          set_register_location(source.reg(), kIllegalIndex);
-        }
-      }
-      elements_[i] = target;
-    } else if (target.is_register() && !target.is_synced() &&
-               !source.is_memory()) {
-      // If an element's target is a register that doesn't need to be
-      // synced, and the element is not in memory, then the sync state
-      // of the element is irrelevant.  We clear the sync bit.
-      ASSERT(source.is_valid());
-      elements_[i].clear_sync();
-    }
-  }
-}
-
-
-void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) {
-  ASSERT(height() >= dropped_args);
-  ASSERT(height() >= spilled_args);
-  ASSERT(dropped_args <= spilled_args);
-
-  SyncRange(0, element_count() - 1);
-  // Spill registers.
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    if (is_used(i)) {
-      SpillElementAt(register_location(i));
-    }
-  }
-
-  // Spill the arguments.
-  for (int i = element_count() - spilled_args; i < element_count(); i++) {
-    if (!elements_[i].is_memory()) {
-      SpillElementAt(i);
-    }
-  }
-
-  // Forget the frame elements that will be popped by the call.
-  Forget(dropped_args);
-}
-
-
-// If there are any registers referenced only by the frame, spill one.
-Register VirtualFrame::SpillAnyRegister() {
-  // Find the leftmost (ordered by register number) register whose only
-  // reference is in the frame.
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    if (is_used(i) && cgen()->allocator()->count(i) == 1) {
-      SpillElementAt(register_location(i));
-      ASSERT(!cgen()->allocator()->is_used(i));
-      return RegisterAllocator::ToRegister(i);
-    }
-  }
-  return no_reg;
-}
-
-} }  // namespace v8::internal
diff --git a/src/virtual-frame-light-inl.h b/src/virtual-frame-light-inl.h
deleted file mode 100644
index 681f93f..0000000
--- a/src/virtual-frame-light-inl.h
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_LIGHT_INL_H_
-#define V8_VIRTUAL_FRAME_LIGHT_INL_H_
-
-#include "codegen.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "type-info.h"
-
-#include "codegen-inl.h"
-#include "jump-target-light-inl.h"
-
-namespace v8 {
-namespace internal {
-
-VirtualFrame::VirtualFrame(InvalidVirtualFrameInitializer* dummy)
-    : element_count_(0),
-      top_of_stack_state_(NO_TOS_REGISTERS),
-      register_allocation_map_(0),
-      tos_known_smi_map_(0) { }
-
-
-// On entry to a function, the virtual frame already contains the receiver,
-// the parameters, and a return address.  All frame elements are in memory.
-VirtualFrame::VirtualFrame()
-    : element_count_(parameter_count() + 2),
-      top_of_stack_state_(NO_TOS_REGISTERS),
-      register_allocation_map_(0),
-      tos_known_smi_map_(0) { }
-
-
-// When cloned, a frame is a deep copy of the original.
-VirtualFrame::VirtualFrame(VirtualFrame* original)
-    : element_count_(original->element_count()),
-      top_of_stack_state_(original->top_of_stack_state_),
-      register_allocation_map_(original->register_allocation_map_),
-      tos_known_smi_map_(0) { }
-
-
-bool VirtualFrame::Equals(const VirtualFrame* other) {
-  ASSERT(element_count() == other->element_count());
-  if (top_of_stack_state_ != other->top_of_stack_state_) return false;
-  if (register_allocation_map_ != other->register_allocation_map_) return false;
-  if (tos_known_smi_map_ != other->tos_known_smi_map_) return false;
-
-  return true;
-}
-
-
-void VirtualFrame::PrepareForReturn() {
-  // Don't bother flushing tos registers as returning does not require more
-  // access to the expression stack.
-  top_of_stack_state_ = NO_TOS_REGISTERS;
-}
-
-
-VirtualFrame::RegisterAllocationScope::RegisterAllocationScope(
-    CodeGenerator* cgen)
-  : cgen_(cgen),
-    old_is_spilled_(
-        Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
-  Isolate::Current()->set_is_virtual_frame_in_spilled_scope(false);
-  if (old_is_spilled_) {
-    VirtualFrame* frame = cgen->frame();
-    if (frame != NULL) {
-      frame->AssertIsSpilled();
-    }
-  }
-}
-
-
-VirtualFrame::RegisterAllocationScope::~RegisterAllocationScope() {
-  Isolate::Current()->set_is_virtual_frame_in_spilled_scope(old_is_spilled_);
-  if (old_is_spilled_) {
-    VirtualFrame* frame = cgen_->frame();
-    if (frame != NULL) {
-      frame->SpillAll();
-    }
-  }
-}
-
-
-CodeGenerator* VirtualFrame::cgen() const {
-  return CodeGeneratorScope::Current(Isolate::Current());
-}
-
-
-MacroAssembler* VirtualFrame::masm() { return cgen()->masm(); }
-
-
-void VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
-  if (arg_count != 0) Forget(arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  masm()->CallStub(stub);
-}
-
-
-int VirtualFrame::parameter_count() const {
-  return cgen()->scope()->num_parameters();
-}
-
-
-int VirtualFrame::local_count() const {
-  return cgen()->scope()->num_stack_slots();
-}
-
-
-int VirtualFrame::frame_pointer() const { return parameter_count() + 3; }
-
-
-int VirtualFrame::context_index() { return frame_pointer() - 1; }
-
-
-int VirtualFrame::function_index() { return frame_pointer() - 2; }
-
-
-int VirtualFrame::local0_index() const { return frame_pointer() + 2; }
-
-
-int VirtualFrame::fp_relative(int index) {
-  ASSERT(index < element_count());
-  ASSERT(frame_pointer() < element_count());  // FP is on the frame.
-  return (frame_pointer() - index) * kPointerSize;
-}
-
-
-int VirtualFrame::expression_base_index() const {
-  return local0_index() + local_count();
-}
-
-
-int VirtualFrame::height() const {
-  return element_count() - expression_base_index();
-}
-
-
-MemOperand VirtualFrame::LocalAt(int index) {
-  ASSERT(0 <= index);
-  ASSERT(index < local_count());
-  return MemOperand(fp, kLocal0Offset - index * kPointerSize);
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_VIRTUAL_FRAME_LIGHT_INL_H_
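
The header deleted above carried the whole index arithmetic for the "light"
virtual frame. A minimal standalone sketch of that arithmetic, with an
illustrative pointer size and driver (not V8 API):

    #include <cassert>
    #include <cstdio>

    const int kPointerSize = 8;  // illustrative; the real value is per-platform

    // On entry the frame holds the receiver, the parameters, and a return
    // address (element_count = parameter_count + 2); after the prologue adds
    // two more words, the frame pointer lands at element parameter_count + 3,
    // per the deleted frame_pointer().
    struct FrameLayout {
      int parameter_count;
      int local_count;
      int frame_pointer() const { return parameter_count + 3; }
      int context_index() const { return frame_pointer() - 1; }
      int function_index() const { return frame_pointer() - 2; }
      int local0_index() const { return frame_pointer() + 2; }
      int expression_base_index() const { return local0_index() + local_count; }
      // Byte offset of element |index| from the frame pointer.
      int fp_relative(int index) const {
        return (frame_pointer() - index) * kPointerSize;
      }
    };

    int main() {
      FrameLayout f = {2, 3};  // 2 parameters, 3 stack locals
      assert(f.frame_pointer() == 5);
      assert(f.fp_relative(f.context_index()) == kPointerSize);
      printf("local0 is element %d\n", f.local0_index());
      return 0;
    }
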
diff --git a/src/virtual-frame-light.cc b/src/virtual-frame-light.cc
deleted file mode 100644
index bbaaaf5..0000000
--- a/src/virtual-frame-light.cc
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void VirtualFrame::Adjust(int count) {
-  ASSERT(count >= 0);
-  RaiseHeight(count, 0);
-}
-
-
-// If there are any registers referenced only by the frame, spill one.
-Register VirtualFrame::SpillAnyRegister() {
-  UNIMPLEMENTED();
-  return no_reg;
-}
-
-
-InvalidVirtualFrameInitializer* kInvalidVirtualFrameInitializer = NULL;
-
-} }  // namespace v8::internal
diff --git a/src/virtual-frame.cc b/src/virtual-frame.cc
deleted file mode 100644
index 310ff59..0000000
--- a/src/virtual-frame.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
-// Specialization of List::ResizeAdd to non-inlined version for FrameElements.
-// The function ResizeAdd becomes a real function, whose implementation is the
-// inlined ResizeAddInternal.
-template <>
-void List<FrameElement,
-          FreeStoreAllocationPolicy>::ResizeAdd(const FrameElement& element) {
-  ResizeAddInternal(element);
-}
-
-} }  // namespace v8::internal
diff --git a/src/virtual-frame.h b/src/virtual-frame.h
deleted file mode 100644
index 65d1009..0000000
--- a/src/virtual-frame.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_VIRTUAL_FRAME_H_
-#define V8_VIRTUAL_FRAME_H_
-
-#include "frame-element.h"
-#include "macro-assembler.h"
-
-#include "list-inl.h"
-#include "utils.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/virtual-frame-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/virtual-frame-x64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/virtual-frame-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/virtual-frame-mips.h"
-#else
-#error Unsupported target architecture.
-#endif
-
-namespace v8 {
-namespace internal {
-
-// Add() on List is inlined, ResizeAdd() called by Add() is inlined except for
-// Lists of FrameElements, and ResizeAddInternal() is inlined in ResizeAdd().
-template <>
-void List<FrameElement,
-          FreeStoreAllocationPolicy>::ResizeAdd(const FrameElement& element);
-} }  // namespace v8::internal
-
-#endif  // V8_VIRTUAL_FRAME_H_
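
The pair of files removed here (the header just above and virtual-frame.cc
before it) used an explicit specialization to keep one member of an otherwise
inlined template out of line for FrameElement lists only. A standalone sketch
of that technique, with stand-in types rather than the V8 ones:

    template <typename T>
    struct List {
      void ResizeAdd(const T& element);  // inline in the real header for most T
    };

    struct FrameElement { int value; };

    // Declared in the header, so no inline instantiation is generated...
    template <>
    void List<FrameElement>::ResizeAdd(const FrameElement& element);

    // ...and defined exactly once in a .cc file, keeping the grow-and-append
    // path out of every call site.
    template <>
    void List<FrameElement>::ResizeAdd(const FrameElement& element) {
      (void)element;  // the real body resizes the backing store and appends
    }

    int main() {
      List<FrameElement> list;
      FrameElement e = {1};
      list.ResizeAdd(e);
      return 0;
    }
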
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 439236a..9541a58 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -393,9 +393,9 @@
     StaticVisitor::VisitPointer(heap, target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
-    StaticVisitor::VisitCodeTarget(this);
+    StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
-    StaticVisitor::VisitGlobalPropertyCell(this);
+    StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
     CPU::FlushICache(pc_, sizeof(Address));
@@ -405,7 +405,7 @@
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
-    StaticVisitor::VisitDebugTarget(this);
+    StaticVisitor::VisitDebugTarget(heap, this);
 #endif
   } else if (mode == RelocInfo::RUNTIME_ENTRY) {
     StaticVisitor::VisitRuntimeEntry(this);
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 0744b8a..c06bc0c 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -38,22 +38,38 @@
 // -----------------------------------------------------------------------------
 // Implementation of CpuFeatures
 
-CpuFeatures::CpuFeatures()
-    : supported_(kDefaultCpuFeatures),
-      enabled_(0),
-      found_by_runtime_probing_(0) {
-}
+
+#ifdef DEBUG
+bool CpuFeatures::initialized_ = false;
+#endif
+uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
+uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 
-void CpuFeatures::Probe(bool portable)  {
-  ASSERT(HEAP->HasBeenSetup());
+void CpuFeatures::Probe() {
+  ASSERT(!initialized_);
+#ifdef DEBUG
+  initialized_ = true;
+#endif
   supported_ = kDefaultCpuFeatures;
-  if (portable && Serializer::enabled()) {
+  if (Serializer::enabled()) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
 
-  Assembler assm(NULL, 0);
+  const int kBufferSize = 4 * KB;
+  VirtualMemory* memory = new VirtualMemory(kBufferSize);
+  if (!memory->IsReserved()) {
+    delete memory;
+    return;
+  }
+  ASSERT(memory->size() >= static_cast<size_t>(kBufferSize));
+  if (!memory->Commit(memory->address(), kBufferSize, true/*executable*/)) {
+    delete memory;
+    return;
+  }
+
+  Assembler assm(NULL, memory->address(), kBufferSize);
   Label cpuid, done;
 #define __ assm.
   // Save old rsp, since we are going to modify the stack.
@@ -83,7 +99,7 @@
   // ecx:edx. Temporarily enable CPUID support because we know it's
   // safe here.
   __ bind(&cpuid);
-  __ movq(rax, Immediate(1));
+  __ movl(rax, Immediate(1));
   supported_ = kDefaultCpuFeatures | (1 << CPUID);
   { Scope fscope(CPUID);
     __ cpuid();
@@ -117,31 +133,20 @@
   __ ret(0);
 #undef __
 
-  CodeDesc desc;
-  assm.GetCode(&desc);
-  Isolate* isolate = Isolate::Current();
-  MaybeObject* maybe_code =
-      isolate->heap()->CreateCode(desc,
-                                  Code::ComputeFlags(Code::STUB),
-                                  Handle<Object>());
-  Object* code;
-  if (!maybe_code->ToObject(&code)) return;
-  if (!code->IsCode()) return;
-  PROFILE(isolate,
-          CodeCreateEvent(Logger::BUILTIN_TAG,
-                          Code::cast(code), "CpuFeatures::Probe"));
   typedef uint64_t (*F0)();
-  F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+  F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address()));
   supported_ = probe();
   found_by_runtime_probing_ = supported_;
   found_by_runtime_probing_ &= ~kDefaultCpuFeatures;
   uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform();
   supported_ |= os_guarantees;
-  found_by_runtime_probing_ &= portable ? ~os_guarantees : 0;
+  found_by_runtime_probing_ &= ~os_guarantees;
   // SSE2 and CMOV must be available on an X64 CPU.
   ASSERT(IsSupported(CPUID));
   ASSERT(IsSupported(SSE2));
   ASSERT(IsSupported(CMOV));
+
+  delete memory;
 }
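
Probe() previously assembled the feature-detection stub into a GC-heap Code
object; above it now writes the stub into a freshly committed executable
VirtualMemory region and calls it as a raw function pointer, so it can run
before any isolate or heap exists. A minimal POSIX illustration of that shape
(x86-64 Linux assumed; the emitted bytes are just "mov eax, 42; ret"):

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <sys/mman.h>

    int main() {
      const uint8_t code[] = {0xB8, 0x2A, 0x00, 0x00, 0x00,  // mov eax, 42
                              0xC3};                          // ret
      void* mem = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(mem != MAP_FAILED);
      memcpy(mem, code, sizeof(code));
      typedef uint32_t (*F0)();
      F0 probe = reinterpret_cast<F0>(mem);
      assert(probe() == 42);
      munmap(mem, 4096);
      return 0;
    }
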
 
 
@@ -339,8 +344,8 @@
 static void InitCoverageLog();
 #endif
 
-Assembler::Assembler(void* buffer, int buffer_size)
-    : AssemblerBase(Isolate::Current()),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+    : AssemblerBase(arg_isolate),
       code_targets_(100),
       positions_recorder_(this),
       emit_debug_code_(FLAG_debug_code) {
@@ -349,7 +354,7 @@
     if (buffer_size <= kMinimalBufferSize) {
       buffer_size = kMinimalBufferSize;
 
-      if (isolate()->assembler_spare_buffer() != NULL) {
+      if (isolate() != NULL && isolate()->assembler_spare_buffer() != NULL) {
         buffer = isolate()->assembler_spare_buffer();
         isolate()->set_assembler_spare_buffer(NULL);
       }
@@ -383,7 +388,6 @@
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
 
-  last_pc_ = NULL;
 
 #ifdef GENERATED_CODE_COVERAGE
   InitCoverageLog();
@@ -393,7 +397,8 @@
 
 Assembler::~Assembler() {
   if (own_buffer_) {
-    if (isolate()->assembler_spare_buffer() == NULL &&
+    if (isolate() != NULL &&
+        isolate()->assembler_spare_buffer() == NULL &&
         buffer_size_ == kMinimalBufferSize) {
       isolate()->set_assembler_spare_buffer(buffer_);
     } else {
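
The constructor and destructor hunks above thread one idea through the file:
the assembler must now tolerate a NULL isolate (Probe() runs before Isolate
creation), so every use of the cached spare buffer is guarded. A standalone
sketch of that ownership pattern, with hypothetical names:

    #include <cstddef>
    #include <cstdlib>

    struct Owner { void* spare_buffer; };  // stands in for Isolate

    void* AcquireBuffer(Owner* owner, size_t size) {
      if (owner != NULL && owner->spare_buffer != NULL) {
        void* buf = owner->spare_buffer;  // reuse the cached buffer
        owner->spare_buffer = NULL;
        return buf;
      }
      return malloc(size);  // no owner yet: fall back to a plain allocation
    }

    void ReleaseBuffer(Owner* owner, void* buf) {
      if (owner != NULL && owner->spare_buffer == NULL) {
        owner->spare_buffer = buf;  // cache it for the next assembler
      } else {
        free(buf);
      }
    }

    int main() {
      Owner isolate = {NULL};
      void* b = AcquireBuffer(&isolate, 256);   // plain malloc: nothing cached
      ReleaseBuffer(&isolate, b);               // cached for reuse
      void* b2 = AcquireBuffer(&isolate, 256);  // reuses the cached buffer
      ReleaseBuffer(NULL, b2);                  // no owner: freed immediately
      return 0;
    }
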
@@ -438,7 +443,6 @@
 
 void Assembler::bind_to(Label* L, int pos) {
   ASSERT(!L->is_bound());  // Label may only be bound once.
-  last_pc_ = NULL;
   ASSERT(0 <= pos && pos <= pc_offset());  // Position must be valid.
   if (L->is_linked()) {
     int current = L->pos();
@@ -465,7 +469,6 @@
 
 void Assembler::bind(NearLabel* L) {
   ASSERT(!L->is_bound());
-  last_pc_ = NULL;
   while (L->unresolved_branches_ > 0) {
     int branch_pos = L->unresolved_positions_[L->unresolved_branches_ - 1];
     int disp = pc_offset() - branch_pos;
@@ -516,7 +519,8 @@
           reloc_info_writer.pos(), desc.reloc_size);
 
   // Switch buffers.
-  if (isolate()->assembler_spare_buffer() == NULL &&
+  if (isolate() != NULL &&
+      isolate()->assembler_spare_buffer() == NULL &&
       buffer_size_ == kMinimalBufferSize) {
     isolate()->set_assembler_spare_buffer(buffer_);
   } else {
@@ -525,9 +529,6 @@
   buffer_ = desc.buffer;
   buffer_size_ = desc.buffer_size;
   pc_ += pc_delta;
-  if (last_pc_ != NULL) {
-    last_pc_ += pc_delta;
-  }
   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                                reloc_info_writer.last_pc() + pc_delta);
 
@@ -565,7 +566,6 @@
 
 void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(reg, op);
   emit(opcode);
   emit_operand(reg, op);
@@ -574,7 +574,6 @@
 
 void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT((opcode & 0xC6) == 2);
   if (rm_reg.low_bits() == 4)  {  // Forces SIB byte.
     // Swap reg and rm_reg and change opcode operand order.
@@ -591,7 +590,6 @@
 
 void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT((opcode & 0xC6) == 2);
   if (rm_reg.low_bits() == 4) {  // Forces SIB byte.
     // Swap reg and rm_reg and change opcode operand order.
@@ -612,7 +610,6 @@
                                  Register reg,
                                  const Operand& rm_reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(reg, rm_reg);
   emit(opcode);
@@ -622,7 +619,6 @@
 
 void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT((opcode & 0xC6) == 2);
   if (rm_reg.low_bits() == 4) {  // Forces SIB byte.
     // Swap reg and rm_reg and change opcode operand order.
@@ -641,7 +637,6 @@
                                  Register reg,
                                  const Operand& rm_reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(reg, rm_reg);
   emit(opcode);
   emit_operand(reg, rm_reg);
@@ -652,7 +647,6 @@
                                         Register dst,
                                         Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   if (is_int8(src.value_)) {
     emit(0x83);
@@ -672,7 +666,6 @@
                                         const Operand& dst,
                                         Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   if (is_int8(src.value_)) {
     emit(0x83);
@@ -690,7 +683,6 @@
                                            Register dst,
                                            Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);  // Operand size override prefix.
   emit_optional_rex_32(dst);
   if (is_int8(src.value_)) {
@@ -712,7 +704,6 @@
                                            const Operand& dst,
                                            Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);  // Operand size override prefix.
   emit_optional_rex_32(dst);
   if (is_int8(src.value_)) {
@@ -731,7 +722,6 @@
                                            Register dst,
                                            Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   if (is_int8(src.value_)) {
     emit(0x83);
@@ -752,7 +742,6 @@
                                            const Operand& dst,
                                            Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   if (is_int8(src.value_)) {
     emit(0x83);
@@ -770,7 +759,6 @@
                                           const Operand& dst,
                                           Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   ASSERT(is_int8(src.value_) || is_uint8(src.value_));
   emit(0x80);
@@ -783,7 +771,6 @@
                                           Register dst,
                                           Immediate src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (dst.code() > 3) {
     // Use 64-bit mode byte registers.
     emit_rex_64(dst);
@@ -797,7 +784,6 @@
 
 void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint6(shift_amount.value_));  // illegal shift count
   if (shift_amount.value_ == 1) {
     emit_rex_64(dst);
@@ -814,7 +800,6 @@
 
 void Assembler::shift(Register dst, int subcode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xD3);
   emit_modrm(subcode, dst);
@@ -823,7 +808,6 @@
 
 void Assembler::shift_32(Register dst, int subcode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xD3);
   emit_modrm(subcode, dst);
@@ -832,7 +816,6 @@
 
 void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint5(shift_amount.value_));  // illegal shift count
   if (shift_amount.value_ == 1) {
     emit_optional_rex_32(dst);
@@ -849,7 +832,6 @@
 
 void Assembler::bt(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src, dst);
   emit(0x0F);
   emit(0xA3);
@@ -859,7 +841,6 @@
 
 void Assembler::bts(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src, dst);
   emit(0x0F);
   emit(0xAB);
@@ -870,7 +851,6 @@
 void Assembler::call(Label* L) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // 1110 1000 #32-bit disp.
   emit(0xE8);
   if (L->is_bound()) {
@@ -892,7 +872,6 @@
 void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // 1110 1000 #32-bit disp.
   emit(0xE8);
   emit_code_target(target, rmode);
@@ -902,7 +881,6 @@
 void Assembler::call(Register adr) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: FF /2 r64.
   emit_optional_rex_32(adr);
   emit(0xFF);
@@ -913,7 +891,6 @@
 void Assembler::call(const Operand& op) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: FF /2 m64.
   emit_optional_rex_32(op);
   emit(0xFF);
@@ -928,7 +905,6 @@
 void Assembler::call(Address target) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // 1110 1000 #32-bit disp.
   emit(0xE8);
   Address source = pc_ + 4;
@@ -940,19 +916,16 @@
 
 void Assembler::clc() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF8);
 }
 
 void Assembler::cld() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xFC);
 }
 
 void Assembler::cdq() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x99);
 }
 
@@ -967,7 +940,6 @@
   // 64-bit architecture.
   ASSERT(cc >= 0);  // Use mov for unconditional moves.
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: REX.W 0f 40 + cc /r.
   emit_rex_64(dst, src);
   emit(0x0f);
@@ -984,7 +956,6 @@
   }
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: REX.W 0f 40 + cc /r.
   emit_rex_64(dst, src);
   emit(0x0f);
@@ -1001,7 +972,6 @@
   }
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: 0f 40 + cc /r.
   emit_optional_rex_32(dst, src);
   emit(0x0f);
@@ -1018,7 +988,6 @@
   }
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode: 0f 40 + cc /r.
   emit_optional_rex_32(dst, src);
   emit(0x0f);
@@ -1030,16 +999,14 @@
 void Assembler::cmpb_al(Immediate imm8) {
   ASSERT(is_int8(imm8.value_) || is_uint8(imm8.value_));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x3c);
   emit(imm8.value_);
 }
 
 
 void Assembler::cpuid() {
-  ASSERT(isolate()->cpu_features()->IsEnabled(CPUID));
+  ASSERT(CpuFeatures::IsEnabled(CPUID));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x0F);
   emit(0xA2);
 }
@@ -1047,7 +1014,6 @@
 
 void Assembler::cqo() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64();
   emit(0x99);
 }
@@ -1055,7 +1021,6 @@
 
 void Assembler::decq(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xFF);
   emit_modrm(0x1, dst);
@@ -1064,7 +1029,6 @@
 
 void Assembler::decq(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xFF);
   emit_operand(1, dst);
@@ -1073,7 +1037,6 @@
 
 void Assembler::decl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xFF);
   emit_modrm(0x1, dst);
@@ -1082,7 +1045,6 @@
 
 void Assembler::decl(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xFF);
   emit_operand(1, dst);
@@ -1091,7 +1053,6 @@
 
 void Assembler::decb(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (dst.code() > 3) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(dst);
@@ -1103,7 +1064,6 @@
 
 void Assembler::decb(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xFE);
   emit_operand(1, dst);
@@ -1112,7 +1072,6 @@
 
 void Assembler::enter(Immediate size) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xC8);
   emitw(size.value_);  // 16 bit operand, always.
   emit(0);
@@ -1121,14 +1080,12 @@
 
 void Assembler::hlt() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF4);
 }
 
 
 void Assembler::idivq(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src);
   emit(0xF7);
   emit_modrm(0x7, src);
@@ -1137,7 +1094,6 @@
 
 void Assembler::idivl(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(src);
   emit(0xF7);
   emit_modrm(0x7, src);
@@ -1146,7 +1102,6 @@
 
 void Assembler::imul(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src);
   emit(0xF7);
   emit_modrm(0x5, src);
@@ -1155,7 +1110,6 @@
 
 void Assembler::imul(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x0F);
   emit(0xAF);
@@ -1165,7 +1119,6 @@
 
 void Assembler::imul(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x0F);
   emit(0xAF);
@@ -1175,7 +1128,6 @@
 
 void Assembler::imul(Register dst, Register src, Immediate imm) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   if (is_int8(imm.value_)) {
     emit(0x6B);
@@ -1191,7 +1143,6 @@
 
 void Assembler::imull(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xAF);
@@ -1201,7 +1152,6 @@
 
 void Assembler::imull(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xAF);
@@ -1211,7 +1161,6 @@
 
 void Assembler::imull(Register dst, Register src, Immediate imm) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   if (is_int8(imm.value_)) {
     emit(0x6B);
@@ -1227,7 +1176,6 @@
 
 void Assembler::incq(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xFF);
   emit_modrm(0x0, dst);
@@ -1236,7 +1184,6 @@
 
 void Assembler::incq(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xFF);
   emit_operand(0, dst);
@@ -1245,7 +1192,6 @@
 
 void Assembler::incl(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xFF);
   emit_operand(0, dst);
@@ -1254,7 +1200,6 @@
 
 void Assembler::incl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xFF);
   emit_modrm(0, dst);
@@ -1263,7 +1208,6 @@
 
 void Assembler::int3() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xCC);
 }
 
@@ -1276,7 +1220,6 @@
     return;
   }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint4(cc));
   if (L->is_bound()) {
     const int short_size = 2;
@@ -1314,7 +1257,6 @@
                   Handle<Code> target,
                   RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint4(cc));
   // 0000 1111 1000 tttn #32-bit disp.
   emit(0x0F);
@@ -1325,7 +1267,6 @@
 
 void Assembler::j(Condition cc, NearLabel* L, Hint hint) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(0 <= cc && cc < 16);
   if (FLAG_emit_branch_hints && hint != no_hint) emit(hint);
   if (L->is_bound()) {
@@ -1346,7 +1287,6 @@
 
 void Assembler::jmp(Label* L) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   const int short_size = sizeof(int8_t);
   const int long_size = sizeof(int32_t);
   if (L->is_bound()) {
@@ -1379,7 +1319,6 @@
 
 void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // 1110 1001 #32-bit disp.
   emit(0xE9);
   emit_code_target(target, rmode);
@@ -1388,7 +1327,6 @@
 
 void Assembler::jmp(NearLabel* L) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (L->is_bound()) {
     const int short_size = sizeof(int8_t);
     int offs = L->pos() - pc_offset();
@@ -1407,7 +1345,6 @@
 
 void Assembler::jmp(Register target) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode FF/4 r64.
   emit_optional_rex_32(target);
   emit(0xFF);
@@ -1417,7 +1354,6 @@
 
 void Assembler::jmp(const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   // Opcode FF/4 m64.
   emit_optional_rex_32(src);
   emit(0xFF);
@@ -1427,7 +1363,6 @@
 
 void Assembler::lea(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x8D);
   emit_operand(dst, src);
@@ -1436,7 +1371,6 @@
 
 void Assembler::leal(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x8D);
   emit_operand(dst, src);
@@ -1445,7 +1379,6 @@
 
 void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x48);  // REX.W
   emit(0xA1);
   emitq(reinterpret_cast<uintptr_t>(value), mode);
@@ -1459,15 +1392,18 @@
 
 void Assembler::leave() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xC9);
 }
 
 
 void Assembler::movb(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  emit_rex_32(dst, src);
+  if (dst.code() > 3) {
+    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
+    emit_rex_32(dst, src);
+  } else {
+    emit_optional_rex_32(dst, src);
+  }
   emit(0x8A);
   emit_operand(dst, src);
 }
@@ -1475,18 +1411,21 @@
 
 void Assembler::movb(Register dst, Immediate imm) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  emit_rex_32(dst);
-  emit(0xC6);
-  emit_modrm(0x0, dst);
+  if (dst.code() > 3) {
+    emit_rex_32(dst);
+  }
+  emit(0xB0 + dst.low_bits());
   emit(imm.value_);
 }
 
 
 void Assembler::movb(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  emit_rex_32(src, dst);
+  if (src.code() > 3) {
+    emit_rex_32(src, dst);
+  } else {
+    emit_optional_rex_32(src, dst);
+  }
   emit(0x88);
   emit_operand(src, dst);
 }
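
The three movb bodies above all hinge on the same x64 encoding rule, which the
old code (always emitting REX) got conservatively right but one byte too long
for al..dl. A standalone sketch of the rule (helper name is ours):

    #include <cassert>

    // Without a REX prefix, byte-register encodings 4-7 mean ah/ch/dh/bh.
    // An (otherwise empty) REX prefix reinterprets them as spl/bpl/sil/dil,
    // and codes 8-15 (r8b-r15b) need REX.B regardless. Only al/cl/dl/bl
    // (codes 0-3) are safe to encode with no prefix at all.
    bool NeedsRexForByteRegister(int reg_code) {
      return reg_code > 3;
    }

    int main() {
      assert(!NeedsRexForByteRegister(0));  // al: no prefix needed
      assert(NeedsRexForByteRegister(6));   // sil: decodes as dh without REX
      assert(NeedsRexForByteRegister(12));  // r12b: needs REX.B anyway
      return 0;
    }
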
@@ -1494,7 +1433,6 @@
 
 void Assembler::movw(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(src, dst);
   emit(0x89);
@@ -1504,7 +1442,6 @@
 
 void Assembler::movl(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x8B);
   emit_operand(dst, src);
@@ -1513,7 +1450,6 @@
 
 void Assembler::movl(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.low_bits() == 4) {
     emit_optional_rex_32(src, dst);
     emit(0x89);
@@ -1528,7 +1464,6 @@
 
 void Assembler::movl(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(src, dst);
   emit(0x89);
   emit_operand(src, dst);
@@ -1537,27 +1472,23 @@
 
 void Assembler::movl(const Operand& dst, Immediate value) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xC7);
   emit_operand(0x0, dst);
-  emit(value);  // Only 32-bit immediates are possible, not 8-bit immediates.
+  emit(value);
 }
 
 
 void Assembler::movl(Register dst, Immediate value) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
-  emit(0xC7);
-  emit_modrm(0x0, dst);
-  emit(value);  // Only 32-bit immediates are possible, not 8-bit immediates.
+  emit(0xB8 + dst.low_bits());
+  emit(value);
 }
 
 
 void Assembler::movq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x8B);
   emit_operand(dst, src);
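
movb(reg, imm) and movl(reg, imm) above switch from the generic "C6 /0" and
"C7 /0" forms to the register-in-opcode "B0+rb" and "B8+rd" forms, dropping
the ModRM byte. Byte-level sketch for mov eax, 1, hand-assembled for
illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Generic form: C7 /0 id, with ModRM 0xC0 selecting eax.
      const uint8_t long_form[]  = {0xC7, 0xC0, 0x01, 0x00, 0x00, 0x00};
      // Register-in-opcode form: B8+0 (eax), then imm32.
      const uint8_t short_form[] = {0xB8, 0x01, 0x00, 0x00, 0x00};
      assert(sizeof(short_form) + 1 == sizeof(long_form));  // ModRM byte saved
      return 0;
    }
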
@@ -1566,7 +1497,6 @@
 
 void Assembler::movq(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.low_bits() == 4) {
     emit_rex_64(src, dst);
     emit(0x89);
@@ -1581,7 +1511,6 @@
 
 void Assembler::movq(Register dst, Immediate value) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xC7);
   emit_modrm(0x0, dst);
@@ -1591,7 +1520,6 @@
 
 void Assembler::movq(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src, dst);
   emit(0x89);
   emit_operand(src, dst);
@@ -1603,7 +1531,6 @@
   // address is not GC safe. Use the handle version instead.
   ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xB8 | dst.low_bits());
   emitq(reinterpret_cast<uintptr_t>(value), rmode);
@@ -1625,7 +1552,6 @@
     // value.
   }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xB8 | dst.low_bits());
   emitq(value, rmode);
@@ -1640,7 +1566,6 @@
 
 void Assembler::movq(const Operand& dst, Immediate value) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xC7);
   emit_operand(0, dst);
@@ -1652,7 +1577,6 @@
 // (as a 32-bit offset sign extended to 64-bit).
 void Assembler::movl(const Operand& dst, Label* src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xC7);
   emit_operand(0, dst);
@@ -1682,7 +1606,6 @@
     movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE);
   } else {
     EnsureSpace ensure_space(this);
-    last_pc_ = pc_;
     ASSERT(value->IsHeapObject());
     ASSERT(!HEAP->InNewSpace(*value));
     emit_rex_64(dst);
@@ -1694,7 +1617,6 @@
 
 void Assembler::movsxbq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x0F);
   emit(0xBE);
@@ -1704,7 +1626,6 @@
 
 void Assembler::movsxwq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x0F);
   emit(0xBF);
@@ -1714,7 +1635,6 @@
 
 void Assembler::movsxlq(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x63);
   emit_modrm(dst, src);
@@ -1723,7 +1643,6 @@
 
 void Assembler::movsxlq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst, src);
   emit(0x63);
   emit_operand(dst, src);
@@ -1732,7 +1651,6 @@
 
 void Assembler::movzxbq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xB6);
@@ -1742,7 +1660,6 @@
 
 void Assembler::movzxbl(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xB6);
@@ -1752,7 +1669,6 @@
 
 void Assembler::movzxwq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xB7);
@@ -1762,7 +1678,6 @@
 
 void Assembler::movzxwl(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xB7);
@@ -1772,7 +1687,6 @@
 
 void Assembler::repmovsb() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit(0xA4);
 }
@@ -1780,7 +1694,6 @@
 
 void Assembler::repmovsw() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);  // Operand size override.
   emit(0xF3);
   emit(0xA4);
@@ -1789,7 +1702,6 @@
 
 void Assembler::repmovsl() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit(0xA5);
 }
@@ -1797,7 +1709,6 @@
 
 void Assembler::repmovsq() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit_rex_64();
   emit(0xA5);
@@ -1806,7 +1717,6 @@
 
 void Assembler::mul(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src);
   emit(0xF7);
   emit_modrm(0x4, src);
@@ -1815,7 +1725,6 @@
 
 void Assembler::neg(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xF7);
   emit_modrm(0x3, dst);
@@ -1824,7 +1733,6 @@
 
 void Assembler::negl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xF7);
   emit_modrm(0x3, dst);
@@ -1833,7 +1741,6 @@
 
 void Assembler::neg(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xF7);
   emit_operand(3, dst);
@@ -1842,14 +1749,12 @@
 
 void Assembler::nop() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x90);
 }
 
 
 void Assembler::not_(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xF7);
   emit_modrm(0x2, dst);
@@ -1858,7 +1763,6 @@
 
 void Assembler::not_(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(dst);
   emit(0xF7);
   emit_operand(2, dst);
@@ -1867,7 +1771,6 @@
 
 void Assembler::notl(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0xF7);
   emit_modrm(0x2, dst);
@@ -1892,7 +1795,6 @@
   ASSERT(1 <= n);
   ASSERT(n <= 9);
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   switch (n) {
   case 1:
     emit(0x90);
@@ -1963,7 +1865,6 @@
 
 void Assembler::pop(Register dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0x58 | dst.low_bits());
 }
@@ -1971,7 +1872,6 @@
 
 void Assembler::pop(const Operand& dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(dst);
   emit(0x8F);
   emit_operand(0, dst);
@@ -1980,14 +1880,12 @@
 
 void Assembler::popfq() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x9D);
 }
 
 
 void Assembler::push(Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(src);
   emit(0x50 | src.low_bits());
 }
@@ -1995,7 +1893,6 @@
 
 void Assembler::push(const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(src);
   emit(0xFF);
   emit_operand(6, src);
@@ -2004,7 +1901,6 @@
 
 void Assembler::push(Immediate value) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (is_int8(value.value_)) {
     emit(0x6A);
     emit(value.value_);  // Emit low byte of value.
@@ -2017,7 +1913,6 @@
 
 void Assembler::push_imm32(int32_t imm32) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x68);
   emitl(imm32);
 }
@@ -2025,14 +1920,12 @@
 
 void Assembler::pushfq() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x9C);
 }
 
 
 void Assembler::rdtsc() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x0F);
   emit(0x31);
 }
@@ -2040,7 +1933,6 @@
 
 void Assembler::ret(int imm16) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint16(imm16));
   if (imm16 == 0) {
     emit(0xC3);
@@ -2058,7 +1950,6 @@
     return;
   }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   ASSERT(is_uint4(cc));
   if (reg.code() > 3) {  // Use x64 byte registers, where different.
     emit_rex_32(reg);
@@ -2071,7 +1962,6 @@
 
 void Assembler::shld(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src, dst);
   emit(0x0F);
   emit(0xA5);
@@ -2081,7 +1971,6 @@
 
 void Assembler::shrd(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(src, dst);
   emit(0x0F);
   emit(0xAD);
@@ -2091,7 +1980,6 @@
 
 void Assembler::xchg(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.is(rax) || dst.is(rax)) {  // Single-byte encoding
     Register other = src.is(rax) ? dst : src;
     emit_rex_64(other);
@@ -2110,7 +1998,6 @@
 
 void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x48);  // REX.W
   emit(0xA3);
   emitq(reinterpret_cast<uintptr_t>(dst), mode);
@@ -2124,7 +2011,6 @@
 
 void Assembler::testb(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.low_bits() == 4) {
     emit_rex_32(src, dst);
     emit(0x84);
@@ -2143,7 +2029,6 @@
 void Assembler::testb(Register reg, Immediate mask) {
   ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (reg.is(rax)) {
     emit(0xA8);
     emit(mask.value_);  // Low byte emitted.
@@ -2162,7 +2047,6 @@
 void Assembler::testb(const Operand& op, Immediate mask) {
   ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(rax, op);
   emit(0xF6);
   emit_operand(rax, op);  // Operation code 0
@@ -2172,7 +2056,6 @@
 
 void Assembler::testb(const Operand& op, Register reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (reg.code() > 3) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(reg, op);
@@ -2186,7 +2069,6 @@
 
 void Assembler::testl(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.low_bits() == 4) {
     emit_optional_rex_32(src, dst);
     emit(0x85);
@@ -2206,7 +2088,6 @@
     return;
   }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (reg.is(rax)) {
     emit(0xA9);
     emit(mask);
@@ -2226,7 +2107,6 @@
     return;
   }
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(rax, op);
   emit(0xF7);
   emit_operand(rax, op);  // Operation code 0
@@ -2236,7 +2116,6 @@
 
 void Assembler::testq(const Operand& op, Register reg) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_rex_64(reg, op);
   emit(0x85);
   emit_operand(reg, op);
@@ -2245,7 +2124,6 @@
 
 void Assembler::testq(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (src.low_bits() == 4) {
     emit_rex_64(src, dst);
     emit(0x85);
@@ -2260,7 +2138,6 @@
 
 void Assembler::testq(Register dst, Immediate mask) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   if (dst.is(rax)) {
     emit_rex_64();
     emit(0xA9);
@@ -2279,14 +2156,12 @@
 
 void Assembler::fld(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xD9, 0xC0, i);
 }
 
 
 void Assembler::fld1() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xE8);
 }
@@ -2294,7 +2169,6 @@
 
 void Assembler::fldz() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xEE);
 }
@@ -2302,7 +2176,6 @@
 
 void Assembler::fldpi() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xEB);
 }
@@ -2310,7 +2183,6 @@
 
 void Assembler::fldln2() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xED);
 }
@@ -2318,7 +2190,6 @@
 
 void Assembler::fld_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xD9);
   emit_operand(0, adr);
@@ -2327,7 +2198,6 @@
 
 void Assembler::fld_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDD);
   emit_operand(0, adr);
@@ -2336,7 +2206,6 @@
 
 void Assembler::fstp_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xD9);
   emit_operand(3, adr);
@@ -2345,7 +2214,6 @@
 
 void Assembler::fstp_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDD);
   emit_operand(3, adr);
@@ -2355,14 +2223,12 @@
 void Assembler::fstp(int index) {
   ASSERT(is_uint3(index));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDD, 0xD8, index);
 }
 
 
 void Assembler::fild_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDB);
   emit_operand(0, adr);
@@ -2371,7 +2237,6 @@
 
 void Assembler::fild_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDF);
   emit_operand(5, adr);
@@ -2380,7 +2245,6 @@
 
 void Assembler::fistp_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDB);
   emit_operand(3, adr);
@@ -2388,9 +2252,8 @@
 
 
 void Assembler::fisttp_s(const Operand& adr) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
+  ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDB);
   emit_operand(1, adr);
@@ -2398,9 +2261,8 @@
 
 
 void Assembler::fisttp_d(const Operand& adr) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE3));
+  ASSERT(CpuFeatures::IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDD);
   emit_operand(1, adr);
@@ -2409,7 +2271,6 @@
 
 void Assembler::fist_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDB);
   emit_operand(2, adr);
@@ -2418,7 +2279,6 @@
 
 void Assembler::fistp_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDF);
   emit_operand(7, adr);
@@ -2427,7 +2287,6 @@
 
 void Assembler::fabs() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xE1);
 }
@@ -2435,7 +2294,6 @@
 
 void Assembler::fchs() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xE0);
 }
@@ -2443,7 +2301,6 @@
 
 void Assembler::fcos() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xFF);
 }
@@ -2451,7 +2308,6 @@
 
 void Assembler::fsin() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xFE);
 }
@@ -2459,7 +2315,6 @@
 
 void Assembler::fyl2x() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xF1);
 }
@@ -2467,21 +2322,18 @@
 
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xC0, i);
 }
 
 
 void Assembler::fsub(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xE8, i);
 }
 
 
 void Assembler::fisub_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDA);
   emit_operand(4, adr);
@@ -2490,56 +2342,48 @@
 
 void Assembler::fmul(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xC8, i);
 }
 
 
 void Assembler::fdiv(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDC, 0xF8, i);
 }
 
 
 void Assembler::faddp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xC0, i);
 }
 
 
 void Assembler::fsubp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xE8, i);
 }
 
 
 void Assembler::fsubrp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xE0, i);
 }
 
 
 void Assembler::fmulp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xC8, i);
 }
 
 
 void Assembler::fdivp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDE, 0xF8, i);
 }
 
 
 void Assembler::fprem() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xF8);
 }
@@ -2547,7 +2391,6 @@
 
 void Assembler::fprem1() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xF5);
 }
@@ -2555,14 +2398,12 @@
 
 void Assembler::fxch(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xD9, 0xC8, i);
 }
 
 
 void Assembler::fincstp() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xF7);
 }
@@ -2570,14 +2411,12 @@
 
 void Assembler::ffree(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDD, 0xC0, i);
 }
 
 
 void Assembler::ftst() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xE4);
 }
@@ -2585,14 +2424,12 @@
 
 void Assembler::fucomp(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit_farith(0xDD, 0xE8, i);
 }
 
 
 void Assembler::fucompp() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xDA);
   emit(0xE9);
 }
@@ -2600,7 +2437,6 @@
 
 void Assembler::fucomi(int i) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xDB);
   emit(0xE8 + i);
 }
@@ -2608,7 +2444,6 @@
 
 void Assembler::fucomip() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xDF);
   emit(0xE9);
 }
@@ -2616,7 +2451,6 @@
 
 void Assembler::fcompp() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xDE);
   emit(0xD9);
 }
@@ -2624,7 +2458,6 @@
 
 void Assembler::fnstsw_ax() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xDF);
   emit(0xE0);
 }
@@ -2632,14 +2465,12 @@
 
 void Assembler::fwait() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x9B);
 }
 
 
 void Assembler::frndint() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xD9);
   emit(0xFC);
 }
@@ -2647,7 +2478,6 @@
 
 void Assembler::fnclex() {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xDB);
   emit(0xE2);
 }
@@ -2657,7 +2487,6 @@
   // TODO(X64): Test for presence. Not all 64-bit intel CPU's have sahf
   // in 64-bit mode. Test CpuID.
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x9E);
 }
 
@@ -2673,7 +2502,6 @@
 
 void Assembler::movd(XMMRegister dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2684,7 +2512,6 @@
 
 void Assembler::movd(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(src, dst);
   emit(0x0F);
@@ -2695,7 +2522,6 @@
 
 void Assembler::movq(XMMRegister dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_rex_64(dst, src);
   emit(0x0F);
@@ -2706,7 +2532,6 @@
 
 void Assembler::movq(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_rex_64(src, dst);
   emit(0x0F);
@@ -2715,10 +2540,26 @@
 }
 
 
-void Assembler::movdqa(const Operand& dst, XMMRegister src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
+void Assembler::movq(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
+  if (dst.low_bits() == 4) {
+    // Avoid unnecessary SIB byte.
+    emit(0xf3);
+    emit_optional_rex_32(dst, src);
+    emit(0x0F);
+    emit(0x7e);
+    emit_sse_operand(dst, src);
+  } else {
+    emit(0x66);
+    emit_optional_rex_32(src, dst);
+    emit(0x0F);
+    emit(0xD6);
+    emit_sse_operand(src, dst);
+  }
+}
+
+
+void Assembler::movdqa(const Operand& dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
   emit(0x66);
   emit_rex_64(src, dst);
   emit(0x0F);
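
The new movq(XMMRegister, XMMRegister) above picks between the two legal
encodings of the same architectural move, F3 0F 7E (load form, destination in
the ModRM reg field) and 66 0F D6 (store form, source in the reg field), based
on dst.low_bits(); SIB-byte avoidance is the motivation given in the code
comment. Hand-assembled sketch of both encodings of movq xmm1, xmm2:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Load form:  F3 0F 7E /r, ModRM = 11 001 010 (reg=xmm1, rm=xmm2).
      const uint8_t load_form[]  = {0xF3, 0x0F, 0x7E, 0xCA};
      // Store form: 66 0F D6 /r, ModRM = 11 010 001 (reg=xmm2, rm=xmm1).
      const uint8_t store_form[] = {0x66, 0x0F, 0xD6, 0xD1};
      assert(sizeof(load_form) == sizeof(store_form));  // same length here
      return 0;
    }
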
@@ -2728,9 +2569,7 @@
 
 
 void Assembler::movdqa(XMMRegister dst, const Operand& src) {
-  ASSERT(isolate()->cpu_features()->IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_rex_64(dst, src);
   emit(0x0F);
@@ -2742,7 +2581,6 @@
 void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
   ASSERT(is_uint2(imm8));
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2755,7 +2593,6 @@
 
 void Assembler::movsd(const Operand& dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);  // double
   emit_optional_rex_32(src, dst);
   emit(0x0F);
@@ -2766,7 +2603,6 @@
 
 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);  // double
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2777,7 +2613,6 @@
 
 void Assembler::movsd(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);  // double
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2786,9 +2621,44 @@
 }
 
 
+void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  if (src.low_bits() == 4) {
+    // Try to avoid an unnecessary SIB byte.
+    emit_optional_rex_32(src, dst);
+    emit(0x0F);
+    emit(0x29);
+    emit_sse_operand(src, dst);
+  } else {
+    emit_optional_rex_32(dst, src);
+    emit(0x0F);
+    emit(0x28);
+    emit_sse_operand(dst, src);
+  }
+}
+
+
+void Assembler::movapd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  if (src.low_bits() == 4) {
+    // Try to avoid an unnecessary SIB byte.
+    emit(0x66);
+    emit_optional_rex_32(src, dst);
+    emit(0x0F);
+    emit(0x29);
+    emit_sse_operand(src, dst);
+  } else {
+    emit(0x66);
+    emit_optional_rex_32(dst, src);
+    emit(0x0F);
+    emit(0x28);
+    emit_sse_operand(dst, src);
+  }
+}
+
+
 void Assembler::movss(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);  // single
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2799,7 +2669,6 @@
 
 void Assembler::movss(const Operand& src, XMMRegister dst) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);  // single
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2810,7 +2679,6 @@
 
 void Assembler::cvttss2si(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2821,7 +2689,6 @@
 
 void Assembler::cvttss2si(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2832,7 +2699,6 @@
 
 void Assembler::cvttsd2si(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2843,7 +2709,6 @@
 
 void Assembler::cvttsd2si(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2854,7 +2719,6 @@
 
 void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_rex_64(dst, src);
   emit(0x0F);
@@ -2865,7 +2729,6 @@
 
 void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2876,7 +2739,6 @@
 
 void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2887,7 +2749,6 @@
 
 void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2898,7 +2759,6 @@
 
 void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_rex_64(dst, src);
   emit(0x0F);
@@ -2909,7 +2769,6 @@
 
 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2920,7 +2779,6 @@
 
 void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF3);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2931,7 +2789,6 @@
 
 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2942,7 +2799,6 @@
 
 void Assembler::cvtsd2si(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2953,7 +2809,6 @@
 
 void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_rex_64(dst, src);
   emit(0x0F);
@@ -2964,7 +2819,6 @@
 
 void Assembler::addsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2975,7 +2829,6 @@
 
 void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2986,7 +2839,6 @@
 
 void Assembler::subsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -2997,7 +2849,6 @@
 
 void Assembler::divsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -3008,7 +2859,6 @@
 
 void Assembler::andpd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -3019,7 +2869,6 @@
 
 void Assembler::orpd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -3030,7 +2879,6 @@
 
 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -3039,9 +2887,17 @@
 }
 
 
+void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x57);
+  emit_sse_operand(dst, src);
+}
+
+
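
xorps carries no 0x66 operand-size prefix, so zeroing a register is one byte
shorter than with xorpd; later hunks in this patch switch the zeroing call
sites (e.g. xorpd(xmm0, xmm0)) over to it. A standalone byte-count comparison
(encodings per the x86-64 ISA; this snippet only tabulates them):

    #include <cstdint>
    #include <vector>
    // xorps xmm0, xmm0 : 0F 57 C0     (3 bytes)
    // xorpd xmm0, xmm0 : 66 0F 57 C0  (4 bytes)
    const std::vector<uint8_t> kXorpsBytes = {0x0F, 0x57, 0xC0};
    const std::vector<uint8_t> kXorpdBytes = {0x66, 0x0F, 0x57, 0xC0};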
 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0xF2);
   emit_optional_rex_32(dst, src);
   emit(0x0F);
@@ -3052,7 +2908,6 @@
 
 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0f);
@@ -3063,7 +2918,6 @@
 
 void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0f);
@@ -3072,9 +2926,23 @@
 }
 
 
+void Assembler::roundsd(XMMRegister dst, XMMRegister src,
+                        Assembler::RoundingMode mode) {
+  ASSERT(CpuFeatures::IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x3a);
+  emit(0x0b);
+  emit_sse_operand(dst, src);
+  // Mask precision exception.
+  emit(static_cast<byte>(mode) | 0x8);
+}
+
+
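
The immediate assembled above follows the SSE4.1 ROUNDSD layout: bits 1:0
select the rounding mode, bit 2 would defer to MXCSR, and bit 3 masks the
precision (inexact) exception. A hedged standalone sketch of the byte:

    #include <cstdint>
    enum RoundingMode {
      kRoundToNearest = 0x0,
      kRoundDown      = 0x1,
      kRoundUp        = 0x2,
      kRoundToZero    = 0x3
    };
    uint8_t RoundsdImmediate(RoundingMode mode) {
      return static_cast<uint8_t>(mode) | 0x8;  // set bit 3: mask precision
    }
    // RoundsdImmediate(kRoundDown) == 0x9, matching emit(mode | 0x8) above.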
 void Assembler::movmskpd(Register dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
   emit(0x66);
   emit_optional_rex_32(dst, src);
   emit(0x0f);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 52aca63..8a9938b 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -434,14 +434,15 @@
 //   } else {
 //     // Generate standard x87 or SSE2 floating point code.
 //   }
-class CpuFeatures {
+class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  void Probe(bool portable);
+  static void Probe();
 
   // Check whether a feature is supported by the target CPU.
-  bool IsSupported(CpuFeature f) const {
+  static bool IsSupported(CpuFeature f) {
+    ASSERT(initialized_);
     if (f == SSE2 && !FLAG_enable_sse2) return false;
     if (f == SSE3 && !FLAG_enable_sse3) return false;
     if (f == CMOV && !FLAG_enable_cmov) return false;
@@ -449,51 +450,65 @@
     if (f == SAHF && !FLAG_enable_sahf) return false;
     return (supported_ & (V8_UINT64_C(1) << f)) != 0;
   }
+
+#ifdef DEBUG
   // Check whether a feature is currently enabled.
-  bool IsEnabled(CpuFeature f) const {
-    return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
+  static bool IsEnabled(CpuFeature f) {
+    ASSERT(initialized_);
+    Isolate* isolate = Isolate::UncheckedCurrent();
+    if (isolate == NULL) {
+      // When no isolate is available, work as if we're running in
+      // release mode.
+      return IsSupported(f);
+    }
+    uint64_t enabled = isolate->enabled_cpu_features();
+    return (enabled & (V8_UINT64_C(1) << f)) != 0;
   }
+#endif
+
   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
    public:
-    explicit Scope(CpuFeature f)
-        : cpu_features_(Isolate::Current()->cpu_features()),
-          isolate_(Isolate::Current()) {
-      uint64_t mask = (V8_UINT64_C(1) << f);
-      ASSERT(cpu_features_->IsSupported(f));
+    explicit Scope(CpuFeature f) {
+      uint64_t mask = V8_UINT64_C(1) << f;
+      ASSERT(CpuFeatures::IsSupported(f));
       ASSERT(!Serializer::enabled() ||
-          (cpu_features_->found_by_runtime_probing_ & mask) == 0);
-      old_enabled_ = cpu_features_->enabled_;
-      cpu_features_->enabled_ |= mask;
+             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+      isolate_ = Isolate::UncheckedCurrent();
+      old_enabled_ = 0;
+      if (isolate_ != NULL) {
+        old_enabled_ = isolate_->enabled_cpu_features();
+        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+      }
     }
     ~Scope() {
-      ASSERT_EQ(Isolate::Current(), isolate_);
-      cpu_features_->enabled_ = old_enabled_;
+      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+      if (isolate_ != NULL) {
+        isolate_->set_enabled_cpu_features(old_enabled_);
+      }
     }
    private:
-    uint64_t old_enabled_;
-    CpuFeatures* cpu_features_;
     Isolate* isolate_;
+    uint64_t old_enabled_;
 #else
    public:
     explicit Scope(CpuFeature f) {}
 #endif
   };
- private:
-  CpuFeatures();
 
+ private:
   // Safe defaults include SSE2 and CMOV for X64. It is always available, if
   // anyone checks, but they shouldn't need to check.
   // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
   //   fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
   static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
 
-  uint64_t supported_;
-  uint64_t enabled_;
-  uint64_t found_by_runtime_probing_;
-
-  friend class Isolate;
+#ifdef DEBUG
+  static bool initialized_;
+#endif
+  static uint64_t supported_;
+  static uint64_t found_by_runtime_probing_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
 };
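
A hedged usage sketch of the now-static interface: in debug builds the Scope
records the enabled-feature mask on the current isolate so the assembler's
IsEnabled assertions pass, and restores the old mask on exit.

    // Assumes CpuFeatures::Probe() already ran during VM initialization.
    if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatures::Scope enable(SSE4_1);  // sets the bit on this isolate
      // ... emit SSE4.1 instructions, e.g. assembler->roundsd(...) ...
    }  // destructor restores the previous enabled-feature mask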
@@ -526,7 +541,7 @@
   // for code generation and assumes its size to be buffer_size. If the buffer
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
-  Assembler(void* buffer, int buffer_size);
+  Assembler(Isolate* isolate, void* buffer, int buffer_size);
   ~Assembler();
 
   // Overrides the default provided by FLAG_debug_code.
@@ -1276,15 +1291,24 @@
   void movd(Register dst, XMMRegister src);
   void movq(XMMRegister dst, Register src);
   void movq(Register dst, XMMRegister src);
+  void movq(XMMRegister dst, XMMRegister src);
   void extractps(Register dst, XMMRegister src, byte imm8);
 
-  void movsd(const Operand& dst, XMMRegister src);
+  // Don't use this unless it's important to keep the
+  // top half of the destination register unchanged.
+  // Use movaps when moving double values and movq for integer
+  // values in xmm registers.
   void movsd(XMMRegister dst, XMMRegister src);
+
+  void movsd(const Operand& dst, XMMRegister src);
   void movsd(XMMRegister dst, const Operand& src);
 
   void movdqa(const Operand& dst, XMMRegister src);
   void movdqa(XMMRegister dst, const Operand& src);
 
+  void movapd(XMMRegister dst, XMMRegister src);
+  void movaps(XMMRegister dst, XMMRegister src);
+
   void movss(XMMRegister dst, const Operand& src);
   void movss(const Operand& dst, XMMRegister src);
 
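
The movsd comment in this hunk reflects a partial-register hazard: a
register-to-register movsd writes only the low 64 bits, forcing the CPU to
merge with the destination's stale upper half (a false dependency), while
movaps copies all 128 bits. A hedged sketch of the resulting conventions
(register choices arbitrary; masm is a MacroAssembler*):

    masm->movaps(xmm1, xmm2);  // double value: full copy, no merge stall
    masm->movq(xmm1, xmm2);    // integer bits between xmm registers
    masm->movsd(xmm1, xmm2);   // only if xmm1's old upper half must survive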
@@ -1316,11 +1340,21 @@
   void andpd(XMMRegister dst, XMMRegister src);
   void orpd(XMMRegister dst, XMMRegister src);
   void xorpd(XMMRegister dst, XMMRegister src);
+  void xorps(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);
 
   void ucomisd(XMMRegister dst, XMMRegister src);
   void ucomisd(XMMRegister dst, const Operand& src);
 
+  enum RoundingMode {
+    kRoundToNearest = 0x0,
+    kRoundDown      = 0x1,
+    kRoundUp        = 0x2,
+    kRoundToZero    = 0x3
+  };
+
+  void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
+
   void movmskpd(Register dst, XMMRegister src);
 
   // The first argument is the reg field, the second argument is the r/m field.
@@ -1574,8 +1608,6 @@
   RelocInfoWriter reloc_info_writer;
 
   List< Handle<Code> > code_targets_;
-  // push-pop elimination
-  byte* last_pc_;
 
   PositionsRecorder positions_recorder_;
 
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 21d3e54..a549633 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "deoptimizer.h"
 #include "full-codegen.h"
 
@@ -96,7 +96,7 @@
   // rax: number of arguments
   __ bind(&non_function_call);
   // Set expected number of arguments to zero (not changing rax).
-  __ movq(rbx, Immediate(0));
+  __ Set(rbx, 0);
   __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
@@ -1372,7 +1372,7 @@
     // Copy receiver and all expected arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
-    __ movq(rcx, Immediate(-1));  // account for receiver
+    __ Set(rcx, -1);  // account for receiver
 
     Label copy;
     __ bind(&copy);
@@ -1391,7 +1391,7 @@
     // Copy receiver and all actual arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
-    __ movq(rcx, Immediate(-1));  // account for receiver
+    __ Set(rcx, -1);  // account for receiver
 
     Label copy;
     __ bind(&copy);
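
The movq(reg, Immediate(0)) to Set(reg, 0) rewrites in this file (and in
code-stubs-x64.cc below) let the macro assembler choose the shortest encoding
for the constant; clearing with a 32-bit xor is smaller and also breaks the
dependency on the old register value. A standalone size model (byte counts
per the x86-64 ISA; constant names hypothetical):

    #include <cstddef>
    constexpr std::size_t kXorlClearBytes = 2;  // 31 C0             xorl eax, eax
    constexpr std::size_t kMovqImm0Bytes  = 7;  // 48 C7 C0 00000000 movq rax, 0
    static_assert(kXorlClearBytes < kMovqImm0Bytes, "xor-clear is shorter");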
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 0fb827b..76fcc88 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -266,14 +266,14 @@
   __ j(not_equal, &true_result);
   // HeapNumber => false iff +0, -0, or NaN.
   // These three cases set the zero flag when compared to zero using ucomisd.
-  __ xorpd(xmm0, xmm0);
+  __ xorps(xmm0, xmm0);
   __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
   __ j(zero, &false_result);
   // Fall through to |true_result|.
 
   // Return 1/0 for true/false in rax.
   __ bind(&true_result);
-  __ movq(rax, Immediate(1));
+  __ Set(rax, 1);
   __ ret(1 * kPointerSize);
   __ bind(&false_result);
   __ Set(rax, 0);
@@ -281,166 +281,6 @@
 }
 
 
-const char* GenericBinaryOpStub::GetName() {
-  if (name_ != NULL) return name_;
-  const int kMaxNameLength = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
-      kMaxNameLength);
-  if (name_ == NULL) return "OOM";
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name;
-  switch (mode_) {
-    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
-    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
-    default: overwrite_name = "UnknownOverwrite"; break;
-  }
-
-  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
-               op_name,
-               overwrite_name,
-               (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
-               args_in_registers_ ? "RegArgs" : "StackArgs",
-               args_reversed_ ? "_R" : "",
-               static_operands_type_.ToString(),
-               BinaryOpIC::GetName(runtime_operands_type_));
-  return name_;
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Register left,
-    Register right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ push(left);
-    __ push(right);
-  } else {
-    // The calling convention with registers is left in rdx and right in rax.
-    Register left_arg = rdx;
-    Register right_arg = rax;
-    if (!(left.is(left_arg) && right.is(right_arg))) {
-      if (left.is(right_arg) && right.is(left_arg)) {
-        if (IsOperationCommutative()) {
-          SetArgsReversed();
-        } else {
-          __ xchg(left, right);
-        }
-      } else if (left.is(left_arg)) {
-        __ movq(right_arg, right);
-      } else if (right.is(right_arg)) {
-        __ movq(left_arg, left);
-      } else if (left.is(right_arg)) {
-        if (IsOperationCommutative()) {
-          __ movq(left_arg, right);
-          SetArgsReversed();
-        } else {
-          // Order of moves important to avoid destroying left argument.
-          __ movq(left_arg, left);
-          __ movq(right_arg, right);
-        }
-      } else if (right.is(left_arg)) {
-        if (IsOperationCommutative()) {
-          __ movq(right_arg, left);
-          SetArgsReversed();
-        } else {
-          // Order of moves important to avoid destroying right argument.
-          __ movq(right_arg, right);
-          __ movq(left_arg, left);
-        }
-      } else {
-        // Order of moves is not important.
-        __ movq(left_arg, left);
-        __ movq(right_arg, right);
-      }
-    }
-
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-    Counters* counters = masm->isolate()->counters();
-    __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Register left,
-    Smi* right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ push(left);
-    __ Push(right);
-  } else {
-    // The calling convention with registers is left in rdx and right in rax.
-    Register left_arg = rdx;
-    Register right_arg = rax;
-    if (left.is(left_arg)) {
-      __ Move(right_arg, right);
-    } else if (left.is(right_arg) && IsOperationCommutative()) {
-      __ Move(left_arg, right);
-      SetArgsReversed();
-    } else {
-      // For non-commutative operations, left and right_arg might be
-      // the same register.  Therefore, the order of the moves is
-      // important here in order to not overwrite left before moving
-      // it to left_arg.
-      __ movq(left_arg, left);
-      __ Move(right_arg, right);
-    }
-
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-  Counters* counters = masm->isolate()->counters();
-    __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
-void GenericBinaryOpStub::GenerateCall(
-    MacroAssembler* masm,
-    Smi* left,
-    Register right) {
-  if (!ArgsInRegistersSupported()) {
-    // Pass arguments on the stack.
-    __ Push(left);
-    __ push(right);
-  } else {
-    // The calling convention with registers is left in rdx and right in rax.
-    Register left_arg = rdx;
-    Register right_arg = rax;
-    if (right.is(right_arg)) {
-      __ Move(left_arg, left);
-    } else if (right.is(left_arg) && IsOperationCommutative()) {
-      __ Move(right_arg, left);
-      SetArgsReversed();
-    } else {
-      // For non-commutative operations, right and left_arg might be
-      // the same register.  Therefore, the order of the moves is
-      // important here in order to not overwrite right before moving
-      // it to right_arg.
-      __ movq(right_arg, right);
-      __ Move(left_arg, left);
-    }
-    // Update flags to indicate that arguments are in registers.
-    SetArgsInRegisters();
-  Counters* counters = masm->isolate()->counters();
-    __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
-  }
-
-  // Call the stub.
-  __ CallStub(this);
-}
-
-
 class FloatingPointHelper : public AllStatic {
  public:
   // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
@@ -460,561 +300,28 @@
   // As above, but we know the operands to be numbers. In that case,
   // conversion can't fail.
   static void LoadNumbersAsIntegers(MacroAssembler* masm);
+
+  // Tries to convert two values to smis losslessly.
+  // This fails if either argument is not a Smi nor a HeapNumber,
+  // or if it's a HeapNumber with a value that can't be converted
+  // losslessly to a Smi. In that case, control transitions to the
+  // on_not_smis label.
+  // On success, either control goes to the on_success label (if one is
+  // provided), or it falls through at the end of the code (if on_success
+  // is NULL).
+  // On success, both first and second holds Smi tagged values.
+  // One of first or second must be non-Smi when entering.
+  static void NumbersToSmis(MacroAssembler* masm,
+                            Register first,
+                            Register second,
+                            Register scratch1,
+                            Register scratch2,
+                            Register scratch3,
+                            Label* on_success,
+                            Label* on_not_smis);
 };
 
 
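
A hedged C++ model of the lossless check NumbersToSmis implements (truncate
with cvttsd2siq, convert the 32-bit result back with cvtlsi2sd, and compare
raw bits so that -0.0 and out-of-range doubles are rejected). The real check
runs as generated machine code; the cast below is also UB in portable C++ for
out-of-range inputs, where cvttsd2siq instead yields the integer indefinite
value.

    #include <cstdint>
    #include <cstring>
    bool DoubleToSmiPayload(double value, int32_t* out) {
      int64_t t = static_cast<int64_t>(value);                     // cvttsd2siq
      double back = static_cast<double>(static_cast<int32_t>(t));  // cvtlsi2sd
      uint64_t value_bits, back_bits;
      std::memcpy(&value_bits, &value, sizeof value);  // movq xmm -> gp reg
      std::memcpy(&back_bits, &back, sizeof back);
      if (value_bits != back_bits) return false;  // lossy, -0.0, or too wide
      *out = static_cast<int32_t>(t);              // then Integer32ToSmi
      return true;
    }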
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
-  // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
-  // dividend in rax and rdx free for the division.  Use rax, rbx for those.
-  Comment load_comment(masm, "-- Load arguments");
-  Register left = rdx;
-  Register right = rax;
-  if (op_ == Token::DIV || op_ == Token::MOD) {
-    left = rax;
-    right = rbx;
-    if (HasArgsInRegisters()) {
-      __ movq(rbx, rax);
-      __ movq(rax, rdx);
-    }
-  }
-  if (!HasArgsInRegisters()) {
-    __ movq(right, Operand(rsp, 1 * kPointerSize));
-    __ movq(left, Operand(rsp, 2 * kPointerSize));
-  }
-
-  Label not_smis;
-  // 2. Smi check both operands.
-  if (static_operands_type_.IsSmi()) {
-    // Skip smi check if we know that both arguments are smis.
-    if (FLAG_debug_code) {
-      __ AbortIfNotSmi(left);
-      __ AbortIfNotSmi(right);
-    }
-    if (op_ == Token::BIT_OR) {
-      // Handle OR here, since we do extra smi-checking in the or code below.
-      __ SmiOr(right, right, left);
-      GenerateReturn(masm);
-      return;
-    }
-  } else {
-    if (op_ != Token::BIT_OR) {
-      // Skip the check for OR as it is better combined with the
-      // actual operation.
-      Comment smi_check_comment(masm, "-- Smi check arguments");
-      __ JumpIfNotBothSmi(left, right, &not_smis);
-    }
-  }
-
-  // 3. Operands are both smis (except for OR), perform the operation leaving
-  // the result in rax and check the result if necessary.
-  Comment perform_smi(masm, "-- Perform smi operation");
-  Label use_fp_on_smis;
-  switch (op_) {
-    case Token::ADD: {
-      ASSERT(right.is(rax));
-      __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative.
-      break;
-    }
-
-    case Token::SUB: {
-      __ SmiSub(left, left, right, &use_fp_on_smis);
-      __ movq(rax, left);
-      break;
-    }
-
-    case Token::MUL:
-      ASSERT(right.is(rax));
-      __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative.
-      break;
-
-    case Token::DIV:
-      ASSERT(left.is(rax));
-      __ SmiDiv(left, left, right, &use_fp_on_smis);
-      break;
-
-    case Token::MOD:
-      ASSERT(left.is(rax));
-      __ SmiMod(left, left, right, slow);
-      break;
-
-    case Token::BIT_OR:
-      ASSERT(right.is(rax));
-      __ movq(rcx, right);  // Save the right operand.
-      __ SmiOr(right, right, left);  // BIT_OR is commutative.
-      __ testb(right, Immediate(kSmiTagMask));
-      __ j(not_zero, &not_smis);
-      break;
-
-    case Token::BIT_AND:
-      ASSERT(right.is(rax));
-      __ SmiAnd(right, right, left);  // BIT_AND is commutative.
-      break;
-
-    case Token::BIT_XOR:
-      ASSERT(right.is(rax));
-      __ SmiXor(right, right, left);  // BIT_XOR is commutative.
-      break;
-
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR:
-      switch (op_) {
-        case Token::SAR:
-          __ SmiShiftArithmeticRight(left, left, right);
-          break;
-        case Token::SHR:
-          __ SmiShiftLogicalRight(left, left, right, slow);
-          break;
-        case Token::SHL:
-          __ SmiShiftLeft(left, left, right);
-          break;
-        default:
-          UNREACHABLE();
-      }
-      __ movq(rax, left);
-      break;
-
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  // 4. Emit return of result in rax.
-  GenerateReturn(masm);
-
-  // 5. For some operations emit inline code to perform floating point
-  // operations on known smis (e.g., if the result of the operation
-  // overflowed the smi range).
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV: {
-      ASSERT(use_fp_on_smis.is_linked());
-      __ bind(&use_fp_on_smis);
-      if (op_ == Token::DIV) {
-        __ movq(rdx, rax);
-        __ movq(rax, rbx);
-      }
-      // left is rdx, right is rax.
-      __ AllocateHeapNumber(rbx, rcx, slow);
-      FloatingPointHelper::LoadSSE2SmiOperands(masm);
-      switch (op_) {
-        case Token::ADD: __ addsd(xmm0, xmm1); break;
-        case Token::SUB: __ subsd(xmm0, xmm1); break;
-        case Token::MUL: __ mulsd(xmm0, xmm1); break;
-        case Token::DIV: __ divsd(xmm0, xmm1); break;
-        default: UNREACHABLE();
-      }
-      __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-      __ movq(rax, rbx);
-      GenerateReturn(masm);
-    }
-    default:
-      break;
-  }
-
-  // 6. Non-smi operands, fall out to the non-smi code with the operands in
-  // rdx and rax.
-  Comment done_comment(masm, "-- Enter non-smi code");
-  __ bind(&not_smis);
-
-  switch (op_) {
-    case Token::DIV:
-    case Token::MOD:
-      // Operands are in rax, rbx at this point.
-      __ movq(rdx, rax);
-      __ movq(rax, rbx);
-      break;
-
-    case Token::BIT_OR:
-      // Right operand is saved in rcx and rax was destroyed by the smi
-      // operation.
-      __ movq(rax, rcx);
-      break;
-
-    default:
-      break;
-  }
-}
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
-  Label call_runtime;
-
-  if (ShouldGenerateSmiCode()) {
-    GenerateSmiCode(masm, &call_runtime);
-  } else if (op_ != Token::MOD) {
-    if (!HasArgsInRegisters()) {
-      GenerateLoadArguments(masm);
-    }
-  }
-  // Floating point case.
-  if (ShouldGenerateFPCode()) {
-    switch (op_) {
-      case Token::ADD:
-      case Token::SUB:
-      case Token::MUL:
-      case Token::DIV: {
-        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
-            HasSmiCodeInStub()) {
-          // Execution reaches this point when the first non-smi argument occurs
-          // (and only if smi code is generated). This is the right moment to
-          // patch to HEAP_NUMBERS state. The transition is attempted only for
-          // the four basic operations. The stub stays in the DEFAULT state
-          // forever for all other operations (also if smi code is skipped).
-          GenerateTypeTransition(masm);
-          break;
-        }
-
-        Label not_floats;
-        // rax: y
-        // rdx: x
-        if (static_operands_type_.IsNumber()) {
-          if (FLAG_debug_code) {
-            // Assert at runtime that inputs are only numbers.
-            __ AbortIfNotNumber(rdx);
-            __ AbortIfNotNumber(rax);
-          }
-          FloatingPointHelper::LoadSSE2NumberOperands(masm);
-        } else {
-          FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
-        }
-
-        switch (op_) {
-          case Token::ADD: __ addsd(xmm0, xmm1); break;
-          case Token::SUB: __ subsd(xmm0, xmm1); break;
-          case Token::MUL: __ mulsd(xmm0, xmm1); break;
-          case Token::DIV: __ divsd(xmm0, xmm1); break;
-          default: UNREACHABLE();
-        }
-        // Allocate a heap number, if needed.
-        Label skip_allocation;
-        OverwriteMode mode = mode_;
-        if (HasArgsReversed()) {
-          if (mode == OVERWRITE_RIGHT) {
-            mode = OVERWRITE_LEFT;
-          } else if (mode == OVERWRITE_LEFT) {
-            mode = OVERWRITE_RIGHT;
-          }
-        }
-        switch (mode) {
-          case OVERWRITE_LEFT:
-            __ JumpIfNotSmi(rdx, &skip_allocation);
-            __ AllocateHeapNumber(rbx, rcx, &call_runtime);
-            __ movq(rdx, rbx);
-            __ bind(&skip_allocation);
-            __ movq(rax, rdx);
-            break;
-          case OVERWRITE_RIGHT:
-            // If the argument in rax is already an object, we skip the
-            // allocation of a heap number.
-            __ JumpIfNotSmi(rax, &skip_allocation);
-            // Fall through!
-          case NO_OVERWRITE:
-            // Allocate a heap number for the result. Keep rax and rdx intact
-            // for the possible runtime call.
-            __ AllocateHeapNumber(rbx, rcx, &call_runtime);
-            __ movq(rax, rbx);
-            __ bind(&skip_allocation);
-            break;
-          default: UNREACHABLE();
-        }
-        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
-        GenerateReturn(masm);
-        __ bind(&not_floats);
-        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
-            !HasSmiCodeInStub()) {
-            // Execution reaches this point when the first non-number argument
-            // occurs (and only if smi code is skipped from the stub, otherwise
-            // the patching has already been done earlier in this case branch).
-            // A perfect moment to try patching to STRINGS for ADD operation.
-            if (op_ == Token::ADD) {
-              GenerateTypeTransition(masm);
-            }
-        }
-        break;
-      }
-      case Token::MOD: {
-        // For MOD we go directly to runtime in the non-smi case.
-        break;
-      }
-      case Token::BIT_OR:
-      case Token::BIT_AND:
-      case Token::BIT_XOR:
-      case Token::SAR:
-      case Token::SHL:
-      case Token::SHR: {
-        Label skip_allocation, non_smi_shr_result;
-        Register heap_number_map = r9;
-        __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-        if (static_operands_type_.IsNumber()) {
-          if (FLAG_debug_code) {
-            // Assert at runtime that inputs are only numbers.
-            __ AbortIfNotNumber(rdx);
-            __ AbortIfNotNumber(rax);
-          }
-          FloatingPointHelper::LoadNumbersAsIntegers(masm);
-        } else {
-          FloatingPointHelper::LoadAsIntegers(masm,
-                                              &call_runtime,
-                                              heap_number_map);
-        }
-        switch (op_) {
-          case Token::BIT_OR:  __ orl(rax, rcx); break;
-          case Token::BIT_AND: __ andl(rax, rcx); break;
-          case Token::BIT_XOR: __ xorl(rax, rcx); break;
-          case Token::SAR: __ sarl_cl(rax); break;
-          case Token::SHL: __ shll_cl(rax); break;
-          case Token::SHR: {
-            __ shrl_cl(rax);
-            // Check if result is negative. This can only happen for a shift
-            // by zero.
-            __ testl(rax, rax);
-            __ j(negative, &non_smi_shr_result);
-            break;
-          }
-          default: UNREACHABLE();
-        }
-
-        STATIC_ASSERT(kSmiValueSize == 32);
-        // Tag smi result and return.
-        __ Integer32ToSmi(rax, rax);
-        GenerateReturn(masm);
-
-        // All bit-ops except SHR return a signed int32 that can be
-        // returned immediately as a smi.
-        // We might need to allocate a HeapNumber if we shift a negative
-        // number right by zero (i.e., convert to UInt32).
-        if (op_ == Token::SHR) {
-          ASSERT(non_smi_shr_result.is_linked());
-          __ bind(&non_smi_shr_result);
-          // Allocate a heap number if needed.
-          __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
-          switch (mode_) {
-            case OVERWRITE_LEFT:
-            case OVERWRITE_RIGHT:
-              // If the operand was an object, we skip the
-              // allocation of a heap number.
-              __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
-                                   1 * kPointerSize : 2 * kPointerSize));
-              __ JumpIfNotSmi(rax, &skip_allocation);
-              // Fall through!
-            case NO_OVERWRITE:
-              // Allocate heap number in new space.
-              // Not using AllocateHeapNumber macro in order to reuse
-              // already loaded heap_number_map.
-              __ AllocateInNewSpace(HeapNumber::kSize,
-                                    rax,
-                                    rcx,
-                                    no_reg,
-                                    &call_runtime,
-                                    TAG_OBJECT);
-              // Set the map.
-              if (FLAG_debug_code) {
-                __ AbortIfNotRootValue(heap_number_map,
-                                       Heap::kHeapNumberMapRootIndex,
-                                       "HeapNumberMap register clobbered.");
-              }
-              __ movq(FieldOperand(rax, HeapObject::kMapOffset),
-                      heap_number_map);
-              __ bind(&skip_allocation);
-              break;
-            default: UNREACHABLE();
-          }
-          // Store the result in the HeapNumber and return.
-          __ cvtqsi2sd(xmm0, rbx);
-          __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
-          GenerateReturn(masm);
-        }
-
-        break;
-      }
-      default: UNREACHABLE(); break;
-    }
-  }
-
-  // If all else fails, use the runtime system to get the correct
-  // result. If arguments was passed in registers now place them on the
-  // stack in the correct order below the return address.
-  __ bind(&call_runtime);
-
-  if (HasArgsInRegisters()) {
-    GenerateRegisterArgsPush(masm);
-  }
-
-  switch (op_) {
-    case Token::ADD: {
-      // Registers containing left and right operands respectively.
-      Register lhs, rhs;
-
-      if (HasArgsReversed()) {
-        lhs = rax;
-        rhs = rdx;
-      } else {
-        lhs = rdx;
-        rhs = rax;
-      }
-
-      // Test for string arguments before calling runtime.
-      Label not_strings, both_strings, not_string1, string1, string1_smi2;
-
-      // If this stub has already generated FP-specific code then the arguments
-      // are already in rdx and rax.
-      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
-        GenerateLoadArguments(masm);
-      }
-
-      Condition is_smi;
-      is_smi = masm->CheckSmi(lhs);
-      __ j(is_smi, &not_string1);
-      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
-      __ j(above_equal, &not_string1);
-
-      // First argument is a a string, test second.
-      is_smi = masm->CheckSmi(rhs);
-      __ j(is_smi, &string1_smi2);
-      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
-      __ j(above_equal, &string1);
-
-      // First and second argument are strings.
-      StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
-      __ TailCallStub(&string_add_stub);
-
-      __ bind(&string1_smi2);
-      // First argument is a string, second is a smi. Try to lookup the number
-      // string for the smi in the number string cache.
-      NumberToStringStub::GenerateLookupNumberStringCache(
-          masm, rhs, rbx, rcx, r8, true, &string1);
-
-      // Replace second argument on stack and tailcall string add stub to make
-      // the result.
-      __ movq(Operand(rsp, 1 * kPointerSize), rbx);
-      __ TailCallStub(&string_add_stub);
-
-      // Only first argument is a string.
-      __ bind(&string1);
-      __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
-      // First argument was not a string, test second.
-      __ bind(&not_string1);
-      is_smi = masm->CheckSmi(rhs);
-      __ j(is_smi, &not_strings);
-      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
-      __ j(above_equal, &not_strings);
-
-      // Only second argument is a string.
-      __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
-      __ bind(&not_strings);
-      // Neither argument is a string.
-      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
-      break;
-    }
-    case Token::SUB:
-      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
-      break;
-    case Token::MUL:
-      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
-      break;
-    case Token::DIV:
-      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
-      break;
-    case Token::MOD:
-      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
-      break;
-    case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
-      break;
-    case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
-      break;
-    case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
-      break;
-    case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
-      break;
-    case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
-      break;
-    case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
-  ASSERT(!HasArgsInRegisters());
-  __ movq(rax, Operand(rsp, 1 * kPointerSize));
-  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-}
-
-
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
-  // If arguments are not passed in registers remove them from the stack before
-  // returning.
-  if (!HasArgsInRegisters()) {
-    __ ret(2 * kPointerSize);  // Remove both operands
-  } else {
-    __ ret(0);
-  }
-}
-
-
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
-  ASSERT(HasArgsInRegisters());
-  __ pop(rcx);
-  if (HasArgsReversed()) {
-    __ push(rax);
-    __ push(rdx);
-  } else {
-    __ push(rdx);
-    __ push(rax);
-  }
-  __ push(rcx);
-}
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  Label get_result;
-
-  // Ensure the operands are on the stack.
-  if (HasArgsInRegisters()) {
-    GenerateRegisterArgsPush(masm);
-  }
-
-  // Left and right arguments are already on stack.
-  __ pop(rcx);  // Save the return address.
-
-  // Push this stub's key.
-  __ Push(Smi::FromInt(MinorKey()));
-
-  // Although the operation and the type info are encoded into the key,
-  // the encoding is opaque, so push them too.
-  __ Push(Smi::FromInt(op_));
-
-  __ Push(Smi::FromInt(runtime_operands_type_));
-
-  __ push(rcx);  // The return address.
-
-  // Perform patching to an appropriate fast case and return the result.
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
-      5,
-      1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
-  return stub.GetCode();
-}
-
-
 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
     TRBinaryOpIC::TypeInfo type_info,
     TRBinaryOpIC::TypeInfo result_type_info) {
@@ -1065,6 +372,9 @@
     case TRBinaryOpIC::ODDBALL:
       GenerateOddballStub(masm);
       break;
+    case TRBinaryOpIC::BOTH_STRING:
+      GenerateBothStringStub(masm);
+      break;
     case TRBinaryOpIC::STRING:
       GenerateStringStub(masm);
       break;
@@ -1105,29 +415,30 @@
     Label* slow,
     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
 
-  // We only generate heapnumber answers for overflowing calculations
-  // for the four basic arithmetic operations.
-  bool generate_inline_heapnumber_results =
-      (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
-      (op_ == Token::ADD || op_ == Token::SUB ||
-       op_ == Token::MUL || op_ == Token::DIV);
-
   // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
   Register left = rdx;
   Register right = rax;
 
+  // We only generate heapnumber answers for overflowing calculations
+  // for the four basic arithmetic operations and logical right shift by 0.
+  bool generate_inline_heapnumber_results =
+      (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
+      (op_ == Token::ADD || op_ == Token::SUB ||
+       op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
 
   // Smi check of both operands.  If op is BIT_OR, the check is delayed
   // until after the OR operation.
   Label not_smis;
   Label use_fp_on_smis;
-  Label restore_MOD_registers;  // Only used if op_ == Token::MOD.
+  Label fail;
 
   if (op_ != Token::BIT_OR) {
     Comment smi_check_comment(masm, "-- Smi check arguments");
     __ JumpIfNotBothSmi(left, right, &not_smis);
   }
 
+  Label smi_values;
+  __ bind(&smi_values);
   // Perform the operation.
   Comment perform_smi(masm, "-- Perform smi operation");
   switch (op_) {
@@ -1166,9 +477,7 @@
 
     case Token::BIT_OR: {
       ASSERT(right.is(rax));
-      __ movq(rcx, right);  // Save the right operand.
-      __ SmiOr(right, right, left);  // BIT_OR is commutative.
-      __ JumpIfNotSmi(right, &not_smis);  // Test delayed until after BIT_OR.
+      __ SmiOrIfSmis(right, right, left, &not_smis);  // BIT_OR is commutative.
       break;
       }
     case Token::BIT_XOR:
@@ -1192,7 +501,7 @@
       break;
 
     case Token::SHR:
-      __ SmiShiftLogicalRight(left, left, right, &not_smis);
+      __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
       __ movq(rax, left);
       break;
 
@@ -1203,41 +512,52 @@
   // 5. Emit return of result in rax.  Some operations have registers pushed.
   __ ret(0);
 
-  // 6. For some operations emit inline code to perform floating point
-  //    operations on known smis (e.g., if the result of the operation
-  //    overflowed the smi range).
-  __ bind(&use_fp_on_smis);
-  if (op_ == Token::DIV || op_ == Token::MOD) {
-    // Restore left and right to rdx and rax.
-    __ movq(rdx, rcx);
-    __ movq(rax, rbx);
-  }
-
-
-  if (generate_inline_heapnumber_results) {
-    __ AllocateHeapNumber(rcx, rbx, slow);
-    Comment perform_float(masm, "-- Perform float operation on smis");
-    FloatingPointHelper::LoadSSE2SmiOperands(masm);
-    switch (op_) {
-      case Token::ADD: __ addsd(xmm0, xmm1); break;
-      case Token::SUB: __ subsd(xmm0, xmm1); break;
-      case Token::MUL: __ mulsd(xmm0, xmm1); break;
-      case Token::DIV: __ divsd(xmm0, xmm1); break;
-      default: UNREACHABLE();
+  if (use_fp_on_smis.is_linked()) {
+    // 6. For some operations emit inline code to perform floating point
+    //    operations on known smis (e.g., if the result of the operation
+    //    overflowed the smi range).
+    __ bind(&use_fp_on_smis);
+    if (op_ == Token::DIV || op_ == Token::MOD) {
+      // Restore left and right to rdx and rax.
+      __ movq(rdx, rcx);
+      __ movq(rax, rbx);
     }
-    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
-    __ movq(rax, rcx);
-    __ ret(0);
+
+    if (generate_inline_heapnumber_results) {
+      __ AllocateHeapNumber(rcx, rbx, slow);
+      Comment perform_float(masm, "-- Perform float operation on smis");
+      if (op_ == Token::SHR) {
+        __ SmiToInteger32(left, left);
+        __ cvtqsi2sd(xmm0, left);
+      } else {
+        FloatingPointHelper::LoadSSE2SmiOperands(masm);
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+      }
+      __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+      __ movq(rax, rcx);
+      __ ret(0);
+    } else {
+      __ jmp(&fail);
+    }
   }
 
   // 7. Non-smi operands reach the end of the code generated by
   //    GenerateSmiCode, and fall through to subsequent code,
   //    with the operands in rdx and rax.
-  Comment done_comment(masm, "-- Enter non-smi code");
+  //    But first we check whether the non-smi values are HeapNumbers
+  //    holding values that could be represented as smis.
   __ bind(&not_smis);
-  if (op_ == Token::BIT_OR) {
-    __ movq(right, rcx);
-  }
+  Comment done_comment(masm, "-- Enter non-smi code");
+  FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
+                                     &smi_values, &fail);
+  __ jmp(&smi_values);
+  __ bind(&fail);
 }
 
 
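
Why SHR joins the inline heap-number path here: a logical right shift yields
an unsigned 32-bit value, which can exceed the signed 32-bit smi payload
(x64 smis carry a 32-bit payload, kSmiValueSize == 32). One worked case:

    #include <cstdint>
    // -1 >>> 0 in JavaScript semantics: 0xFFFFFFFF as an unsigned value.
    const uint32_t kShifted = static_cast<uint32_t>(-1) >> 0;  // 4294967295
    // 4294967295 > 2^31 - 1, so SmiShiftLogicalRight bails to
    // use_fp_on_smis, which converts via cvtqsi2sd and boxes a HeapNumber.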
@@ -1422,12 +742,25 @@
 
 
 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  Label not_smi;
+  Label call_runtime;
+  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
+      result_type_ == TRBinaryOpIC::SMI) {
+    // Only allow smi results.
+    GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+  } else {
+    // Allow heap number result and don't make a transition if a heap number
+    // cannot be allocated.
+    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+  }
 
-  GenerateSmiCode(masm, &not_smi, NO_HEAPNUMBER_RESULTS);
-
-  __ bind(&not_smi);
+  // Code falls through if the result is not returned as either a smi or heap
+  // number.
   GenerateTypeTransition(masm);
+
+  if (call_runtime.is_linked()) {
+    __ bind(&call_runtime);
+    GenerateCallRuntimeCode(masm);
+  }
 }
 
 
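
The dispatch above is keyed to the recorded result type: while the IC has
only seen smi results, the smi-only variant is emitted, so the first
overflowing operation falls through to GenerateTypeTransition rather than
allocating. A standalone overflow predicate for the 32-bit smi payload
(helper name hypothetical):

    #include <cstdint>
    // True when a + b leaves the smi payload range,
    // e.g. SmiAddOverflows(0x7FFFFFFF, 1).
    bool SmiAddOverflows(int32_t a, int32_t b) {
      int64_t result = static_cast<int64_t>(a) + b;
      return result < INT32_MIN || result > INT32_MAX;
    }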
@@ -1441,6 +774,36 @@
 }
 
 
+void TypeRecordingBinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(operands_type_ == TRBinaryOpIC::BOTH_STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = rdx;
+  Register right = rax;
+
+  // Test if left operand is a string.
+  __ JumpIfSmi(left, &call_runtime);
+  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
+  __ j(above_equal, &call_runtime);
+
+  // Test if right operand is a string.
+  __ JumpIfSmi(right, &call_runtime);
+  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
+  __ j(above_equal, &call_runtime);
+
+  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
+
+
 void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
   Label call_runtime;
 
@@ -1951,7 +1314,7 @@
   __ bind(&check_undefined_arg1);
   __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
   __ j(not_equal, conversion_failure);
-  __ movl(r8, Immediate(0));
+  __ Set(r8, 0);
   __ jmp(&load_arg2);
 
   __ bind(&arg1_is_object);
@@ -1971,7 +1334,7 @@
   __ bind(&check_undefined_arg2);
   __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
   __ j(not_equal, conversion_failure);
-  __ movl(rcx, Immediate(0));
+  __ Set(rcx, 0);
   __ jmp(&done);
 
   __ bind(&arg2_is_object);
@@ -2046,6 +1409,62 @@
 }
 
 
+void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
+                                        Register first,
+                                        Register second,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register scratch3,
+                                        Label* on_success,
+                                        Label* on_not_smis) {
+  Register heap_number_map = scratch3;
+  Register smi_result = scratch1;
+  Label done;
+
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+  NearLabel first_smi, check_second;
+  __ JumpIfSmi(first, &first_smi);
+  __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
+  __ j(not_equal, on_not_smis);
+  // Convert HeapNumber to smi if possible.
+  __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
+  __ movq(scratch2, xmm0);
+  __ cvttsd2siq(smi_result, xmm0);
+  // Check if conversion was successful by converting back and
+  // comparing to the original double's bits.
+  __ cvtlsi2sd(xmm1, smi_result);
+  __ movq(kScratchRegister, xmm1);
+  __ cmpq(scratch2, kScratchRegister);
+  __ j(not_equal, on_not_smis);
+  __ Integer32ToSmi(first, smi_result);
+
+  __ bind(&check_second);
+  __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
+  __ bind(&first_smi);
+  if (FLAG_debug_code) {
+    // Second should be non-smi if we get here.
+    __ AbortIfSmi(second);
+  }
+  __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
+  __ j(not_equal, on_not_smis);
+  // Convert second to smi, if possible.
+  __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
+  __ movq(scratch2, xmm0);
+  __ cvttsd2siq(smi_result, xmm0);
+  __ cvtlsi2sd(xmm1, smi_result);
+  __ movq(kScratchRegister, xmm1);
+  __ cmpq(scratch2, kScratchRegister);
+  __ j(not_equal, on_not_smis);
+  __ Integer32ToSmi(second, smi_result);
+  if (on_success != NULL) {
+    __ jmp(on_success);
+  } else {
+    __ bind(&done);
+  }
+}
+
+
 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
   Label slow, done;
 
@@ -2072,7 +1491,7 @@
     __ j(not_equal, &slow);
     // Operand is a float, negate its value by flipping sign bit.
     __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
-    __ movq(kScratchRegister, Immediate(0x01));
+    __ Set(kScratchRegister, 0x01);
     __ shl(kScratchRegister, Immediate(63));
     __ xor_(rdx, kScratchRegister);  // Flip sign.
     // rdx is value to store.
@@ -2144,7 +1563,7 @@
   __ movq(rax, Operand(rsp, 1 * kPointerSize));
 
   // Save 1 in xmm3 - we need this several times later on.
-  __ movl(rcx, Immediate(1));
+  __ Set(rcx, 1);
   __ cvtlsi2sd(xmm3, rcx);
 
   Label exponent_nonsmi;
@@ -2183,7 +1602,7 @@
   __ bind(&no_neg);
 
   // Load xmm1 with 1.
-  __ movsd(xmm1, xmm3);
+  __ movaps(xmm1, xmm3);
   NearLabel while_true;
   NearLabel no_multiply;
 
@@ -2201,8 +1620,8 @@
   __ j(positive, &allocate_return);
   // Special case if xmm1 has reached infinity.
   __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ xorpd(xmm0, xmm0);
+  __ movaps(xmm1, xmm3);
+  __ xorps(xmm0, xmm0);
   __ ucomisd(xmm0, xmm1);
   __ j(equal, &call_runtime);
 
@@ -2250,11 +1669,11 @@
 
   // Calculates reciprocal of square root.
   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
+  __ xorps(xmm1, xmm1);
   __ addsd(xmm1, xmm0);
   __ sqrtsd(xmm1, xmm1);
   __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
+  __ movaps(xmm1, xmm3);
   __ jmp(&allocate_return);
 
   // Test for 0.5.
@@ -2267,8 +1686,8 @@
   __ j(not_equal, &call_runtime);
   // Calculates square root.
   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
+  __ xorps(xmm1, xmm1);
+  __ addsd(xmm1, xmm0);  // Convert -0 to 0.
   __ sqrtsd(xmm1, xmm1);
 
   __ bind(&allocate_return);
@@ -2944,9 +2363,10 @@
   // Heap::GetNumberStringCache.
   Label is_smi;
   Label load_result_from_cache;
+  Factory* factory = masm->isolate()->factory();
   if (!object_is_smi) {
     __ JumpIfSmi(object, &is_smi);
-    __ CheckMap(object, FACTORY->heap_number_map(), not_found, true);
+    __ CheckMap(object, factory->heap_number_map(), not_found, true);
 
     STATIC_ASSERT(8 == kDoubleSize);
     __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
@@ -2961,8 +2381,6 @@
                          times_1,
                          FixedArray::kHeaderSize));
     __ JumpIfSmi(probe, not_found);
-    ASSERT(Isolate::Current()->cpu_features()->IsSupported(SSE2));
-    CpuFeatures::Scope fscope(SSE2);
     __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
     __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
     __ ucomisd(xmm0, xmm1);
@@ -3035,6 +2453,7 @@
   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
 
   Label check_unequal_objects, done;
+  Factory* factory = masm->isolate()->factory();
 
   // Compare two smis if required.
   if (include_smi_compare_) {
@@ -3082,7 +2501,6 @@
     // Note: if cc_ != equal, never_nan_nan_ is not used.
     // We cannot set rax to EQUAL until just before return because
     // rax must be unchanged on jump to not_identical.
-
     if (never_nan_nan_ && (cc_ == equal)) {
       __ Set(rax, EQUAL);
       __ ret(0);
@@ -3090,7 +2508,7 @@
       NearLabel heap_number;
       // If it's not a heap number, then return equal for (in)equality operator.
       __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
-             FACTORY->heap_number_map());
+             factory->heap_number_map());
       __ j(equal, &heap_number);
       if (cc_ != equal) {
         // Call runtime on identical JSObjects.  Otherwise return equal.
@@ -3135,7 +2553,7 @@
 
         // Check if the non-smi operand is a heap number.
         __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
-               FACTORY->heap_number_map());
+               factory->heap_number_map());
         // If heap number, handle it in the slow case.
         __ j(equal, &slow);
         // Return non-equal.  ebx (the lower half of rbx) is not zero.
@@ -3761,10 +3179,10 @@
   // is and instance of the function and anything else to
   // indicate that the value is not an instance.
 
-  static const int kOffsetToMapCheckValue = 5;
-  static const int kOffsetToResultValue = 21;
+  static const int kOffsetToMapCheckValue = 2;
+  static const int kOffsetToResultValue = 18;
   // The last 4 bytes of the instruction sequence
-  //   movq(rax, FieldOperand(rdi, HeapObject::kMapOffset)
+  //   movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
   //   Move(kScratchRegister, FACTORY->the_hole_value())
   // in front of the hole value address.
   static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
@@ -3830,7 +3248,7 @@
     if (FLAG_debug_code) {
       __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
       __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
-      __ Assert(equal, "InstanceofStub unexpected call site cache.");
+      __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
     }
   }
 
@@ -3867,9 +3285,9 @@
     if (FLAG_debug_code) {
       __ movl(rax, Immediate(kWordBeforeResultValue));
       __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
-      __ Assert(equal, "InstanceofStub unexpected call site cache.");
+      __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
     }
-    __ xorl(rax, rax);
+    __ Set(rax, 0);
   }
   __ ret(2 * kPointerSize + extra_stack_space);
 
@@ -4066,10 +3484,11 @@
     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
+  Factory* factory = masm->isolate()->factory();
   // Index is not a smi.
   __ bind(&index_not_smi_);
   // If index is a heap number, try converting it to an integer.
-  __ CheckMap(index_, FACTORY->heap_number_map(), index_not_number_, true);
+  __ CheckMap(index_, factory->heap_number_map(), index_not_number_, true);
   call_helper.BeforeCall(masm);
   __ push(object_);
   __ push(index_);
@@ -4728,7 +4147,7 @@
   // if (hash == 0) hash = 27;
   Label hash_not_zero;
   __ j(not_zero, &hash_not_zero);
-  __ movl(hash, Immediate(27));
+  __ Set(hash, 27);
   __ bind(&hash_not_zero);
 }
 
@@ -4924,7 +4343,7 @@
     // Use scratch3 as loop index, min_length as limit and scratch2
     // for computation.
     const Register index = scratch3;
-    __ movl(index, Immediate(0));  // Index into strings.
+    __ Set(index, 0);  // Index into strings.
     __ bind(&loop);
     // Compare characters.
     // TODO(lrn): Could we load more than one character at a time?
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 246650a..3b40280 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -71,145 +71,6 @@
 };
 
 
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
-  NO_GENERIC_BINARY_FLAGS = 0,
-  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
-  GenericBinaryOpStub(Token::Value op,
-                      OverwriteMode mode,
-                      GenericBinaryFlags flags,
-                      TypeInfo operands_type = TypeInfo::Unknown())
-      : op_(op),
-        mode_(mode),
-        flags_(flags),
-        args_in_registers_(false),
-        args_reversed_(false),
-        static_operands_type_(operands_type),
-        runtime_operands_type_(BinaryOpIC::DEFAULT),
-        name_(NULL) {
-    ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
-  }
-
-  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo runtime_operands_type)
-      : op_(OpBits::decode(key)),
-        mode_(ModeBits::decode(key)),
-        flags_(FlagBits::decode(key)),
-        args_in_registers_(ArgsInRegistersBits::decode(key)),
-        args_reversed_(ArgsReversedBits::decode(key)),
-        static_operands_type_(TypeInfo::ExpandedRepresentation(
-            StaticTypeInfoBits::decode(key))),
-        runtime_operands_type_(runtime_operands_type),
-        name_(NULL) {
-  }
-
-  // Generate code to call the stub with the supplied arguments. This will add
-  // code at the call site to prepare arguments either in registers or on the
-  // stack together with the actual call.
-  void GenerateCall(MacroAssembler* masm, Register left, Register right);
-  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
-  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
-
-  bool ArgsInRegistersSupported() {
-    return (op_ == Token::ADD) || (op_ == Token::SUB)
-        || (op_ == Token::MUL) || (op_ == Token::DIV);
-  }
-
- private:
-  Token::Value op_;
-  OverwriteMode mode_;
-  GenericBinaryFlags flags_;
-  bool args_in_registers_;  // Arguments passed in registers not on the stack.
-  bool args_reversed_;  // Left and right argument are swapped.
-
-  // Number type information of operands, determined by code generator.
-  TypeInfo static_operands_type_;
-
-  // Operand type information determined at runtime.
-  BinaryOpIC::TypeInfo runtime_operands_type_;
-
-  char* name_;
-
-  const char* GetName();
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("GenericBinaryOpStub %d (op %s), "
-           "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n",
-           MinorKey(),
-           Token::String(op_),
-           static_cast<int>(mode_),
-           static_cast<int>(flags_),
-           static_cast<int>(args_in_registers_),
-           static_cast<int>(args_reversed_),
-           static_operands_type_.ToString());
-  }
-#endif
-
-  // Minor key encoding in 17 bits TTNNNFRAOOOOOOOMM.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 7> {};
-  class ArgsInRegistersBits: public BitField<bool, 9, 1> {};
-  class ArgsReversedBits: public BitField<bool, 10, 1> {};
-  class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
-  class StaticTypeInfoBits: public BitField<int, 12, 3> {};
-  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 3> {};
-
-  Major MajorKey() { return GenericBinaryOp; }
-  int MinorKey() {
-    // Encode the parameters in a unique 18 bit value.
-    return OpBits::encode(op_)
-           | ModeBits::encode(mode_)
-           | FlagBits::encode(flags_)
-           | ArgsInRegistersBits::encode(args_in_registers_)
-           | ArgsReversedBits::encode(args_reversed_)
-           | StaticTypeInfoBits::encode(
-               static_operands_type_.ThreeBitRepresentation())
-           | RuntimeTypeInfoBits::encode(runtime_operands_type_);
-  }
-
-  void Generate(MacroAssembler* masm);
-  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
-  void GenerateLoadArguments(MacroAssembler* masm);
-  void GenerateReturn(MacroAssembler* masm);
-  void GenerateRegisterArgsPush(MacroAssembler* masm);
-  void GenerateTypeTransition(MacroAssembler* masm);
-
-  bool IsOperationCommutative() {
-    return (op_ == Token::ADD) || (op_ == Token::MUL);
-  }
-
-  void SetArgsInRegisters() { args_in_registers_ = true; }
-  void SetArgsReversed() { args_reversed_ = true; }
-  bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
-  bool HasArgsInRegisters() { return args_in_registers_; }
-  bool HasArgsReversed() { return args_reversed_; }
-
-  bool ShouldGenerateSmiCode() {
-    return HasSmiCodeInStub() &&
-        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-        runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  bool ShouldGenerateFPCode() {
-    return runtime_operands_type_ != BinaryOpIC::STRINGS;
-  }
-
-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
-  virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(runtime_operands_type_);
-  }
-
-  friend class CodeGenerator;
-  friend class LCodeGen;
-};
-
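The minor key above packs every stub parameter into one integer with BitField so that a stub can be rebuilt from its key alone (see the second constructor). A minimal sketch of that encode/decode round-trip, using a simplified stand-in for V8's BitField template and illustrative field values:

#include <cassert>
#include <cstdint>

// Simplified stand-in for V8's BitField template: a field of `size` bits
// stored at bit offset `shift` inside an unsigned stub key.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kFieldMask = (1u << size) - 1;
  static uint32_t encode(T value) {
    assert((static_cast<uint32_t>(value) & ~kFieldMask) == 0);
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key >> shift) & kFieldMask);
  }
};

int main() {
  // Mirrors the stub layout above: mode in bits 0-1, op in bits 2-8.
  typedef BitField<int, 0, 2> ModeBits;
  typedef BitField<int, 2, 7> OpBits;
  uint32_t key = ModeBits::encode(3) | OpBits::encode(42);
  assert(ModeBits::decode(key) == 3 && OpBits::decode(key) == 42);
  return 0;
}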
-
 class TypeRecordingBinaryOpStub: public CodeStub {
  public:
   TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
@@ -291,6 +152,7 @@
   void GenerateHeapNumberStub(MacroAssembler* masm);
   void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
+  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
 
   void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
diff --git a/src/x64/codegen-x64-inl.h b/src/x64/codegen-x64-inl.h
deleted file mode 100644
index 53caf91..0000000
--- a/src/x64/codegen-x64-inl.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-#ifndef V8_X64_CODEGEN_X64_INL_H_
-#define V8_X64_CODEGEN_X64_INL_H_
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm_)
-
-// Platform-specific inline functions.
-
-void DeferredCode::Jump() { __ jmp(&entry_label_); }
-void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_X64_CODEGEN_X64_INL_H_
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 8c338fe..f8f2d6e 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -29,81 +29,14 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
-#include "bootstrapper.h"
-#include "code-stubs.h"
-#include "codegen-inl.h"
-#include "compiler.h"
-#include "debug.h"
-#include "ic-inl.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "virtual-frame-inl.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm)
-
-// -------------------------------------------------------------------------
-// Platform-specific FrameRegisterState functions.
-
-void FrameRegisterState::Save(MacroAssembler* masm) const {
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int action = registers_[i];
-    if (action == kPush) {
-      __ push(RegisterAllocator::ToRegister(i));
-    } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
-      __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i));
-    }
-  }
-}
-
-
-void FrameRegisterState::Restore(MacroAssembler* masm) const {
-  // Restore registers in reverse order due to the stack.
-  for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
-    int action = registers_[i];
-    if (action == kPush) {
-      __ pop(RegisterAllocator::ToRegister(i));
-    } else if (action != kIgnore) {
-      action &= ~kSyncedFlag;
-      __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action));
-    }
-  }
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm_)
-
-// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
-
-void DeferredCode::SaveRegisters() {
-  frame_state_.Save(masm_);
-}
-
-
-void DeferredCode::RestoreRegisters() {
-  frame_state_.Restore(masm_);
-}
-
-
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
-void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  frame_state_->Save(masm);
-}
-
-
-void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  frame_state_->Restore(masm);
-}
-
-
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterInternalFrame();
 }
@@ -114,8639 +47,6 @@
 }
 
 
-// -------------------------------------------------------------------------
-// CodeGenState implementation.
-
-CodeGenState::CodeGenState(CodeGenerator* owner)
-    : owner_(owner),
-      destination_(NULL),
-      previous_(NULL) {
-  owner_->set_state(this);
-}
-
-
-CodeGenState::CodeGenState(CodeGenerator* owner,
-                           ControlDestination* destination)
-    : owner_(owner),
-      destination_(destination),
-      previous_(owner->state()) {
-  owner_->set_state(this);
-}
-
-
-CodeGenState::~CodeGenState() {
-  ASSERT(owner_->state() == this);
-  owner_->set_state(previous_);
-}
-
-
-// -------------------------------------------------------------------------
-// CodeGenerator implementation.
-
-CodeGenerator::CodeGenerator(MacroAssembler* masm)
-    : deferred_(8),
-      masm_(masm),
-      info_(NULL),
-      frame_(NULL),
-      allocator_(NULL),
-      state_(NULL),
-      loop_nesting_(0),
-      function_return_is_shadowed_(false),
-      in_spilled_code_(false) {
-}
-
-
-// Calling conventions:
-// rbp: caller's frame pointer
-// rsp: stack pointer
-// rdi: called JS function
-// rsi: callee's context
-
-void CodeGenerator::Generate(CompilationInfo* info) {
-  // Record the position for debugging purposes.
-  CodeForFunctionPosition(info->function());
-  Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
-
-  // Initialize state.
-  info_ = info;
-  ASSERT(allocator_ == NULL);
-  RegisterAllocator register_allocator(this);
-  allocator_ = &register_allocator;
-  ASSERT(frame_ == NULL);
-  frame_ = new VirtualFrame();
-  set_in_spilled_code(false);
-
-  // Adjust for function-level loop nesting.
-  ASSERT_EQ(0, loop_nesting_);
-  loop_nesting_ = info->is_in_loop() ? 1 : 0;
-
-  Isolate::Current()->set_jump_target_compiling_deferred_code(false);
-
-  {
-    CodeGenState state(this);
-    // Entry:
-    // Stack: receiver, arguments, return address.
-    // rbp: caller's frame pointer
-    // rsp: stack pointer
-    // rdi: called JS function
-    // rsi: callee's context
-    allocator_->Initialize();
-
-#ifdef DEBUG
-    if (strlen(FLAG_stop_at) > 0 &&
-        info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-      frame_->SpillAll();
-      __ int3();
-    }
-#endif
-
-    frame_->Enter();
-
-    // Allocate space for locals and initialize them.
-    frame_->AllocateStackSlots();
-
-    // Allocate the local context if needed.
-    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-    if (heap_slots > 0) {
-      Comment cmnt(masm_, "[ allocate local context");
-      // Allocate local context.
-      // Get outer context and create a new context based on it.
-      frame_->PushFunction();
-      Result context;
-      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-        FastNewContextStub stub(heap_slots);
-        context = frame_->CallStub(&stub, 1);
-      } else {
-        context = frame_->CallRuntime(Runtime::kNewContext, 1);
-      }
-
-      // Update context local.
-      frame_->SaveContextRegister();
-
-      // Verify that the runtime call result and rsi agree.
-      if (FLAG_debug_code) {
-        __ cmpq(context.reg(), rsi);
-        __ Assert(equal, "Runtime::NewContext should end up in rsi");
-      }
-    }
-
-    // TODO(1241774): Improve this code:
-    // 1) only needed if we have a context
-    // 2) no need to recompute context ptr every single time
-    // 3) don't copy parameter operand code from SlotOperand!
-    {
-      Comment cmnt2(masm_, "[ copy context parameters into .context");
-      // Note that iteration order is relevant here! If we have the same
-      // parameter twice (e.g., function (x, y, x)), and that parameter
-      // needs to be copied into the context, the value copied must be that
-      // of the last argument passed for that parameter. This is a rare
-      // case, so we don't check for it; instead we rely on the copying
-      // order: such a parameter is copied repeatedly into the same
-      // context location and thus the last value is what is seen inside
-      // the function.
-      for (int i = 0; i < scope()->num_parameters(); i++) {
-        Variable* par = scope()->parameter(i);
-        Slot* slot = par->AsSlot();
-        if (slot != NULL && slot->type() == Slot::CONTEXT) {
-          // The use of SlotOperand below is safe in unspilled code
-          // because the slot is guaranteed to be a context slot.
-          //
-          // There are no parameters in the global scope.
-          ASSERT(!scope()->is_global_scope());
-          frame_->PushParameterAt(i);
-          Result value = frame_->Pop();
-          value.ToRegister();
-
-          // SlotOperand loads context.reg() with the context object
-          // stored to, used below in RecordWrite.
-          Result context = allocator_->Allocate();
-          ASSERT(context.is_valid());
-          __ movq(SlotOperand(slot, context.reg()), value.reg());
-          int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-          Result scratch = allocator_->Allocate();
-          ASSERT(scratch.is_valid());
-          frame_->Spill(context.reg());
-          frame_->Spill(value.reg());
-          __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg());
-        }
-      }
-    }
-
-    // Store the arguments object.  This must happen after context
-    // initialization because the arguments object may be stored in
-    // the context.
-    if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
-      StoreArgumentsObject(true);
-    }
-
-    // Initialize ThisFunction reference if present.
-    if (scope()->is_function_scope() && scope()->function() != NULL) {
-      frame_->Push(FACTORY->the_hole_value());
-      StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT);
-    }
-
-    // Initialize the function return target after the locals are set
-    // up, because it needs the expected frame height from the frame.
-    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
-    function_return_is_shadowed_ = false;
-
-    // Generate code to 'execute' declarations and initialize functions
-    // (source elements). In case of an illegal redeclaration we need to
-    // handle that instead of processing the declarations.
-    if (scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ illegal redeclarations");
-      scope()->VisitIllegalRedeclaration(this);
-    } else {
-      Comment cmnt(masm_, "[ declarations");
-      ProcessDeclarations(scope()->declarations());
-      // Bail out if a stack-overflow exception occurred when processing
-      // declarations.
-      if (HasStackOverflow()) return;
-    }
-
-    if (FLAG_trace) {
-      frame_->CallRuntime(Runtime::kTraceEnter, 0);
-      // Ignore the return value.
-    }
-    CheckStack();
-
-    // Compile the body of the function in a vanilla state. Don't
-    // bother compiling all the code if the scope has an illegal
-    // redeclaration.
-    if (!scope()->HasIllegalRedeclaration()) {
-      Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
-      bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
-      bool should_trace =
-          is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
-      if (should_trace) {
-        frame_->CallRuntime(Runtime::kDebugTrace, 0);
-        // Ignore the return value.
-      }
-#endif
-      VisitStatements(info->function()->body());
-
-      // Handle the return from the function.
-      if (has_valid_frame()) {
-        // If there is a valid frame, control flow can fall off the end of
-        // the body.  In that case there is an implicit return statement.
-        ASSERT(!function_return_is_shadowed_);
-        CodeForReturnPosition(info->function());
-        frame_->PrepareForReturn();
-        Result undefined(FACTORY->undefined_value());
-        if (function_return_.is_bound()) {
-          function_return_.Jump(&undefined);
-        } else {
-          function_return_.Bind(&undefined);
-          GenerateReturnSequence(&undefined);
-        }
-      } else if (function_return_.is_linked()) {
-        // If the return target has dangling jumps to it, then we have not
-        // yet generated the return sequence.  This can happen when (a)
-        // control does not flow off the end of the body so we did not
-        // compile an artificial return statement just above, and (b) there
-        // are return statements in the body but (c) they are all shadowed.
-        Result return_value;
-        function_return_.Bind(&return_value);
-        GenerateReturnSequence(&return_value);
-      }
-    }
-  }
-
-  // Adjust for function-level loop nesting.
-  ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0);
-  loop_nesting_ = 0;
-
-  // Code generation state must be reset.
-  ASSERT(state_ == NULL);
-  ASSERT(!function_return_is_shadowed_);
-  function_return_.Unuse();
-  DeleteFrame();
-
-  // Process any deferred code using the register allocator.
-  if (!HasStackOverflow()) {
-    info->isolate()->set_jump_target_compiling_deferred_code(true);
-    ProcessDeferred();
-    info->isolate()->set_jump_target_compiling_deferred_code(false);
-  }
-
-  // There is no need to delete the register allocator, it is a
-  // stack-allocated local.
-  allocator_ = NULL;
-}
-
-
-Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
-  // Currently, this assertion will fail if we try to assign to
-  // a constant variable that is constant because it is read-only
-  // (such as the variable referring to a named function expression).
-  // We need to implement assignments to read-only variables.
-  // Ideally, we should do this during AST generation (by converting
-  // such assignments into expression statements); however, in general
-  // we may not be able to make the decision until past AST generation,
-  // that is when the entire program is known.
-  ASSERT(slot != NULL);
-  int index = slot->index();
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-      return frame_->ParameterAt(index);
-
-    case Slot::LOCAL:
-      return frame_->LocalAt(index);
-
-    case Slot::CONTEXT: {
-      // Follow the context chain if necessary.
-      ASSERT(!tmp.is(rsi));  // do not overwrite context register
-      Register context = rsi;
-      int chain_length = scope()->ContextChainLength(slot->var()->scope());
-      for (int i = 0; i < chain_length; i++) {
-        // Load the closure.
-        // (All contexts, even 'with' contexts, have a closure,
-        // and it is the same for all contexts inside a function.
-        // There is no need to go to the function context first.)
-        __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
-        // Load the function context (which is the incoming, outer context).
-        __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
-        context = tmp;
-      }
-      // We may have a 'with' context now. Get the function context.
-      // (In fact this mov may never be needed, since the scope analysis
-      // may not permit a direct context access in this case and thus we are
-      // always at a function context. However, it is safe to dereference
-      // because the function context of a function context is itself. Before
-      // deleting this mov we should try to create a counter-example first,
-      // though...)
-      __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
-      return ContextOperand(tmp, index);
-    }
-
-    default:
-      UNREACHABLE();
-      return Operand(rsp, 0);
-  }
-}
-
-
-Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
-                                                         Result tmp,
-                                                         JumpTarget* slow) {
-  ASSERT(slot->type() == Slot::CONTEXT);
-  ASSERT(tmp.is_register());
-  Register context = rsi;
-
-  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
-                Immediate(0));
-        slow->Branch(not_equal, not_taken);
-      }
-      __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
-      __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-      context = tmp.reg();
-    }
-  }
-  // Check that last extension is NULL.
-  __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
-  slow->Branch(not_equal, not_taken);
-  __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
-  return ContextOperand(tmp.reg(), slot->index());
-}
-
-
-// Emit code to load the value of an expression to the top of the
-// frame. If the expression is boolean-valued it may be compiled (or
-// partially compiled) into control flow to the control destination.
-// If force_control is true, control flow is forced.
-void CodeGenerator::LoadCondition(Expression* expr,
-                                  ControlDestination* dest,
-                                  bool force_control) {
-  ASSERT(!in_spilled_code());
-  int original_height = frame_->height();
-
-  { CodeGenState new_state(this, dest);
-    Visit(expr);
-
-    // If we hit a stack overflow, we may not have actually visited
-    // the expression.  In that case, we ensure that we have a
-    // valid-looking frame state because we will continue to generate
-    // code as we unwind the C++ stack.
-    //
-    // It's possible to have both a stack overflow and a valid frame
-    // state (e.g., a subexpression overflowed, visiting it returned
-    // with a dummied frame state, and visiting this expression
-    // returned with a normal-looking state).
-    if (HasStackOverflow() &&
-        !dest->is_used() &&
-        frame_->height() == original_height) {
-      dest->Goto(true);
-    }
-  }
-
-  if (force_control && !dest->is_used()) {
-    // Convert the TOS value into flow to the control destination.
-    ToBoolean(dest);
-  }
-
-  ASSERT(!(force_control && !dest->is_used()));
-  ASSERT(dest->is_used() || frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  Load(expression);
-  frame_->SpillAll();
-  set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::Load(Expression* expr) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(!in_spilled_code());
-  JumpTarget true_target;
-  JumpTarget false_target;
-  ControlDestination dest(&true_target, &false_target, true);
-  LoadCondition(expr, &dest, false);
-
-  if (dest.false_was_fall_through()) {
-    // The false target was just bound.
-    JumpTarget loaded;
-    frame_->Push(FACTORY->false_value());
-    // There may be dangling jumps to the true target.
-    if (true_target.is_linked()) {
-      loaded.Jump();
-      true_target.Bind();
-      frame_->Push(FACTORY->true_value());
-      loaded.Bind();
-    }
-
-  } else if (dest.is_used()) {
-    // There is true, and possibly false, control flow (with true as
-    // the fall through).
-    JumpTarget loaded;
-    frame_->Push(FACTORY->true_value());
-    if (false_target.is_linked()) {
-      loaded.Jump();
-      false_target.Bind();
-      frame_->Push(FACTORY->false_value());
-      loaded.Bind();
-    }
-
-  } else {
-    // We have a valid value on top of the frame, but we still may
-    // have dangling jumps to the true and false targets from nested
-    // subexpressions (e.g., the left subexpressions of the
-    // short-circuited boolean operators).
-    ASSERT(has_valid_frame());
-    if (true_target.is_linked() || false_target.is_linked()) {
-      JumpTarget loaded;
-      loaded.Jump();  // Don't lose the current TOS.
-      if (true_target.is_linked()) {
-        true_target.Bind();
-        frame_->Push(FACTORY->true_value());
-        if (false_target.is_linked()) {
-          loaded.Jump();
-        }
-      }
-      if (false_target.is_linked()) {
-        false_target.Bind();
-        frame_->Push(FACTORY->false_value());
-      }
-      loaded.Bind();
-    }
-  }
-
-  ASSERT(has_valid_frame());
-  ASSERT(frame_->height() == original_height + 1);
-}
-
-
-void CodeGenerator::LoadGlobal() {
-  if (in_spilled_code()) {
-    frame_->EmitPush(GlobalObjectOperand());
-  } else {
-    Result temp = allocator_->Allocate();
-    __ movq(temp.reg(), GlobalObjectOperand());
-    frame_->Push(&temp);
-  }
-}
-
-
-void CodeGenerator::LoadGlobalReceiver() {
-  Result temp = allocator_->Allocate();
-  Register reg = temp.reg();
-  __ movq(reg, GlobalObjectOperand());
-  __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
-  frame_->Push(&temp);
-}
-
-
-void CodeGenerator::LoadTypeofExpression(Expression* expr) {
-  // Special handling of identifiers as subexpressions of typeof.
-  Variable* variable = expr->AsVariableProxy()->AsVariable();
-  if (variable != NULL && !variable->is_this() && variable->is_global()) {
-    // For a global variable we build the property reference
-    // <global>.<variable> and perform a (regular non-contextual) property
-    // load to make sure we do not get reference errors.
-    Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
-    Literal key(variable->name());
-    Property property(&global, &key, RelocInfo::kNoPosition);
-    Reference ref(this, &property);
-    ref.GetValue();
-  } else if (variable != NULL && variable->AsSlot() != NULL) {
-    // For a variable that rewrites to a slot, we signal it is the immediate
-    // subexpression of a typeof.
-    LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF);
-  } else {
-    // Anything else can be handled normally.
-    Load(expr);
-  }
-}
-
-
-ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
-  if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
-
-  // In strict mode there is no need for shadow arguments.
-  ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode());
-  // We don't want to do lazy arguments allocation for functions that
-  // have heap-allocated contexts, because it interferes with the
-  // uninitialized const tracking in the context objects.
-  return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode())
-      ? EAGER_ARGUMENTS_ALLOCATION
-      : LAZY_ARGUMENTS_ALLOCATION;
-}
-
-
-Result CodeGenerator::StoreArgumentsObject(bool initial) {
-  ArgumentsAllocationMode mode = ArgumentsMode();
-  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
-
-  Comment cmnt(masm_, "[ store arguments object");
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
-    // When using lazy arguments allocation, we store the arguments marker
-    // value as a sentinel indicating that the arguments object hasn't been
-    // allocated yet.
-    frame_->Push(FACTORY->arguments_marker());
-  } else {
-    ArgumentsAccessStub stub(is_strict_mode()
-        ? ArgumentsAccessStub::NEW_STRICT
-        : ArgumentsAccessStub::NEW_NON_STRICT);
-    frame_->PushFunction();
-    frame_->PushReceiverSlotAddress();
-    frame_->Push(Smi::FromInt(scope()->num_parameters()));
-    Result result = frame_->CallStub(&stub, 3);
-    frame_->Push(&result);
-  }
-
-  Variable* arguments = scope()->arguments();
-  Variable* shadow = scope()->arguments_shadow();
-  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
-  ASSERT((shadow != NULL && shadow->AsSlot() != NULL) ||
-         scope()->is_strict_mode());
-
-  JumpTarget done;
-  bool skip_arguments = false;
-  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
-    // We have to skip storing into the arguments slot if it has
-    // already been written to. This can happen if a function
-    // has a local variable named 'arguments'.
-    LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF);
-    Result probe = frame_->Pop();
-    if (probe.is_constant()) {
-      // We have to skip updating the arguments object if it has
-      // been assigned a proper value.
-      skip_arguments = !probe.handle()->IsArgumentsMarker();
-    } else {
-      __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
-      probe.Unuse();
-      done.Branch(not_equal);
-    }
-  }
-  if (!skip_arguments) {
-    StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
-    if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
-  }
-  if (shadow != NULL) {
-    StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
-  }
-  return frame_->Pop();
-}
-
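StoreArgumentsObject above is an instance of lazy initialization through a sentinel: the marker value is stored first, and the real arguments object only replaces it if the slot still holds the marker. A hedged sketch of the same pattern in plain C++ (EnsureArguments and the marker object are illustrative names, not V8 API):

#include <cassert>

struct Object {};
static Object kArgumentsMarker;  // unique sentinel, like the arguments marker

// Returns the slot's final value, allocating lazily on first real use.
// Illustrative helper, not V8 API.
Object* EnsureArguments(Object** slot, Object* (*allocate)()) {
  // Skip the update if the slot already holds a proper value, e.g. because
  // a local variable named 'arguments' was assigned first.
  if (*slot != &kArgumentsMarker) return *slot;
  *slot = allocate();
  return *slot;
}

static Object real_arguments;
static Object* Allocate() { return &real_arguments; }

int main() {
  Object* slot = &kArgumentsMarker;  // the initial sentinel store
  assert(EnsureArguments(&slot, Allocate) == &real_arguments);
  assert(EnsureArguments(&slot, Allocate) == &real_arguments);  // idempotent
  return 0;
}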
-// -------------------------------------------------------------------------
-// CodeGenerator implementation of variables, lookups, and stores.
-
-Reference::Reference(CodeGenerator* cgen,
-                     Expression* expression,
-                     bool persist_after_get)
-    : cgen_(cgen),
-      expression_(expression),
-      type_(ILLEGAL),
-      persist_after_get_(persist_after_get) {
-  cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
-  ASSERT(is_unloaded() || is_illegal());
-}
-
-
-void CodeGenerator::LoadReference(Reference* ref) {
-  // References are loaded from both spilled and unspilled code.  Set the
-  // state to unspilled to allow that (and explicitly spill after
-  // construction at the construction sites).
-  bool was_in_spilled_code = in_spilled_code_;
-  in_spilled_code_ = false;
-
-  Comment cmnt(masm_, "[ LoadReference");
-  Expression* e = ref->expression();
-  Property* property = e->AsProperty();
-  Variable* var = e->AsVariableProxy()->AsVariable();
-
-  if (property != NULL) {
-    // The expression is either a property or a variable proxy that rewrites
-    // to a property.
-    Load(property->obj());
-    if (property->key()->IsPropertyName()) {
-      ref->set_type(Reference::NAMED);
-    } else {
-      Load(property->key());
-      ref->set_type(Reference::KEYED);
-    }
-  } else if (var != NULL) {
-    // The expression is a variable proxy that does not rewrite to a
-    // property.  Global variables are treated as named property references.
-    if (var->is_global()) {
-      // If rax is free, the register allocator prefers it.  Thus the code
-      // generator will load the global object into rax, which is where
-      // LoadIC wants it.  Most uses of Reference call LoadIC directly
-      // after the reference is created.
-      frame_->Spill(rax);
-      LoadGlobal();
-      ref->set_type(Reference::NAMED);
-    } else {
-      ASSERT(var->AsSlot() != NULL);
-      ref->set_type(Reference::SLOT);
-    }
-  } else {
-    // Anything else is a runtime error.
-    Load(e);
-    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
-  }
-
-  in_spilled_code_ = was_in_spilled_code;
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
-  // Pop a reference from the stack while preserving TOS.
-  Comment cmnt(masm_, "[ UnloadReference");
-  frame_->Nip(ref->size());
-  ref->set_unloaded();
-}
-
-
-// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
-// convert it to a boolean in the condition code register or jump to
-// 'false_target'/'true_target' as appropriate.
-void CodeGenerator::ToBoolean(ControlDestination* dest) {
-  Comment cmnt(masm_, "[ ToBoolean");
-
-  // The value to convert should be popped from the frame.
-  Result value = frame_->Pop();
-  value.ToRegister();
-
-  if (value.is_number()) {
-    // Fast case if TypeInfo indicates only numbers.
-    if (FLAG_debug_code) {
-      __ AbortIfNotNumber(value.reg());
-    }
-    // Smi => false iff zero.
-    __ Cmp(value.reg(), Smi::FromInt(0));
-    if (value.is_smi()) {
-      value.Unuse();
-      dest->Split(not_zero);
-    } else {
-      dest->false_target()->Branch(equal);
-      Condition is_smi = masm_->CheckSmi(value.reg());
-      dest->true_target()->Branch(is_smi);
-      __ xorpd(xmm0, xmm0);
-      __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
-      value.Unuse();
-      dest->Split(not_zero);
-    }
-  } else {
-    // Fast case checks.
-    // 'false' => false.
-    __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
-    dest->false_target()->Branch(equal);
-
-    // 'true' => true.
-    __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
-    dest->true_target()->Branch(equal);
-
-    // 'undefined' => false.
-    __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
-    dest->false_target()->Branch(equal);
-
-    // Smi => false iff zero.
-    __ Cmp(value.reg(), Smi::FromInt(0));
-    dest->false_target()->Branch(equal);
-    Condition is_smi = masm_->CheckSmi(value.reg());
-    dest->true_target()->Branch(is_smi);
-
-    // Call the stub for all other cases.
-    frame_->Push(&value);  // Undo the Pop() from above.
-    ToBooleanStub stub;
-    Result temp = frame_->CallStub(&stub, 1);
-    // Convert the result to a condition code.
-    __ testq(temp.reg(), temp.reg());
-    temp.Unuse();
-    dest->Split(not_equal);
-  }
-}
-
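The fast cases above mirror ECMA-262 section 9.2. A minimal sketch of the same decision order on an illustrative tagged value (V8's real values are tagged pointers; this only shows the ordering of the checks):

#include <cassert>
#include <cmath>

// Illustrative tagged value; V8's real representation is a tagged pointer.
struct Value {
  enum Tag { FALSE_VALUE, TRUE_VALUE, UNDEFINED, SMI, HEAP_NUMBER, OTHER };
  Tag tag;
  double number;  // payload for SMI / HEAP_NUMBER
};

bool ToBoolean(const Value& v) {
  switch (v.tag) {
    case Value::FALSE_VALUE: return false;          // 'false' => false
    case Value::TRUE_VALUE:  return true;           // 'true' => true
    case Value::UNDEFINED:   return false;          // 'undefined' => false
    case Value::SMI:         return v.number != 0;  // smi => false iff zero
    case Value::HEAP_NUMBER:                        // +/-0 and NaN => false
      return v.number != 0 && !std::isnan(v.number);
    default:
      // Objects are true; strings etc. go to the ToBooleanStub slow path
      // in the real code and are not modeled here.
      return true;
  }
}

int main() {
  Value zero = { Value::SMI, 0 };
  Value num = { Value::HEAP_NUMBER, 3.5 };
  assert(!ToBoolean(zero) && ToBoolean(num));
  return 0;
}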
-
-// Call the specialized stub for a binary operation.
-class DeferredInlineBinaryOperation: public DeferredCode {
- public:
-  DeferredInlineBinaryOperation(Token::Value op,
-                                Register dst,
-                                Register left,
-                                Register right,
-                                OverwriteMode mode)
-      : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
-    set_comment("[ DeferredInlineBinaryOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Token::Value op_;
-  Register dst_;
-  Register left_;
-  Register right_;
-  OverwriteMode mode_;
-};
-
-
-void DeferredInlineBinaryOperation::Generate() {
-  Label done;
-  if ((op_ == Token::ADD)
-      || (op_ == Token::SUB)
-      || (op_ == Token::MUL)
-      || (op_ == Token::DIV)) {
-    Label call_runtime;
-    Label left_smi, right_smi, load_right, do_op;
-    __ JumpIfSmi(left_, &left_smi);
-    __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &call_runtime);
-    __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
-    if (mode_ == OVERWRITE_LEFT) {
-      __ movq(dst_, left_);
-    }
-    __ jmp(&load_right);
-
-    __ bind(&left_smi);
-    __ SmiToInteger32(left_, left_);
-    __ cvtlsi2sd(xmm0, left_);
-    __ Integer32ToSmi(left_, left_);
-    if (mode_ == OVERWRITE_LEFT) {
-      Label alloc_failure;
-      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
-    }
-
-    __ bind(&load_right);
-    __ JumpIfSmi(right_, &right_smi);
-    __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &call_runtime);
-    __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
-    if (mode_ == OVERWRITE_RIGHT) {
-      __ movq(dst_, right_);
-    } else if (mode_ == NO_OVERWRITE) {
-      Label alloc_failure;
-      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
-    }
-    __ jmp(&do_op);
-
-    __ bind(&right_smi);
-    __ SmiToInteger32(right_, right_);
-    __ cvtlsi2sd(xmm1, right_);
-    __ Integer32ToSmi(right_, right_);
-    if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
-      Label alloc_failure;
-      __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
-    }
-
-    __ bind(&do_op);
-    switch (op_) {
-      case Token::ADD: __ addsd(xmm0, xmm1); break;
-      case Token::SUB: __ subsd(xmm0, xmm1); break;
-      case Token::MUL: __ mulsd(xmm0, xmm1); break;
-      case Token::DIV: __ divsd(xmm0, xmm1); break;
-      default: UNREACHABLE();
-    }
-    __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
-    __ jmp(&done);
-
-    __ bind(&call_runtime);
-  }
-  GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
-  stub.GenerateCall(masm_, left_, right_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-  __ bind(&done);
-}
-
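DeferredCode objects such as the one above implement a fast-path/slow-path split: inline code handles the common smi/heap-number case and branches to out-of-line deferred code otherwise, keeping the hot instruction stream short. A rough sketch of that control shape in plain C++, with SlowBinaryOp standing in for the GenericBinaryOpStub call:

#include <cassert>
#include <cstdint>

// SlowBinaryOp stands in for the GenericBinaryOpStub call made by the
// deferred code; here it just computes the result in full width.
int64_t SlowBinaryOp(int64_t left, int64_t right) { return left + right; }

bool FitsSmallInt(int64_t v) { return v >= INT32_MIN && v <= INT32_MAX; }

int64_t FastAdd(int64_t left, int64_t right) {
  if (!FitsSmallInt(left) || !FitsSmallInt(right)) {
    return SlowBinaryOp(left, right);  // jump to deferred->entry_label()
  }
  int64_t result = left + right;       // the inline fast path
  if (!FitsSmallInt(result)) {
    return SlowBinaryOp(left, right);  // overflow also defers
  }
  return result;                       // deferred->BindExit() joins here
}

int main() {
  assert(FastAdd(2, 3) == 5);
  assert(FastAdd(INT32_MAX, 1) == static_cast<int64_t>(INT32_MAX) + 1);
  return 0;
}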
-
-static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
-                                  Token::Value op,
-                                  const Result& right,
-                                  const Result& left) {
-  // Set TypeInfo of result according to the operation performed.
-  // We rely on the fact that smis have a 32 bit payload on x64.
-  STATIC_ASSERT(kSmiValueSize == 32);
-  switch (op) {
-    case Token::COMMA:
-      return right.type_info();
-    case Token::OR:
-    case Token::AND:
-      // Result type can be either of the two input types.
-      return operands_type;
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-      // Result is always a smi.
-      return TypeInfo::Smi();
-    case Token::SAR:
-    case Token::SHL:
-      // Result is always a smi.
-      return TypeInfo::Smi();
-    case Token::SHR:
-      // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
-      return (right.is_constant() && right.handle()->IsSmi() &&
-              (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
-          ? TypeInfo::Smi()
-          : TypeInfo::Number();
-    case Token::ADD:
-      if (operands_type.IsNumber()) {
-        return TypeInfo::Number();
-      } else if (left.type_info().IsString() || right.type_info().IsString()) {
-        return TypeInfo::String();
-      } else {
-        return TypeInfo::Unknown();
-      }
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-      // Result is always a number.
-      return TypeInfo::Number();
-    default:
-      UNREACHABLE();
-  }
-  UNREACHABLE();
-  return TypeInfo::Unknown();
-}
-
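The Token::SHR rule above relies on the 32-bit smi payload asserted by the STATIC_ASSERT: once the masked shift count is at least 1, an unsigned 32-bit result is at most 2^31 - 1 and therefore always fits in a smi. A small check of that bound:

#include <cassert>
#include <cstdint>

int main() {
  // With a masked shift count >= 1, x >>> y fits in 31 bits, so it is
  // always representable in the 32-bit smi payload used on x64.
  uint32_t x = 0xFFFFFFFFu;                // worst-case input
  for (int shift = 1; shift < 32; shift++) {
    assert((x >> shift) <= 0x7FFFFFFFu);   // <= Smi::kMaxValue on x64
  }
  return 0;
}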
-
-void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
-                                           OverwriteMode overwrite_mode) {
-  Comment cmnt(masm_, "[ BinaryOperation");
-  Token::Value op = expr->op();
-  Comment cmnt_token(masm_, Token::String(op));
-
-  if (op == Token::COMMA) {
-    // Simply discard left value.
-    frame_->Nip(1);
-    return;
-  }
-
-  Result right = frame_->Pop();
-  Result left = frame_->Pop();
-
-  if (op == Token::ADD) {
-    const bool left_is_string = left.type_info().IsString();
-    const bool right_is_string = right.type_info().IsString();
-    // Make sure constant strings have string type info.
-    ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
-           left_is_string);
-    ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
-           right_is_string);
-    if (left_is_string || right_is_string) {
-      frame_->Push(&left);
-      frame_->Push(&right);
-      Result answer;
-      if (left_is_string) {
-        if (right_is_string) {
-          StringAddStub stub(NO_STRING_CHECK_IN_STUB);
-          answer = frame_->CallStub(&stub, 2);
-        } else {
-          answer =
-            frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
-        }
-      } else if (right_is_string) {
-        answer =
-          frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
-      }
-      answer.set_type_info(TypeInfo::String());
-      frame_->Push(&answer);
-      return;
-    }
-    // Neither operand is known to be a string.
-  }
-
-  bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
-  bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
-  bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
-  bool right_is_non_smi_constant =
-      right.is_constant() && !right.handle()->IsSmi();
-
-  if (left_is_smi_constant && right_is_smi_constant) {
-    // Compute the constant result at compile time, and leave it on the frame.
-    int left_int = Smi::cast(*left.handle())->value();
-    int right_int = Smi::cast(*right.handle())->value();
-    if (FoldConstantSmis(op, left_int, right_int)) return;
-  }
-
-  // Get number type of left and right sub-expressions.
-  TypeInfo operands_type =
-      TypeInfo::Combine(left.type_info(), right.type_info());
-
-  TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
-
-  Result answer;
-  if (left_is_non_smi_constant || right_is_non_smi_constant) {
-    // Go straight to the slow case, with no smi code.
-    GenericBinaryOpStub stub(op,
-                             overwrite_mode,
-                             NO_SMI_CODE_IN_STUB,
-                             operands_type);
-    answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
-  } else if (right_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
-                                        false, overwrite_mode);
-  } else if (left_is_smi_constant) {
-    answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
-                                        true, overwrite_mode);
-  } else {
-    // Set the flags based on the operation, type and loop nesting level.
-    // Bit operations are assumed to likely operate on smis, but the inline
-    // Smi check code is still only generated if this operation is part of
-    // a loop. For all other operations, the inline Smi check code is only
-    // generated for likely smis when the operation is part of a loop.
-    if (loop_nesting() > 0 &&
-        (Token::IsBitOp(op) ||
-         operands_type.IsInteger32() ||
-         expr->type()->IsLikelySmi())) {
-      answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
-    } else {
-      GenericBinaryOpStub stub(op,
-                               overwrite_mode,
-                               NO_GENERIC_BINARY_FLAGS,
-                               operands_type);
-      answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right);
-    }
-  }
-
-  answer.set_type_info(result_type);
-  frame_->Push(&answer);
-}
-
-
-bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
-  Object* answer_object = HEAP->undefined_value();
-  switch (op) {
-    case Token::ADD:
-      // Use intptr_t to detect overflow of 32-bit int.
-      if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
-        answer_object = Smi::FromInt(left + right);
-      }
-      break;
-    case Token::SUB:
-      // Use intptr_t to detect overflow of 32-bit int.
-      if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
-        answer_object = Smi::FromInt(left - right);
-      }
-      break;
-    case Token::MUL: {
-        double answer = static_cast<double>(left) * right;
-        if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
-          // If the product is zero and the non-zero factor is negative,
-          // the spec requires us to return floating point negative zero.
-          if (answer != 0 || (left >= 0 && right >= 0)) {
-            answer_object = Smi::FromInt(static_cast<int>(answer));
-          }
-        }
-      }
-      break;
-    case Token::DIV:
-    case Token::MOD:
-      break;
-    case Token::BIT_OR:
-      answer_object = Smi::FromInt(left | right);
-      break;
-    case Token::BIT_AND:
-      answer_object = Smi::FromInt(left & right);
-      break;
-    case Token::BIT_XOR:
-      answer_object = Smi::FromInt(left ^ right);
-      break;
-
-    case Token::SHL: {
-        int shift_amount = right & 0x1F;
-        if (Smi::IsValid(left << shift_amount)) {
-          answer_object = Smi::FromInt(left << shift_amount);
-        }
-        break;
-      }
-    case Token::SHR: {
-        int shift_amount = right & 0x1F;
-        unsigned int unsigned_left = left;
-        unsigned_left >>= shift_amount;
-        if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
-          answer_object = Smi::FromInt(unsigned_left);
-        }
-        break;
-      }
-    case Token::SAR: {
-        int shift_amount = right & 0x1F;
-        unsigned int unsigned_left = left;
-        if (left < 0) {
-          // Perform an arithmetic shift of a negative number by
-          // complementing the number, shifting logically, and
-          // complementing again.
-          unsigned_left = ~unsigned_left;
-          unsigned_left >>= shift_amount;
-          unsigned_left = ~unsigned_left;
-        } else {
-          unsigned_left >>= shift_amount;
-        }
-        ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
-        answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
-        break;
-      }
-    default:
-      UNREACHABLE();
-      break;
-  }
-  if (answer_object->IsUndefined()) {
-    return false;
-  }
-  frame_->Push(Handle<Object>(answer_object));
-  return true;
-}
-
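Two details of the folding code above deserve emphasis: 32-bit overflow is detected by performing the arithmetic in a wider type, and arithmetic right shift of a negative value is built from complement / logical shift / complement, since '>>' on a negative signed int is implementation-defined in pre-C++20 C++. A standalone check of both:

#include <cassert>
#include <cstdint>

// Arithmetic shift right via complement / logical shift / complement,
// mirroring the Token::SAR case above.
int32_t ArithmeticShiftRight(int32_t left, int shift_amount) {
  uint32_t unsigned_left = static_cast<uint32_t>(left);
  if (left < 0) {
    unsigned_left = ~((~unsigned_left) >> shift_amount);
  } else {
    unsigned_left >>= shift_amount;
  }
  return static_cast<int32_t>(unsigned_left);
}

int main() {
  // Widening to 64 bits detects 32-bit overflow, as in the ADD/SUB cases.
  int32_t a = INT32_MAX, b = 1;
  int64_t wide = static_cast<int64_t>(a) + b;
  assert(wide > INT32_MAX);  // would not have folded to a smi

  assert(ArithmeticShiftRight(-8, 1) == -4);
  assert(ArithmeticShiftRight(-1, 5) == -1);
  assert(ArithmeticShiftRight(8, 2) == 2);
  return 0;
}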
-
-void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
-                                               Result* right,
-                                               JumpTarget* both_smi) {
-  TypeInfo left_info = left->type_info();
-  TypeInfo right_info = right->type_info();
-  if (left_info.IsDouble() || left_info.IsString() ||
-      right_info.IsDouble() || right_info.IsString()) {
-    // We know that left and right are not both smi.  Don't do any tests.
-    return;
-  }
-
-  if (left->reg().is(right->reg())) {
-    if (!left_info.IsSmi()) {
-      Condition is_smi = masm()->CheckSmi(left->reg());
-      both_smi->Branch(is_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
-      left->Unuse();
-      right->Unuse();
-      both_smi->Jump();
-    }
-  } else if (!left_info.IsSmi()) {
-    if (!right_info.IsSmi()) {
-      Condition is_smi = masm()->CheckBothSmi(left->reg(), right->reg());
-      both_smi->Branch(is_smi);
-    } else {
-      Condition is_smi = masm()->CheckSmi(left->reg());
-      both_smi->Branch(is_smi);
-    }
-  } else {
-    if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
-    if (!right_info.IsSmi()) {
-      Condition is_smi = masm()->CheckSmi(right->reg());
-      both_smi->Branch(is_smi);
-    } else {
-      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
-      left->Unuse();
-      right->Unuse();
-      both_smi->Jump();
-    }
-  }
-}
-
-
-void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
-                                              TypeInfo type,
-                                              DeferredCode* deferred) {
-  if (!type.IsSmi()) {
-    __ JumpIfNotSmi(reg, deferred->entry_label());
-  }
-  if (FLAG_debug_code) {
-    __ AbortIfNotSmi(reg);
-  }
-}
-
-
-void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                                  Register right,
-                                                  TypeInfo left_info,
-                                                  TypeInfo right_info,
-                                                  DeferredCode* deferred) {
-  if (!left_info.IsSmi() && !right_info.IsSmi()) {
-    __ JumpIfNotBothSmi(left, right, deferred->entry_label());
-  } else if (!left_info.IsSmi()) {
-    __ JumpIfNotSmi(left, deferred->entry_label());
-  } else if (!right_info.IsSmi()) {
-    __ JumpIfNotSmi(right, deferred->entry_label());
-  }
-  if (FLAG_debug_code) {
-    __ AbortIfNotSmi(left);
-    __ AbortIfNotSmi(right);
-  }
-}
-
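The helpers above lean on V8's pointer tagging: a smi has its low tag bit clear, so a single mask test classifies a value, and OR-ing two values first tests both at once. A hedged sketch with simplified tag constants (on x64 the smi payload sits in the upper 32 bits):

#include <cassert>
#include <cstdint>

const uint64_t kSmiTagMask = 1;  // low bit: 0 = smi, 1 = heap object

bool IsSmi(uint64_t value) { return (value & kSmiTagMask) == 0; }

// Both are smis iff the OR of the two values still has a clear tag bit,
// so one test classifies both operands at once.
bool AreBothSmi(uint64_t a, uint64_t b) {
  return ((a | b) & kSmiTagMask) == 0;
}

int main() {
  uint64_t smi_42 = static_cast<uint64_t>(42) << 32;  // payload in upper half
  uint64_t heap_object = 0x1001;                      // tagged pointer
  assert(IsSmi(smi_42) && !IsSmi(heap_object));
  assert(AreBothSmi(smi_42, smi_42) && !AreBothSmi(smi_42, heap_object));
  return 0;
}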
-
-// Implements a binary operation using a deferred code object and some
-// inline code to operate on smis quickly.
-Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
-                                               Result* left,
-                                               Result* right,
-                                               OverwriteMode overwrite_mode) {
-  // Copy the type info because left and right may be overwritten.
-  TypeInfo left_type_info = left->type_info();
-  TypeInfo right_type_info = right->type_info();
-  Token::Value op = expr->op();
-  Result answer;
-  // Special handling of div and mod because they use fixed registers.
-  if (op == Token::DIV || op == Token::MOD) {
-    // We need rax as the quotient register, rdx as the remainder
-    // register, neither left nor right in rax or rdx, and left copied
-    // to rax.
-    Result quotient;
-    Result remainder;
-    bool left_is_in_rax = false;
-    // Step 1: get rax for quotient.
-    if ((left->is_register() && left->reg().is(rax)) ||
-        (right->is_register() && right->reg().is(rax))) {
-      // One or both is in rax.  Use a fresh non-rdx register for
-      // them.
-      Result fresh = allocator_->Allocate();
-      ASSERT(fresh.is_valid());
-      if (fresh.reg().is(rdx)) {
-        remainder = fresh;
-        fresh = allocator_->Allocate();
-        ASSERT(fresh.is_valid());
-      }
-      if (left->is_register() && left->reg().is(rax)) {
-        quotient = *left;
-        *left = fresh;
-        left_is_in_rax = true;
-      }
-      if (right->is_register() && right->reg().is(rax)) {
-        quotient = *right;
-        *right = fresh;
-      }
-      __ movq(fresh.reg(), rax);
-    } else {
-      // Neither left nor right is in rax.
-      quotient = allocator_->Allocate(rax);
-    }
-    ASSERT(quotient.is_register() && quotient.reg().is(rax));
-    ASSERT(!(left->is_register() && left->reg().is(rax)));
-    ASSERT(!(right->is_register() && right->reg().is(rax)));
-
-    // Step 2: get rdx for remainder if necessary.
-    if (!remainder.is_valid()) {
-      if ((left->is_register() && left->reg().is(rdx)) ||
-          (right->is_register() && right->reg().is(rdx))) {
-        Result fresh = allocator_->Allocate();
-        ASSERT(fresh.is_valid());
-        if (left->is_register() && left->reg().is(rdx)) {
-          remainder = *left;
-          *left = fresh;
-        }
-        if (right->is_register() && right->reg().is(rdx)) {
-          remainder = *right;
-          *right = fresh;
-        }
-        __ movq(fresh.reg(), rdx);
-      } else {
-        // Neither left nor right is in rdx.
-        remainder = allocator_->Allocate(rdx);
-      }
-    }
-    ASSERT(remainder.is_register() && remainder.reg().is(rdx));
-    ASSERT(!(left->is_register() && left->reg().is(rdx)));
-    ASSERT(!(right->is_register() && right->reg().is(rdx)));
-
-    left->ToRegister();
-    right->ToRegister();
-    frame_->Spill(rax);
-    frame_->Spill(rdx);
-
-    // Check that left and right are smi tagged.
-    DeferredInlineBinaryOperation* deferred =
-        new DeferredInlineBinaryOperation(op,
-                                          (op == Token::DIV) ? rax : rdx,
-                                          left->reg(),
-                                          right->reg(),
-                                          overwrite_mode);
-    JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
-                                  left_type_info, right_type_info, deferred);
-
-    if (op == Token::DIV) {
-      __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
-      deferred->BindExit();
-      left->Unuse();
-      right->Unuse();
-      answer = quotient;
-    } else {
-      ASSERT(op == Token::MOD);
-      __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
-      deferred->BindExit();
-      left->Unuse();
-      right->Unuse();
-      answer = remainder;
-    }
-    ASSERT(answer.is_valid());
-    return answer;
-  }
-
-  // Special handling of shift operations because they use fixed
-  // registers.
-  if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
-    // Move left out of rcx if necessary.
-    if (left->is_register() && left->reg().is(rcx)) {
-      *left = allocator_->Allocate();
-      ASSERT(left->is_valid());
-      __ movq(left->reg(), rcx);
-    }
-    right->ToRegister(rcx);
-    left->ToRegister();
-    ASSERT(left->is_register() && !left->reg().is(rcx));
-    ASSERT(right->is_register() && right->reg().is(rcx));
-
-    // We will modify right, it must be spilled.
-    frame_->Spill(rcx);
-
-    // Use a fresh answer register to avoid spilling the left operand.
-    answer = allocator_->Allocate();
-    ASSERT(answer.is_valid());
-    // Check that both operands are smis using the answer register as a
-    // temporary.
-    DeferredInlineBinaryOperation* deferred =
-        new DeferredInlineBinaryOperation(op,
-                                          answer.reg(),
-                                          left->reg(),
-                                          rcx,
-                                          overwrite_mode);
-
-    Label do_op;
-    // Left operand must be unchanged in left->reg() for deferred code.
-    // Left operand is in answer.reg(), possibly converted to int32, for
-    // inline code.
-    __ movq(answer.reg(), left->reg());
-    if (right_type_info.IsSmi()) {
-      if (FLAG_debug_code) {
-        __ AbortIfNotSmi(right->reg());
-      }
-      // If left is not known to be a smi, check if it is.
-      // If left is not known to be a number, and it isn't a smi, check if
-      // it is a HeapNumber.
-      if (!left_type_info.IsSmi()) {
-        __ JumpIfSmi(answer.reg(), &do_op);
-        if (!left_type_info.IsNumber()) {
-          // Branch if not a heapnumber.
-          __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
-                 FACTORY->heap_number_map());
-          deferred->Branch(not_equal);
-        }
-        // Load integer value into answer register using truncation.
-        __ cvttsd2si(answer.reg(),
-                     FieldOperand(answer.reg(), HeapNumber::kValueOffset));
-        // Branch if we might have overflowed.
-        // (False negative for Smi::kMinValue)
-        __ cmpl(answer.reg(), Immediate(0x80000000));
-        deferred->Branch(equal);
-        // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
-        __ Integer32ToSmi(answer.reg(), answer.reg());
-      } else {
-        // Fast case - both are actually smis.
-        if (FLAG_debug_code) {
-          __ AbortIfNotSmi(left->reg());
-        }
-      }
-    } else {
-      JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
-                                    left_type_info, right_type_info, deferred);
-    }
-    __ bind(&do_op);
-
-    // Perform the operation.
-    switch (op) {
-      case Token::SAR:
-        __ SmiShiftArithmeticRight(answer.reg(), answer.reg(), rcx);
-        break;
-      case Token::SHR: {
-        __ SmiShiftLogicalRight(answer.reg(),
-                                answer.reg(),
-                                rcx,
-                                deferred->entry_label());
-        break;
-      }
-      case Token::SHL: {
-        __ SmiShiftLeft(answer.reg(),
-                        answer.reg(),
-                        rcx);
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
-    deferred->BindExit();
-    left->Unuse();
-    right->Unuse();
-    ASSERT(answer.is_valid());
-    return answer;
-  }
-
-  // Handle the other binary operations.
-  left->ToRegister();
-  right->ToRegister();
-  // A newly allocated register answer is used to hold the answer.  The
-  // registers containing left and right are not modified so they don't
-  // need to be spilled in the fast case.
-  answer = allocator_->Allocate();
-  ASSERT(answer.is_valid());
-
-  // Perform the smi tag check.
-  DeferredInlineBinaryOperation* deferred =
-      new DeferredInlineBinaryOperation(op,
-                                        answer.reg(),
-                                        left->reg(),
-                                        right->reg(),
-                                        overwrite_mode);
-  JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
-                                left_type_info, right_type_info, deferred);
-
-  switch (op) {
-    case Token::ADD:
-      __ SmiAdd(answer.reg(),
-                left->reg(),
-                right->reg(),
-                deferred->entry_label());
-      break;
-
-    case Token::SUB:
-      __ SmiSub(answer.reg(),
-                left->reg(),
-                right->reg(),
-                deferred->entry_label());
-      break;
-
-    case Token::MUL: {
-      __ SmiMul(answer.reg(),
-                left->reg(),
-                right->reg(),
-                deferred->entry_label());
-      break;
-    }
-
-    case Token::BIT_OR:
-      __ SmiOr(answer.reg(), left->reg(), right->reg());
-      break;
-
-    case Token::BIT_AND:
-      __ SmiAnd(answer.reg(), left->reg(), right->reg());
-      break;
-
-    case Token::BIT_XOR:
-      __ SmiXor(answer.reg(), left->reg(), right->reg());
-      break;
-
-    default:
-      UNREACHABLE();
-      break;
-  }
-  deferred->BindExit();
-  left->Unuse();
-  right->Unuse();
-  ASSERT(answer.is_valid());
-  return answer;
-}
-
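The register shuffling above exists because the x64 idiv instruction hard-wires its operands: the dividend lives in rdx:rax, the quotient is returned in rax, and the remainder in rdx (and, for the shift cases, variable shift counts must be in rcx/cl). A tiny illustration of that contract using GCC/Clang inline asm, x86-64 only and purely illustrative:

#include <cassert>

// Divide via the raw x64 contract: cqto sign-extends rax into rdx:rax,
// then idiv leaves the quotient in rax and the remainder in rdx.
void IDiv(long dividend, long divisor, long* quotient, long* remainder) {
  long q, r;
  asm("cqto\n\t"
      "idivq %3"
      : "=a"(q), "=&d"(r)            // rax = quotient, rdx = remainder
      : "a"(dividend), "r"(divisor)
      : "cc");
  *quotient = q;
  *remainder = r;
}

int main() {
  long q = 0, r = 0;
  IDiv(17, 5, &q, &r);
  assert(q == 3 && r == 2);
  return 0;
}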
-
-// Call the appropriate binary operation stub to compute src op value
-// and leave the result in dst.
-class DeferredInlineSmiOperation: public DeferredCode {
- public:
-  DeferredInlineSmiOperation(Token::Value op,
-                             Register dst,
-                             Register src,
-                             Smi* value,
-                             OverwriteMode overwrite_mode)
-      : op_(op),
-        dst_(dst),
-        src_(src),
-        value_(value),
-        overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Token::Value op_;
-  Register dst_;
-  Register src_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperation::Generate() {
-  // For mod we don't generate all the Smi code inline.
-  GenericBinaryOpStub stub(
-      op_,
-      overwrite_mode_,
-      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
-  stub.GenerateCall(masm_, src_, value_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// Call the appropriate binary operation stub to compute value op src
-// and leave the result in dst.
-class DeferredInlineSmiOperationReversed: public DeferredCode {
- public:
-  DeferredInlineSmiOperationReversed(Token::Value op,
-                                     Register dst,
-                                     Smi* value,
-                                     Register src,
-                                     OverwriteMode overwrite_mode)
-      : op_(op),
-        dst_(dst),
-        value_(value),
-        src_(src),
-        overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiOperationReversed");
-  }
-
-  virtual void Generate();
-
- private:
-  Token::Value op_;
-  Register dst_;
-  Smi* value_;
-  Register src_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiOperationReversed::Generate() {
-  GenericBinaryOpStub stub(
-      op_,
-      overwrite_mode_,
-      NO_SMI_CODE_IN_STUB);
-  stub.GenerateCall(masm_, value_, src_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// Call the appropriate binary operation stub to compute dst + value and
-// leave the result in dst.
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
-  DeferredInlineSmiAdd(Register dst,
-                       Smi* value,
-                       OverwriteMode overwrite_mode)
-      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiAdd");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAdd::Generate() {
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
-  igostub.GenerateCall(masm_, dst_, value_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// The result of value + src is in dst.  It either overflowed or was not
-// smi tagged.  Undo the speculative addition and call the appropriate
-// specialized stub for add.  The result is left in dst.
-class DeferredInlineSmiAddReversed: public DeferredCode {
- public:
-  DeferredInlineSmiAddReversed(Register dst,
-                               Smi* value,
-                               OverwriteMode overwrite_mode)
-      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiAddReversed");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiAddReversed::Generate() {
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
-  igostub.GenerateCall(masm_, value_, dst_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredInlineSmiSub: public DeferredCode {
- public:
-  DeferredInlineSmiSub(Register dst,
-                       Smi* value,
-                       OverwriteMode overwrite_mode)
-      : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
-    set_comment("[ DeferredInlineSmiSub");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Smi* value_;
-  OverwriteMode overwrite_mode_;
-};
-
-
-void DeferredInlineSmiSub::Generate() {
-  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
-  igostub.GenerateCall(masm_, dst_, value_);
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
-                                                 Result* operand,
-                                                 Handle<Object> value,
-                                                 bool reversed,
-                                                 OverwriteMode overwrite_mode) {
-  // Generate inline code for a binary operation when one of the
-  // operands is a constant smi.  Consumes the argument "operand".
-  if (IsUnsafeSmi(value)) {
-    Result unsafe_operand(value);
-    if (reversed) {
-      return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
-                                      overwrite_mode);
-    } else {
-      return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
-                                      overwrite_mode);
-    }
-  }
-
-  // Get the literal value.
-  Smi* smi_value = Smi::cast(*value);
-  int int_value = smi_value->value();
-
-  Token::Value op = expr->op();
-  Result answer;
-  switch (op) {
-    case Token::ADD: {
-      operand->ToRegister();
-      frame_->Spill(operand->reg());
-      DeferredCode* deferred = NULL;
-      if (reversed) {
-        deferred = new DeferredInlineSmiAddReversed(operand->reg(),
-                                                    smi_value,
-                                                    overwrite_mode);
-      } else {
-        deferred = new DeferredInlineSmiAdd(operand->reg(),
-                                            smi_value,
-                                            overwrite_mode);
-      }
-      JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                deferred);
-      __ SmiAddConstant(operand->reg(),
-                        operand->reg(),
-                        smi_value,
-                        deferred->entry_label());
-      deferred->BindExit();
-      answer = *operand;
-      break;
-    }
-
-    case Token::SUB: {
-      if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        answer = *operand;
-        DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
-                                                          smi_value,
-                                                          overwrite_mode);
-        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                  deferred);
-        // A smi currently fits in a 32-bit Immediate.
-        __ SmiSubConstant(operand->reg(),
-                          operand->reg(),
-                          smi_value,
-                          deferred->entry_label());
-        deferred->BindExit();
-        operand->Unuse();
-      }
-      break;
-    }
-
-    case Token::SAR:
-      if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        DeferredInlineSmiOperation* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           operand->reg(),
-                                           operand->reg(),
-                                           smi_value,
-                                           overwrite_mode);
-        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                  deferred);
-        __ SmiShiftArithmeticRightConstant(operand->reg(),
-                                           operand->reg(),
-                                           shift_value);
-        deferred->BindExit();
-        answer = *operand;
-      }
-      break;
-
-    case Token::SHR:
-      if (reversed) {
-        Result constant_operand(value);
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        answer = allocator()->Allocate();
-        ASSERT(answer.is_valid());
-        DeferredInlineSmiOperation* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           answer.reg(),
-                                           operand->reg(),
-                                           smi_value,
-                                           overwrite_mode);
-        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                  deferred);
-        __ SmiShiftLogicalRightConstant(answer.reg(),
-                                        operand->reg(),
-                                        shift_value,
-                                        deferred->entry_label());
-        deferred->BindExit();
-        operand->Unuse();
-      }
-      break;
-
-    case Token::SHL:
-      if (reversed) {
-        operand->ToRegister();
-
-        // We need rcx to be available to hold operand, and to be spilled.
-        // SmiShiftLeft implicitly modifies rcx.
-        if (operand->reg().is(rcx)) {
-          frame_->Spill(operand->reg());
-          answer = allocator()->Allocate();
-        } else {
-          Result rcx_reg = allocator()->Allocate(rcx);
-          // answer must not be rcx.
-          answer = allocator()->Allocate();
-          // rcx_reg goes out of scope.
-        }
-
-        DeferredInlineSmiOperationReversed* deferred =
-            new DeferredInlineSmiOperationReversed(op,
-                                                   answer.reg(),
-                                                   smi_value,
-                                                   operand->reg(),
-                                                   overwrite_mode);
-        JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                  deferred);
-
-        __ Move(answer.reg(), smi_value);
-        __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
-        operand->Unuse();
-
-        deferred->BindExit();
-      } else {
-        // Only the least significant 5 bits of the shift value are used.
-        // In the slow case, this masking is done inside the runtime call.
-        int shift_value = int_value & 0x1f;
-        operand->ToRegister();
-        if (shift_value == 0) {
-          // Spill operand so it can be overwritten in the slow case.
-          frame_->Spill(operand->reg());
-          DeferredInlineSmiOperation* deferred =
-              new DeferredInlineSmiOperation(op,
-                                             operand->reg(),
-                                             operand->reg(),
-                                             smi_value,
-                                             overwrite_mode);
-          JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                    deferred);
-          deferred->BindExit();
-          answer = *operand;
-        } else {
-          // Use a fresh temporary for nonzero shift values.
-          answer = allocator()->Allocate();
-          ASSERT(answer.is_valid());
-          DeferredInlineSmiOperation* deferred =
-              new DeferredInlineSmiOperation(op,
-                                             answer.reg(),
-                                             operand->reg(),
-                                             smi_value,
-                                             overwrite_mode);
-          JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                    deferred);
-          __ SmiShiftLeftConstant(answer.reg(),
-                                  operand->reg(),
-                                  shift_value);
-          deferred->BindExit();
-          operand->Unuse();
-        }
-      }
-      break;
-
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND: {
-      operand->ToRegister();
-      frame_->Spill(operand->reg());
-      if (reversed) {
-        // Bit operations with a constant smi are commutative.
-        // We can swap left and right operands with no problem.
-        // Swap left and right overwrite modes.  0->0, 1->2, 2->1.
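-        // ((2 * mode) % 3 maps NO_OVERWRITE (0) to itself and exchanges
-        // OVERWRITE_LEFT (1) with OVERWRITE_RIGHT (2).)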
-        overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
-      }
-      DeferredCode* deferred =  new DeferredInlineSmiOperation(op,
-                                                               operand->reg(),
-                                                               operand->reg(),
-                                                               smi_value,
-                                                               overwrite_mode);
-      JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
-                                deferred);
-      if (op == Token::BIT_AND) {
-        __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
-      } else if (op == Token::BIT_XOR) {
-        if (int_value != 0) {
-          __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
-        }
-      } else {
-        ASSERT(op == Token::BIT_OR);
-        if (int_value != 0) {
-          __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
-        }
-      }
-      deferred->BindExit();
-      answer = *operand;
-      break;
-    }
-
-    // Generate inline code for mod of powers of 2 and negative powers of 2.
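-    // For a non-negative smi x and a power of two p, x % p == x & (p - 1),
-    // e.g. 13 % 8 == 13 & 7 == 5.  In JavaScript the result takes the sign
-    // of the dividend, so only the magnitude of the divisor matters.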
-    case Token::MOD:
-      if (!reversed &&
-          int_value != 0 &&
-          (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
-        operand->ToRegister();
-        frame_->Spill(operand->reg());
-        DeferredCode* deferred =
-            new DeferredInlineSmiOperation(op,
-                                           operand->reg(),
-                                           operand->reg(),
-                                           smi_value,
-                                           overwrite_mode);
-        __ JumpUnlessNonNegativeSmi(operand->reg(), deferred->entry_label());
-        if (int_value < 0) int_value = -int_value;
-        if (int_value == 1) {
-          __ Move(operand->reg(), Smi::FromInt(0));
-        } else {
-          __ SmiAndConstant(operand->reg(),
-                            operand->reg(),
-                            Smi::FromInt(int_value - 1));
-        }
-        deferred->BindExit();
-        answer = *operand;
-        break;  // This break only applies if we generated code for MOD.
-      }
-      // Fall through if we did not find a power of 2 on the right hand side!
-      // The next case must be the default.
-
-    default: {
-      Result constant_operand(value);
-      if (reversed) {
-        answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
-                                          overwrite_mode);
-      } else {
-        answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
-                                          overwrite_mode);
-      }
-      break;
-    }
-  }
-  ASSERT(answer.is_valid());
-  return answer;
-}
-
-
-static bool CouldBeNaN(const Result& result) {
-  if (result.type_info().IsSmi()) return false;
-  if (result.type_info().IsInteger32()) return false;
-  if (!result.is_constant()) return true;
-  if (!result.handle()->IsHeapNumber()) return false;
-  return isnan(HeapNumber::cast(*result.handle())->value());
-}
-
-
-// Convert from signed to unsigned comparison to match the way EFLAGS are set
-// by FPU and XMM compare instructions.
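-// ucomisd sets ZF, PF and CF as an unsigned comparison would and clears OF
-// and SF, so the signed condition codes are meaningless after the compare.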
-static Condition DoubleCondition(Condition cc) {
-  switch (cc) {
-    case less:          return below;
-    case equal:         return equal;
-    case less_equal:    return below_equal;
-    case greater:       return above;
-    case greater_equal: return above_equal;
-    default:            UNREACHABLE();
-  }
-  UNREACHABLE();
-  return equal;
-}
-
-
-static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
-                                        bool inline_number_compare) {
-  CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
-  if (nan_info == kCantBothBeNaN) {
-    flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
-  }
-  if (inline_number_compare) {
-    flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
-  }
-  return flags;
-}
-
-
-void CodeGenerator::Comparison(AstNode* node,
-                               Condition cc,
-                               bool strict,
-                               ControlDestination* dest) {
-  // Strict only makes sense for equality comparisons.
-  ASSERT(!strict || cc == equal);
-
-  Result left_side;
-  Result right_side;
-  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
-  if (cc == greater || cc == less_equal) {
-    cc = ReverseCondition(cc);
-    left_side = frame_->Pop();
-    right_side = frame_->Pop();
-  } else {
-    right_side = frame_->Pop();
-    left_side = frame_->Pop();
-  }
-  ASSERT(cc == less || cc == equal || cc == greater_equal);
-
-  // If either side is a constant smi, optimize the comparison.
-  bool left_side_constant_smi = false;
-  bool left_side_constant_null = false;
-  bool left_side_constant_1_char_string = false;
-  if (left_side.is_constant()) {
-    left_side_constant_smi = left_side.handle()->IsSmi();
-    left_side_constant_null = left_side.handle()->IsNull();
-    left_side_constant_1_char_string =
-        (left_side.handle()->IsString() &&
-         String::cast(*left_side.handle())->length() == 1 &&
-         String::cast(*left_side.handle())->IsAsciiRepresentation());
-  }
-  bool right_side_constant_smi = false;
-  bool right_side_constant_null = false;
-  bool right_side_constant_1_char_string = false;
-  if (right_side.is_constant()) {
-    right_side_constant_smi = right_side.handle()->IsSmi();
-    right_side_constant_null = right_side.handle()->IsNull();
-    right_side_constant_1_char_string =
-        (right_side.handle()->IsString() &&
-         String::cast(*right_side.handle())->length() == 1 &&
-         String::cast(*right_side.handle())->IsAsciiRepresentation());
-  }
-
-  if (left_side_constant_smi || right_side_constant_smi) {
-    bool is_loop_condition = (node->AsExpression() != NULL) &&
-        node->AsExpression()->is_loop_condition();
-    ConstantSmiComparison(cc, strict, dest, &left_side, &right_side,
-                          left_side_constant_smi, right_side_constant_smi,
-                          is_loop_condition);
-  } else if (left_side_constant_1_char_string ||
-             right_side_constant_1_char_string) {
-    if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
-      // Trivial case, comparing two constants.
-      int left_value = String::cast(*left_side.handle())->Get(0);
-      int right_value = String::cast(*right_side.handle())->Get(0);
-      switch (cc) {
-        case less:
-          dest->Goto(left_value < right_value);
-          break;
-        case equal:
-          dest->Goto(left_value == right_value);
-          break;
-        case greater_equal:
-          dest->Goto(left_value >= right_value);
-          break;
-        default:
-          UNREACHABLE();
-      }
-    } else {
-      // Only one side is a constant 1 character string.
-      // If left side is a constant 1-character string, reverse the operands.
-      // Since one side is a constant string, conversion order does not matter.
-      if (left_side_constant_1_char_string) {
-        Result temp = left_side;
-        left_side = right_side;
-        right_side = temp;
-        cc = ReverseCondition(cc);
-        // This may reintroduce greater or less_equal as the value of cc.
-        // CompareStub and the inline code both support all values of cc.
-      }
-      // Implement comparison against a constant string, inlining the case
-      // where both sides are strings.
-      left_side.ToRegister();
-
-      // Here we split control flow to the stub call and inlined cases
-      // before finally splitting it to the control destination.  We use
-      // a jump target and branching to duplicate the virtual frame at
-      // the first split.  We manually handle the off-frame references
-      // by reconstituting them on the non-fall-through path.
-      JumpTarget is_not_string, is_string;
-      Register left_reg = left_side.reg();
-      Handle<Object> right_val = right_side.handle();
-      ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
-      Condition is_smi = masm()->CheckSmi(left_reg);
-      is_not_string.Branch(is_smi, &left_side);
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ movq(temp.reg(),
-              FieldOperand(left_reg, HeapObject::kMapOffset));
-      __ movzxbl(temp.reg(),
-                 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
-      // If we are testing for equality then make use of the symbol shortcut.
-      // Check if the left hand side has the same type as the right hand
-      // side (which is always a symbol).
-      if (cc == equal) {
-        Label not_a_symbol;
-        STATIC_ASSERT(kSymbolTag != 0);
-        // Ensure that no non-strings have the symbol bit set.
-        STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
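-        // Non-string types lie in [kNotStringTag, LAST_TYPE]; the assert
-        // keeps that range below kNotStringTag + kIsSymbolMask, so the test
-        // below is zero for every non-string.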
-        __ testb(temp.reg(), Immediate(kIsSymbolMask));  // Test the symbol bit.
-        __ j(zero, &not_a_symbol);
-        // They are symbols, so do identity compare.
-        __ Cmp(left_reg, right_side.handle());
-        dest->true_target()->Branch(equal);
-        dest->false_target()->Branch(not_equal);
-        __ bind(&not_a_symbol);
-      }
-      // Call the compare stub if the left side is not a flat ASCII string.
-      __ andb(temp.reg(),
-              Immediate(kIsNotStringMask |
-                        kStringRepresentationMask |
-                        kStringEncodingMask));
-      __ cmpb(temp.reg(),
-              Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
-      temp.Unuse();
-      is_string.Branch(equal, &left_side);
-
-      // Set up and call the compare stub.
-      is_not_string.Bind(&left_side);
-      CompareFlags flags =
-          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
-      CompareStub stub(cc, strict, flags);
-      Result result = frame_->CallStub(&stub, &left_side, &right_side);
-      result.ToRegister();
-      __ testq(result.reg(), result.reg());
-      result.Unuse();
-      dest->true_target()->Branch(cc);
-      dest->false_target()->Jump();
-
-      is_string.Bind(&left_side);
-      // left_side is a sequential ASCII string.
-      ASSERT(left_side.reg().is(left_reg));
-      right_side = Result(right_val);
-      Result temp2 = allocator_->Allocate();
-      ASSERT(temp2.is_valid());
-      // Test string equality and comparison.
-      if (cc == equal) {
-        Label comparison_done;
-        __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
-                      Smi::FromInt(1));
-        __ j(not_equal, &comparison_done);
-        uint8_t char_value =
-            static_cast<uint8_t>(String::cast(*right_val)->Get(0));
-        __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
-                Immediate(char_value));
-        __ bind(&comparison_done);
-      } else {
-        __ movq(temp2.reg(),
-                FieldOperand(left_side.reg(), String::kLengthOffset));
-        __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
-        Label comparison;
-        // If the length is 0 then the subtraction gave -1 which compares less
-        // than any character.
-        __ j(negative, &comparison);
-        // Otherwise load the first character.
-        __ movzxbl(temp2.reg(),
-                   FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
-        __ bind(&comparison);
-        // Compare the first character of the string with the
-        // constant 1-character string.
-        uint8_t char_value =
-            static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
-        __ cmpb(temp2.reg(), Immediate(char_value));
-        Label characters_were_different;
-        __ j(not_equal, &characters_were_different);
-        // If the first character is the same then the long string sorts after
-        // the short one.
-        __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
-                      Smi::FromInt(1));
-        __ bind(&characters_were_different);
-      }
-      temp2.Unuse();
-      left_side.Unuse();
-      right_side.Unuse();
-      dest->Split(cc);
-    }
-  } else {
-    // Neither side is a constant Smi, constant 1-char string, or constant null.
-    // If either side is a non-smi constant, or known to be a heap number,
-    // skip the smi check.
-    bool known_non_smi =
-        (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
-        (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
-        left_side.type_info().IsDouble() ||
-        right_side.type_info().IsDouble();
-
-    NaNInformation nan_info =
-        (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
-        kBothCouldBeNaN :
-        kCantBothBeNaN;
-
-    // Inline the number comparison, handling any combination of smis and
-    // heap numbers, if:
-    //   code is in a loop,
-    //   the compare operation is different from equal, and
-    //   the compare is not a for-loop condition.
-    // The reason for excluding equal is that it will most likely be done
-    // with smis (not heap numbers) and the code for comparing smis is
-    // inlined separately. The same reason applies to the for-loop condition,
-    // which will also most likely be a smi comparison.
-    bool is_loop_condition = (node->AsExpression() != NULL)
-        && node->AsExpression()->is_loop_condition();
-    bool inline_number_compare =
-        loop_nesting() > 0 && cc != equal && !is_loop_condition;
-
-    // Left and right needed in registers for the following code.
-    left_side.ToRegister();
-    right_side.ToRegister();
-
-    if (known_non_smi) {
-      // Inlined equality check:
-      // If at least one of the objects is not NaN, then if the objects
-      // are identical, they are equal.
-      if (nan_info == kCantBothBeNaN && cc == equal) {
-        __ cmpq(left_side.reg(), right_side.reg());
-        dest->true_target()->Branch(equal);
-      }
-
-      // Inlined number comparison:
-      if (inline_number_compare) {
-        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
-      }
-
-      // End of in-line compare, call out to the compare stub. Don't include
-      // number comparison in the stub if it was inlined.
-      CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
-      CompareStub stub(cc, strict, flags);
-      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-      __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flags.
-      answer.Unuse();
-      dest->Split(cc);
-    } else {
-      // Here we split control flow to the stub call and inlined cases
-      // before finally splitting it to the control destination.  We use
-      // a jump target and branching to duplicate the virtual frame at
-      // the first split.  We manually handle the off-frame references
-      // by reconstituting them on the non-fall-through path.
-      JumpTarget is_smi;
-      Register left_reg = left_side.reg();
-      Register right_reg = right_side.reg();
-
-      // In-line check for comparing two smis.
-      JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
-
-      if (has_valid_frame()) {
-        // Inline the equality check if both operands can't be a NaN. If both
-        // objects are the same they are equal.
-        if (nan_info == kCantBothBeNaN && cc == equal) {
-          __ cmpq(left_side.reg(), right_side.reg());
-          dest->true_target()->Branch(equal);
-        }
-
-        // Inlined number comparison:
-        if (inline_number_compare) {
-          GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
-        }
-
-        // End of in-line compare, call out to the compare stub. Don't include
-        // number comparison in the stub if it was inlined.
-        CompareFlags flags =
-            ComputeCompareFlags(nan_info, inline_number_compare);
-        CompareStub stub(cc, strict, flags);
-        Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-        __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flags.
-        answer.Unuse();
-        if (is_smi.is_linked()) {
-          dest->true_target()->Branch(cc);
-          dest->false_target()->Jump();
-        } else {
-          dest->Split(cc);
-        }
-      }
-
-      if (is_smi.is_linked()) {
-        is_smi.Bind();
-        left_side = Result(left_reg);
-        right_side = Result(right_reg);
-        __ SmiCompare(left_side.reg(), right_side.reg());
-        right_side.Unuse();
-        left_side.Unuse();
-        dest->Split(cc);
-      }
-    }
-  }
-}
-
-
-void CodeGenerator::ConstantSmiComparison(Condition cc,
-                                          bool strict,
-                                          ControlDestination* dest,
-                                          Result* left_side,
-                                          Result* right_side,
-                                          bool left_side_constant_smi,
-                                          bool right_side_constant_smi,
-                                          bool is_loop_condition) {
-  if (left_side_constant_smi && right_side_constant_smi) {
-    // Trivial case, comparing two constants.
-    int left_value = Smi::cast(*left_side->handle())->value();
-    int right_value = Smi::cast(*right_side->handle())->value();
-    switch (cc) {
-      case less:
-        dest->Goto(left_value < right_value);
-        break;
-      case equal:
-        dest->Goto(left_value == right_value);
-        break;
-      case greater_equal:
-        dest->Goto(left_value >= right_value);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  } else {
-    // Only one side is a constant Smi.
-    // If left side is a constant Smi, reverse the operands.
-    // Since one side is a constant Smi, conversion order does not matter.
-    if (left_side_constant_smi) {
-      Result* temp = left_side;
-      left_side = right_side;
-      right_side = temp;
-      cc = ReverseCondition(cc);
-      // This may re-introduce greater or less_equal as the value of cc.
-      // CompareStub and the inline code both support all values of cc.
-    }
-    // Implement comparison against a constant Smi, inlining the case
-    // where both sides are smis.
-    left_side->ToRegister();
-    Register left_reg = left_side->reg();
-    Smi* constant_smi = Smi::cast(*right_side->handle());
-
-    if (left_side->is_smi()) {
-      if (FLAG_debug_code) {
-        __ AbortIfNotSmi(left_reg);
-      }
-      // Test smi equality and comparison by signed int comparison.
-      __ SmiCompare(left_reg, constant_smi);
-      left_side->Unuse();
-      right_side->Unuse();
-      dest->Split(cc);
-    } else {
-      // Only the case where the left side could possibly be a non-smi is left.
-      JumpTarget is_smi;
-      if (cc == equal) {
-        // We can do the equality comparison before the smi check.
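-        // (A non-smi is a tagged heap pointer and can never be bit-equal to
-        // a smi, so the full-word compare cannot spuriously match one.)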
-        __ Cmp(left_reg, constant_smi);
-        dest->true_target()->Branch(equal);
-        Condition left_is_smi = masm_->CheckSmi(left_reg);
-        dest->false_target()->Branch(left_is_smi);
-      } else {
-        // Do the smi check, then the comparison.
-        Condition left_is_smi = masm_->CheckSmi(left_reg);
-        is_smi.Branch(left_is_smi, left_side, right_side);
-      }
-
-      // Jump or fall through to here if we are comparing a non-smi to a
-      // constant smi.  If the non-smi is a heap number and this is not
-      // a loop condition, inline the floating point code.
-      if (!is_loop_condition) {
-        // Right side is a constant smi and left side has been checked
-        // not to be a smi.
-        JumpTarget not_number;
-        __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
-               FACTORY->heap_number_map());
-        not_number.Branch(not_equal, left_side);
-        __ movsd(xmm1,
-                 FieldOperand(left_reg, HeapNumber::kValueOffset));
-        int value = constant_smi->value();
-        if (value == 0) {
-          __ xorpd(xmm0, xmm0);
-        } else {
-          Result temp = allocator()->Allocate();
-          __ movl(temp.reg(), Immediate(value));
-          __ cvtlsi2sd(xmm0, temp.reg());
-          temp.Unuse();
-        }
-        __ ucomisd(xmm1, xmm0);
-        // Jump to builtin for NaN.
-        not_number.Branch(parity_even, left_side);
-        left_side->Unuse();
-        dest->true_target()->Branch(DoubleCondition(cc));
-        dest->false_target()->Jump();
-        not_number.Bind(left_side);
-      }
-
-      // Set up and call the compare stub.
-      CompareFlags flags =
-          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
-      CompareStub stub(cc, strict, flags);
-      Result result = frame_->CallStub(&stub, left_side, right_side);
-      result.ToRegister();
-      __ testq(result.reg(), result.reg());
-      result.Unuse();
-      if (cc == equal) {
-        dest->Split(cc);
-      } else {
-        dest->true_target()->Branch(cc);
-        dest->false_target()->Jump();
-
-        // It is important for performance that this case be at the end.
-        is_smi.Bind(left_side, right_side);
-        __ SmiCompare(left_reg, constant_smi);
-        left_side->Unuse();
-        right_side->Unuse();
-        dest->Split(cc);
-      }
-    }
-  }
-}
-
-
-// Load a comparison operand into an XMM register. Jump to the not_numbers
-// jump target, passing the left and right results, if the operand is not a
-// number.
-static void LoadComparisonOperand(MacroAssembler* masm_,
-                                  Result* operand,
-                                  XMMRegister xmm_reg,
-                                  Result* left_side,
-                                  Result* right_side,
-                                  JumpTarget* not_numbers) {
-  Label done;
-  if (operand->type_info().IsDouble()) {
-    // Operand is known to be a heap number, just load it.
-    __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-  } else if (operand->type_info().IsSmi()) {
-    // Operand is known to be a smi. Convert it to double and keep the original
-    // smi.
-    __ SmiToInteger32(kScratchRegister, operand->reg());
-    __ cvtlsi2sd(xmm_reg, kScratchRegister);
-  } else {
-    // Operand type not known, check for smi or heap number.
-    Label smi;
-    __ JumpIfSmi(operand->reg(), &smi);
-    if (!operand->type_info().IsNumber()) {
-      __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
-      __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
-              kScratchRegister);
-      not_numbers->Branch(not_equal, left_side, right_side, taken);
-    }
-    __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
-    __ jmp(&done);
-
-    __ bind(&smi);
-    // Convert smi to double and keep the original smi.
-    __ SmiToInteger32(kScratchRegister, operand->reg());
-    __ cvtlsi2sd(xmm_reg, kScratchRegister);
-    __ jmp(&done);
-  }
-  __ bind(&done);
-}
-
-
-void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
-                                                   Result* right_side,
-                                                   Condition cc,
-                                                   ControlDestination* dest) {
-  ASSERT(left_side->is_register());
-  ASSERT(right_side->is_register());
-
-  JumpTarget not_numbers;
-  // Load left and right operand into registers xmm0 and xmm1 and compare.
-  LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
-                        &not_numbers);
-  LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
-                        &not_numbers);
-  __ ucomisd(xmm0, xmm1);
-  // Bail out if a NaN is involved.
-  not_numbers.Branch(parity_even, left_side, right_side);
-
-  // Split to destination targets based on comparison.
-  left_side->Unuse();
-  right_side->Unuse();
-  dest->true_target()->Branch(DoubleCondition(cc));
-  dest->false_target()->Jump();
-
-  not_numbers.Bind(left_side, right_side);
-}
-
-
-// Call the function just below TOS on the stack with the given
-// arguments. The receiver is the TOS.
-void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
-                                      CallFunctionFlags flags,
-                                      int position) {
-  // Push the arguments ("left-to-right") on the stack.
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-    frame_->SpillTop();
-  }
-
-  // Record the position for debugging purposes.
-  CodeForSourcePosition(position);
-
-  // Use the shared code stub to call the function.
-  InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-  CallFunctionStub call_function(arg_count, in_loop, flags);
-  Result answer = frame_->CallStub(&call_function, arg_count + 1);
-  // Restore context and replace function on the stack with the
-  // result of the stub invocation.
-  frame_->RestoreContextRegister();
-  frame_->SetElementAt(0, &answer);
-}
-
-
-void CodeGenerator::CallApplyLazy(Expression* applicand,
-                                  Expression* receiver,
-                                  VariableProxy* arguments,
-                                  int position) {
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).
-  // If the arguments object of the scope has not been allocated,
-  // and x.apply is Function.prototype.apply, this optimization
-  // just copies y and the arguments of the current function on the
-  // stack, as receiver and arguments, and calls x.
-  // In the implementation comments, we call x the applicand
-  // and y the receiver.
-  ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
-  ASSERT(arguments->IsArguments());
-
-  // Load applicand.apply onto the stack. This will usually
-  // give us a megamorphic load site. Not super, but it works.
-  Load(applicand);
-  frame()->Dup();
-  Handle<String> name = FACTORY->LookupAsciiSymbol("apply");
-  frame()->Push(name);
-  Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
-  __ nop();
-  frame()->Push(&answer);
-
-  // Load the receiver and the existing arguments object onto the
-  // expression stack. Avoid allocating the arguments object here.
-  Load(receiver);
-  LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
-
-  // Emit the source position information after having loaded the
-  // receiver and the arguments.
-  CodeForSourcePosition(position);
-  // Contents of frame at this point:
-  // Frame[0]: arguments object of the current function or the hole.
-  // Frame[1]: receiver
-  // Frame[2]: applicand.apply
-  // Frame[3]: applicand.
-
-  // Check if the arguments object has been lazily allocated
-  // already. If so, just use that instead of copying the arguments
-  // from the stack. This also deals with cases where a local variable
-  // named 'arguments' has been introduced.
-  frame_->Dup();
-  Result probe = frame_->Pop();
-  { VirtualFrame::SpilledScope spilled_scope;
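-    // While the frame is spilled, raw pushes and pops below need not be
-    // reflected in the virtual frame.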
-    Label slow, done;
-    bool try_lazy = true;
-    if (probe.is_constant()) {
-      try_lazy = probe.handle()->IsArgumentsMarker();
-    } else {
-      __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex);
-      probe.Unuse();
-      __ j(not_equal, &slow);
-    }
-
-    if (try_lazy) {
-      Label build_args;
-      // Get rid of the arguments object probe.
-      frame_->Drop();  // Can be called on a spilled frame.
-      // Stack now has 3 elements on it.
-      // Contents of stack at this point:
-      // rsp[0]: receiver
-      // rsp[1]: applicand.apply
-      // rsp[2]: applicand.
-
-      // Check that the receiver really is a JavaScript object.
-      __ movq(rax, Operand(rsp, 0));
-      Condition is_smi = masm_->CheckSmi(rax);
-      __ j(is_smi, &build_args);
-      // We allow all JSObjects including JSFunctions.  As long as
-      // JS_FUNCTION_TYPE is the last instance type and it is right
-      // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
-      // bound.
-      STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-      STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-      __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
-      __ j(below, &build_args);
-
-      // Check that applicand.apply is Function.prototype.apply.
-      __ movq(rax, Operand(rsp, kPointerSize));
-      is_smi = masm_->CheckSmi(rax);
-      __ j(is_smi, &build_args);
-      __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
-      __ j(not_equal, &build_args);
-      __ movq(rcx, FieldOperand(rax, JSFunction::kCodeEntryOffset));
-      __ subq(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      Handle<Code> apply_code = Isolate::Current()->builtins()->FunctionApply();
-      __ Cmp(rcx, apply_code);
-      __ j(not_equal, &build_args);
-
-      // Check that applicand is a function.
-      __ movq(rdi, Operand(rsp, 2 * kPointerSize));
-      is_smi = masm_->CheckSmi(rdi);
-      __ j(is_smi, &build_args);
-      __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-      __ j(not_equal, &build_args);
-
-      // Copy the arguments to this function possibly from the
-      // adaptor frame below it.
-      Label invoke, adapted;
-      __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-      __ Cmp(Operand(rdx, StandardFrameConstants::kContextOffset),
-             Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-      __ j(equal, &adapted);
-
-      // No arguments adaptor frame. Copy fixed number of arguments.
-      __ Set(rax, scope()->num_parameters());
-      for (int i = 0; i < scope()->num_parameters(); i++) {
-        __ push(frame_->ParameterAt(i));
-      }
-      __ jmp(&invoke);
-
-      // Arguments adaptor frame present. Copy arguments from there, but
-      // avoid copying too many arguments to avoid stack overflows.
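-      // At most kArgumentsLimit (1 * KB, i.e. 1024) arguments are copied.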
-      __ bind(&adapted);
-      static const uint32_t kArgumentsLimit = 1 * KB;
-      __ SmiToInteger32(rax,
-                        Operand(rdx,
-                                ArgumentsAdaptorFrameConstants::kLengthOffset));
-      __ movl(rcx, rax);
-      __ cmpl(rax, Immediate(kArgumentsLimit));
-      __ j(above, &build_args);
-
-      // Loop through the arguments pushing them onto the execution
-      // stack. We don't inform the virtual frame of the push, so we don't
-      // have to worry about getting rid of the elements from the virtual
-      // frame.
-      Label loop;
-      // rcx is a small non-negative integer, due to the test above.
-      __ testl(rcx, rcx);
-      __ j(zero, &invoke);
-      __ bind(&loop);
-      __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
-      __ decl(rcx);
-      __ j(not_zero, &loop);
-
-      // Invoke the function.
-      __ bind(&invoke);
-      ParameterCount actual(rax);
-      __ InvokeFunction(rdi, actual, CALL_FUNCTION);
-      // Drop applicand.apply and applicand from the stack, and push
-      // the result of the function call, but leave the spilled frame
-      // unchanged, with 3 elements, so it is correct when we compile the
-      // slow-case code.
-      __ addq(rsp, Immediate(2 * kPointerSize));
-      __ push(rax);
-      // Stack now has 1 element:
-      //   rsp[0]: result
-      __ jmp(&done);
-
-      // Slow-case: Allocate the arguments object since we know it isn't
-      // there, and fall-through to the slow-case where we call
-      // applicand.apply.
-      __ bind(&build_args);
-      // Stack now has 3 elements, because we jumped here from a point where:
-      // rsp[0]: receiver
-      // rsp[1]: applicand.apply
-      // rsp[2]: applicand.
-
-      // StoreArgumentsObject requires a correct frame, and may modify it.
-      Result arguments_object = StoreArgumentsObject(false);
-      frame_->SpillAll();
-      arguments_object.ToRegister();
-      frame_->EmitPush(arguments_object.reg());
-      arguments_object.Unuse();
-      // Stack and frame now have 4 elements.
-      __ bind(&slow);
-    }
-
-    // Generic computation of x.apply(y, args) with no special optimization.
-    // Flip applicand.apply and applicand on the stack, so
-    // applicand looks like the receiver of the applicand.apply call.
-    // Then process it as a normal function call.
-    __ movq(rax, Operand(rsp, 3 * kPointerSize));
-    __ movq(rbx, Operand(rsp, 2 * kPointerSize));
-    __ movq(Operand(rsp, 2 * kPointerSize), rax);
-    __ movq(Operand(rsp, 3 * kPointerSize), rbx);
-
-    CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
-    Result res = frame_->CallStub(&call_function, 3);
-    // The function and its two arguments have been dropped.
-    frame_->Drop(1);  // Drop the receiver as well.
-    res.ToRegister();
-    frame_->EmitPush(res.reg());
-    // Stack now has 1 element:
-    //   rsp[0]: result
-    if (try_lazy) __ bind(&done);
-  }  // End of spilled scope.
-  // Restore the context register after a call.
-  frame_->RestoreContextRegister();
-}
-
-
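-// Out-of-line stack guard: the fast path in CheckStack below is only a
-// compare against the stack limit root and a conditional branch here.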
-class DeferredStackCheck: public DeferredCode {
- public:
-  DeferredStackCheck() {
-    set_comment("[ DeferredStackCheck");
-  }
-
-  virtual void Generate();
-};
-
-
-void DeferredStackCheck::Generate() {
-  StackCheckStub stub;
-  __ CallStub(&stub);
-}
-
-
-void CodeGenerator::CheckStack() {
-  DeferredStackCheck* deferred = new DeferredStackCheck;
-  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-  deferred->Branch(below);
-  deferred->BindExit();
-}
-
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  Visit(statement);
-  if (frame_ != NULL) {
-    frame_->SpillAll();
-  }
-  set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  VisitStatements(statements);
-  if (frame_ != NULL) {
-    frame_->SpillAll();
-  }
-  set_in_spilled_code(true);
-
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
-#ifdef DEBUG
-  int original_height = frame_->height();
-#endif
-  ASSERT(!in_spilled_code());
-  for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
-    Visit(statements->at(i));
-  }
-  ASSERT(!has_valid_frame() || frame_->height() == original_height);
-}
-
-
-void CodeGenerator::VisitBlock(Block* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ Block");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  VisitStatements(node->statements());
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
-  // Call the runtime to declare the globals.  The inevitable call
-  // will sync frame elements to memory anyway, so we do it eagerly to
-  // allow us to push the arguments directly into place.
-  frame_->SyncRange(0, frame_->element_count() - 1);
-
-  __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
-  frame_->EmitPush(rsi);  // The context is the first argument.
-  frame_->EmitPush(kScratchRegister);
-  frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
-  frame_->EmitPush(Smi::FromInt(strict_mode_flag()));
-  Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
-  // Return value is ignored.
-}
-
-
-void CodeGenerator::VisitDeclaration(Declaration* node) {
-  Comment cmnt(masm_, "[ Declaration");
-  Variable* var = node->proxy()->var();
-  ASSERT(var != NULL);  // must have been resolved
-  Slot* slot = var->AsSlot();
-
-  // If it was not possible to allocate the variable at compile time,
-  // we need to "declare" it at runtime to make sure it actually
-  // exists in the local context.
-  if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    // Variables with a "LOOKUP" slot were introduced as non-locals
-    // during variable resolution and must have mode DYNAMIC.
-    ASSERT(var->is_dynamic());
-    // For now, just do a runtime call.  Sync the virtual frame eagerly
-    // so we can simply push the arguments into place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(rsi);
-    __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT);
-    frame_->EmitPush(kScratchRegister);
-    // Declaration nodes are always introduced in one of two modes.
-    ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
-    PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
-    frame_->EmitPush(Smi::FromInt(attr));
-    // Push initial value, if any.
-    // Note: For variables we must not push an initial value (such as
-    // 'undefined') because we may have a (legal) redeclaration and we
-    // must not destroy the current value.
-    if (node->mode() == Variable::CONST) {
-      frame_->EmitPush(Heap::kTheHoleValueRootIndex);
-    } else if (node->fun() != NULL) {
-      Load(node->fun());
-    } else {
-      frame_->EmitPush(Smi::FromInt(0));  // no initial value!
-    }
-    Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
-    // Ignore the return value (declarations are statements).
-    return;
-  }
-
-  ASSERT(!var->is_global());
-
-  // If we have a function or a constant, we need to initialize the variable.
-  Expression* val = NULL;
-  if (node->mode() == Variable::CONST) {
-    val = new Literal(FACTORY->the_hole_value());
-  } else {
-    val = node->fun();  // NULL if we don't have a function
-  }
-
-  if (val != NULL) {
-    {
-      // Set the initial value.
-      Reference target(this, node->proxy());
-      Load(val);
-      target.SetValue(NOT_CONST_INIT);
-      // The reference is removed from the stack (preserving TOS) when
-      // it goes out of scope.
-    }
-    // Get rid of the assigned value (declarations are statements).
-    frame_->Drop();
-  }
-}
-
-
-void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  CodeForStatementPosition(node);
-  Expression* expression = node->expression();
-  expression->MarkAsStatement();
-  Load(expression);
-  // Remove the lingering expression result from the top of stack.
-  frame_->Drop();
-}
-
-
-void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "// EmptyStatement");
-  CodeForStatementPosition(node);
-  // nothing to do
-}
-
-
-void CodeGenerator::VisitIfStatement(IfStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ IfStatement");
-  // Generate different code depending on which parts of the if statement
-  // are present or not.
-  bool has_then_stm = node->HasThenStatement();
-  bool has_else_stm = node->HasElseStatement();
-
-  CodeForStatementPosition(node);
-  JumpTarget exit;
-  if (has_then_stm && has_else_stm) {
-    JumpTarget then;
-    JumpTarget else_;
-    ControlDestination dest(&then, &else_, true);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.false_was_fall_through()) {
-      // The else target was bound, so we compile the else part first.
-      Visit(node->else_statement());
-
-      // We may have dangling jumps to the then part.
-      if (then.is_linked()) {
-        if (has_valid_frame()) exit.Jump();
-        then.Bind();
-        Visit(node->then_statement());
-      }
-    } else {
-      // The then target was bound, so we compile the then part first.
-      Visit(node->then_statement());
-
-      if (else_.is_linked()) {
-        if (has_valid_frame()) exit.Jump();
-        else_.Bind();
-        Visit(node->else_statement());
-      }
-    }
-
-  } else if (has_then_stm) {
-    ASSERT(!has_else_stm);
-    JumpTarget then;
-    ControlDestination dest(&then, &exit, true);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.false_was_fall_through()) {
-      // The exit label was bound.  We may have dangling jumps to the
-      // then part.
-      if (then.is_linked()) {
-        exit.Unuse();
-        exit.Jump();
-        then.Bind();
-        Visit(node->then_statement());
-      }
-    } else {
-      // The then label was bound.
-      Visit(node->then_statement());
-    }
-
-  } else if (has_else_stm) {
-    ASSERT(!has_then_stm);
-    JumpTarget else_;
-    ControlDestination dest(&exit, &else_, false);
-    LoadCondition(node->condition(), &dest, true);
-
-    if (dest.true_was_fall_through()) {
-      // The exit label was bound.  We may have dangling jumps to the
-      // else part.
-      if (else_.is_linked()) {
-        exit.Unuse();
-        exit.Jump();
-        else_.Bind();
-        Visit(node->else_statement());
-      }
-    } else {
-      // The else label was bound.
-      Visit(node->else_statement());
-    }
-
-  } else {
-    ASSERT(!has_then_stm && !has_else_stm);
-    // We only care about the condition's side effects (not its value
-    // or control flow effect).  LoadCondition is called without
-    // forcing control flow.
-    ControlDestination dest(&exit, &exit, true);
-    LoadCondition(node->condition(), &dest, false);
-    if (!dest.is_used()) {
-      // We got a value on the frame rather than (or in addition to)
-      // control flow.
-      frame_->Drop();
-    }
-  }
-
-  if (exit.is_linked()) {
-    exit.Bind();
-  }
-}
-
-
-void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ContinueStatement");
-  CodeForStatementPosition(node);
-  node->target()->continue_target()->Jump();
-}
-
-
-void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ BreakStatement");
-  CodeForStatementPosition(node);
-  node->target()->break_target()->Jump();
-}
-
-
-void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ReturnStatement");
-
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  Result return_value = frame_->Pop();
-  masm()->positions_recorder()->WriteRecordedPositions();
-  if (function_return_is_shadowed_) {
-    function_return_.Jump(&return_value);
-  } else {
-    frame_->PrepareForReturn();
-    if (function_return_.is_bound()) {
-      // If the function return label is already bound we reuse the
-      // code by jumping to the return site.
-      function_return_.Jump(&return_value);
-    } else {
-      function_return_.Bind(&return_value);
-      GenerateReturnSequence(&return_value);
-    }
-  }
-}
-
-
-void CodeGenerator::GenerateReturnSequence(Result* return_value) {
-  // The return value is a live (but not currently reference counted)
-  // reference to rax.  This is safe because the current frame does not
-  // contain a reference to rax (it is prepared for the return by spilling
-  // all registers).
-  if (FLAG_trace) {
-    frame_->Push(return_value);
-    *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
-  }
-  return_value->ToRegister(rax);
-
-  // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
-  Label check_exit_codesize;
-  masm_->bind(&check_exit_codesize);
-#endif
-
-  // Leave the frame and return popping the arguments and the
-  // receiver.
-  frame_->Exit();
-  int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize;
-  __ Ret(arguments_bytes, rcx);
-  DeleteFrame();
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Add padding that will be overwritten by a debugger breakpoint.
-  // The shortest return sequence generated is "movq rsp, rbp; pop rbp; ret k"
-  // with length 7 (3 + 1 + 3).
-  const int kPadding = Assembler::kJSReturnSequenceLength - 7;
-  for (int i = 0; i < kPadding; ++i) {
-    masm_->int3();
-  }
-  // Check that the size of the code used for returning is large enough
-  // for the debugger's requirements.
-  ASSERT(Assembler::kJSReturnSequenceLength <=
-         masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
-#endif
-}
-
-
-void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WithEnterStatement");
-  CodeForStatementPosition(node);
-  Load(node->expression());
-  Result context;
-  if (node->is_catch_block()) {
-    context = frame_->CallRuntime(Runtime::kPushCatchContext, 1);
-  } else {
-    context = frame_->CallRuntime(Runtime::kPushContext, 1);
-  }
-
-  // Update context local.
-  frame_->SaveContextRegister();
-
-  // Verify that the runtime call result and rsi agree.
-  if (FLAG_debug_code) {
-    __ cmpq(context.reg(), rsi);
-    __ Assert(equal, "Runtime::NewContext should end up in rsi");
-  }
-}
-
-
-void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WithExitStatement");
-  CodeForStatementPosition(node);
-  // Pop context.
-  __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX));
-  // Update context local.
-  frame_->SaveContextRegister();
-}
-
-
-void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ SwitchStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
-  // Compile the switch value.
-  Load(node->tag());
-
-  ZoneList<CaseClause*>* cases = node->cases();
-  int length = cases->length();
-  CaseClause* default_clause = NULL;
-
-  JumpTarget next_test;
-  // Compile the case label expressions and comparisons.  Exit early
-  // if a comparison is unconditionally true.  The target next_test is
-  // bound before the loop in order to indicate control flow to the
-  // first comparison.
-  next_test.Bind();
-  for (int i = 0; i < length && !next_test.is_unused(); i++) {
-    CaseClause* clause = cases->at(i);
-    // The default is not a test, but remember it for later.
-    if (clause->is_default()) {
-      default_clause = clause;
-      continue;
-    }
-
-    Comment cmnt(masm_, "[ Case comparison");
-    // We recycle the same target next_test for each test.  Bind it if
-    // the previous test has not done so and then unuse it for the
-    // loop.
-    if (next_test.is_linked()) {
-      next_test.Bind();
-    }
-    next_test.Unuse();
-
-    // Duplicate the switch value.
-    frame_->Dup();
-
-    // Compile the label expression.
-    Load(clause->label());
-
-    // Compare and branch to the body if true or the next test if
-    // false.  Prefer the next test as a fall through.
-    ControlDestination dest(clause->body_target(), &next_test, false);
-    Comparison(node, equal, true, &dest);
-
-    // If the comparison fell through to the true target, jump to the
-    // actual body.
-    if (dest.true_was_fall_through()) {
-      clause->body_target()->Unuse();
-      clause->body_target()->Jump();
-    }
-  }
-
-  // If there was control flow to a next test from the last one
-  // compiled, compile a jump to the default or break target.
-  if (!next_test.is_unused()) {
-    if (next_test.is_linked()) {
-      next_test.Bind();
-    }
-    // Drop the switch value.
-    frame_->Drop();
-    if (default_clause != NULL) {
-      default_clause->body_target()->Jump();
-    } else {
-      node->break_target()->Jump();
-    }
-  }
-
-  // The last instruction emitted was a jump, either to the default
-  // clause or the break target, or else to a case body from the loop
-  // that compiles the tests.
-  ASSERT(!has_valid_frame());
-  // Compile case bodies as needed.
-  for (int i = 0; i < length; i++) {
-    CaseClause* clause = cases->at(i);
-
-    // There are two ways to reach the body: from the corresponding
-    // test or as the fall through of the previous body.
-    if (clause->body_target()->is_linked() || has_valid_frame()) {
-      if (clause->body_target()->is_linked()) {
-        if (has_valid_frame()) {
-          // If we have both a jump to the test and a fall through, put
-          // a jump on the fall through path to avoid the dropping of
-          // the switch value on the test path.  The exception is the
-          // default, which has already had the switch value dropped.
-          if (clause->is_default()) {
-            clause->body_target()->Bind();
-          } else {
-            JumpTarget body;
-            body.Jump();
-            clause->body_target()->Bind();
-            frame_->Drop();
-            body.Bind();
-          }
-        } else {
-          // No fall through to worry about.
-          clause->body_target()->Bind();
-          if (!clause->is_default()) {
-            frame_->Drop();
-          }
-        }
-      } else {
-        // Otherwise, we have only fall through.
-        ASSERT(has_valid_frame());
-      }
-
-      // We are now prepared to compile the body.
-      Comment cmnt(masm_, "[ Case body");
-      VisitStatements(clause->statements());
-    }
-    clause->body_target()->Unuse();
-  }
-
-  // We may not have a valid frame here so bind the break target only
-  // if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ DoWhileStatement");
-  CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  JumpTarget body(JumpTarget::BIDIRECTIONAL);
-  IncrementLoopNesting();
-
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  // Label the top of the loop for the backward jump if necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // Use the continue target.
-      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-      node->continue_target()->Bind();
-      break;
-    case ALWAYS_FALSE:
-      // No need to label it.
-      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      break;
-    case DONT_KNOW:
-      // Continue is the test, so use the backward body target.
-      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      body.Bind();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  Visit(node->body());
-
-  // Compile the test.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // If control flow can fall off the end of the body, jump back
-      // to the top and bind the break target at the exit.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-    case ALWAYS_FALSE:
-      // We may have had continues or breaks in the body.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-    case DONT_KNOW:
-      // We have to compile the test expression if it can be reached by
-      // control flow falling out of the body or via continue.
-      if (node->continue_target()->is_linked()) {
-        node->continue_target()->Bind();
-      }
-      if (has_valid_frame()) {
-        Comment cmnt(masm_, "[ DoWhileCondition");
-        CodeForDoWhileConditionPosition(node);
-        ControlDestination dest(&body, node->break_target(), false);
-        LoadCondition(node->cond(), &dest, true);
-      }
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
-      }
-      break;
-  }
-
-  DecrementLoopNesting();
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ WhileStatement");
-  CodeForStatementPosition(node);
-
-  // If the condition is always false and has no side effects, we do not
-  // need to compile anything.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  // Do not duplicate conditions that may have function literal
-  // subexpressions.  This can cause us to compile the function literal
-  // twice.
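-  // (For example, in while (f = function () { return g(); }) { ... }
-  // the condition contains a function literal; duplicating the test
-  // would compile that literal twice.)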
-  bool test_at_bottom = !node->may_have_function_literal();
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  IncrementLoopNesting();
-  JumpTarget body;
-  if (test_at_bottom) {
-    body.set_direction(JumpTarget::BIDIRECTIONAL);
-  }
-
-  // Based on the condition analysis, compile the test as necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // We will not compile the test expression.  Label the top of the
-      // loop with the continue target.
-      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-      node->continue_target()->Bind();
-      break;
-    case DONT_KNOW: {
-      if (test_at_bottom) {
-        // Continue is the test at the bottom, no need to label the test
-        // at the top.  The body is a backward target.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else {
-        // Label the test at the top as the continue target.  The body
-        // is a forward-only target.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      }
-      // Compile the test with the body as the true target and preferred
-      // fall-through and with the break target as the false target.
-      ControlDestination dest(&body, node->break_target(), true);
-      LoadCondition(node->cond(), &dest, true);
-
-      if (dest.false_was_fall_through()) {
-        // If we got the break target as fall-through, the test may have
-        // been unconditionally false (if there are no jumps to the
-        // body).
-        if (!body.is_linked()) {
-          DecrementLoopNesting();
-          return;
-        }
-
-        // Otherwise, jump around the body on the fall through and then
-        // bind the body target.
-        node->break_target()->Unuse();
-        node->break_target()->Jump();
-        body.Bind();
-      }
-      break;
-    }
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  Visit(node->body());
-
-  // Based on the condition analysis, compile the backward jump as
-  // necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // The loop body has been labeled with the continue target.
-      if (has_valid_frame()) {
-        node->continue_target()->Jump();
-      }
-      break;
-    case DONT_KNOW:
-      if (test_at_bottom) {
-        // If we have chosen to recompile the test at the bottom,
-        // then it is the continue target.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (has_valid_frame()) {
-          // The break target is the fall-through (body is a backward
-          // jump from here and thus an invalid fall-through).
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), &dest, true);
-        }
-      } else {
-        // If we have chosen not to recompile the test at the bottom,
-        // jump back to the one at the top.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
-      }
-      break;
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  // The break target may be already bound (by the condition), or there
-  // may not be a valid frame.  Bind it only if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-}
-
-
-void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) {
-  ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER);
-  if (slot->type() == Slot::LOCAL) {
-    frame_->SetTypeForLocalAt(slot->index(), info);
-  } else {
-    frame_->SetTypeForParamAt(slot->index(), info);
-  }
-  if (FLAG_debug_code && info.IsSmi()) {
-    if (slot->type() == Slot::LOCAL) {
-      frame_->PushLocalAt(slot->index());
-    } else {
-      frame_->PushParameterAt(slot->index());
-    }
-    Result var = frame_->Pop();
-    var.ToRegister();
-    __ AbortIfNotSmi(var.reg());
-  }
-}
-
-
-void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
-  // A fast smi loop is a for loop with an initializer
-  // that is a simple assignment of a smi to a stack variable,
-  // a test that is a simple comparison of that variable against a smi
-  // constant, and a step that is an increment/decrement of the
-  // variable, where the variable isn't modified in the loop body.
-  // This guarantees that the variable is always a smi.
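-  // An illustrative example: for (var i = 0; i < 100; i++) { ... }
-  // qualifies, provided the body never assigns to i.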
-
-  Variable* loop_var = node->loop_variable();
-  Smi* initial_value = *Handle<Smi>::cast(node->init()
-      ->StatementAsSimpleAssignment()->value()->AsLiteral()->handle());
-  Smi* limit_value = *Handle<Smi>::cast(
-      node->cond()->AsCompareOperation()->right()->AsLiteral()->handle());
-  Token::Value compare_op =
-      node->cond()->AsCompareOperation()->op();
-  bool increments =
-      node->next()->StatementAsCountOperation()->op() == Token::INC;
-
-  // Check that the condition isn't initially false.
-  bool initially_false = false;
-  int initial_int_value = initial_value->value();
-  int limit_int_value = limit_value->value();
-  switch (compare_op) {
-    case Token::LT:
-      initially_false = initial_int_value >= limit_int_value;
-      break;
-    case Token::LTE:
-      initially_false = initial_int_value > limit_int_value;
-      break;
-    case Token::GT:
-      initially_false = initial_int_value <= limit_int_value;
-      break;
-    case Token::GTE:
-      initially_false = initial_int_value < limit_int_value;
-      break;
-    default:
-      UNREACHABLE();
-  }
-  if (initially_false) return;
-
-  // Only check the loop condition at the end.
-
-  Visit(node->init());
-
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-  // Set type and stack height of BreakTargets.
-  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
-  IncrementLoopNesting();
-  loop.Bind();
-
-  // Set number type of the loop variable to smi.
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-
-  SetTypeForStackSlot(loop_var->AsSlot(), TypeInfo::Smi());
-  Visit(node->body());
-
-  if (node->continue_target()->is_linked()) {
-    node->continue_target()->Bind();
-  }
-
-  if (has_valid_frame()) {
-    CodeForStatementPosition(node);
-    Slot* loop_var_slot = loop_var->AsSlot();
-    if (loop_var_slot->type() == Slot::LOCAL) {
-      frame_->TakeLocalAt(loop_var_slot->index());
-    } else {
-      ASSERT(loop_var_slot->type() == Slot::PARAMETER);
-      frame_->TakeParameterAt(loop_var_slot->index());
-    }
-    Result loop_var_result = frame_->Pop();
-    if (!loop_var_result.is_register()) {
-      loop_var_result.ToRegister();
-    }
-    Register loop_var_reg = loop_var_result.reg();
-    frame_->Spill(loop_var_reg);
-    if (increments) {
-      __ SmiAddConstant(loop_var_reg,
-                        loop_var_reg,
-                        Smi::FromInt(1));
-    } else {
-      __ SmiSubConstant(loop_var_reg,
-                        loop_var_reg,
-                        Smi::FromInt(1));
-    }
-
-    frame_->Push(&loop_var_result);
-    if (loop_var_slot->type() == Slot::LOCAL) {
-      frame_->StoreToLocalAt(loop_var_slot->index());
-    } else {
-      ASSERT(loop_var_slot->type() == Slot::PARAMETER);
-      frame_->StoreToParameterAt(loop_var_slot->index());
-    }
-    frame_->Drop();
-
-    __ SmiCompare(loop_var_reg, limit_value);
-    Condition condition;
-    switch (compare_op) {
-      case Token::LT:
-        condition = less;
-        break;
-      case Token::LTE:
-        condition = less_equal;
-        break;
-      case Token::GT:
-        condition = greater;
-        break;
-      case Token::GTE:
-        condition = greater_equal;
-        break;
-      default:
-        condition = never;
-        UNREACHABLE();
-    }
-    loop.Branch(condition);
-  }
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForStatement(ForStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ ForStatement");
-  CodeForStatementPosition(node);
-
-  if (node->is_fast_smi_loop()) {
-    GenerateFastSmiLoop(node);
-    return;
-  }
-
-  // Compile the init expression if present.
-  if (node->init() != NULL) {
-    Visit(node->init());
-  }
-
-  // If the condition is always false and has no side effects, we do not
-  // need to compile anything else.
-  ConditionAnalysis info = AnalyzeCondition(node->cond());
-  if (info == ALWAYS_FALSE) return;
-
-  // Do not duplicate conditions that may have function literal
-  // subexpressions.  This can cause us to compile the function literal
-  // twice.
-  bool test_at_bottom = !node->may_have_function_literal();
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  IncrementLoopNesting();
-
-  // Target for backward edge if no test at the bottom, otherwise
-  // unused.
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
-  // Target for backward edge if there is a test at the bottom,
-  // otherwise used as target for test at the top.
-  JumpTarget body;
-  if (test_at_bottom) {
-    body.set_direction(JumpTarget::BIDIRECTIONAL);
-  }
-
-  // Based on the condition analysis, compile the test as necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      // We will not compile the test expression.  Label the top of the
-      // loop.
-      if (node->next() == NULL) {
-        // Use the continue target if there is no update expression.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        // Otherwise use the backward loop target.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        loop.Bind();
-      }
-      break;
-    case DONT_KNOW: {
-      if (test_at_bottom) {
-        // Continue is either the update expression or the test at the
-        // bottom, no need to label the test at the top.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else if (node->next() == NULL) {
-        // We are not recompiling the test at the bottom and there is no
-        // update expression.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        // We are not recompiling the test at the bottom and there is an
-        // update expression.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        loop.Bind();
-      }
-
-      // Compile the test with the body as the true target and preferred
-      // fall-through and with the break target as the false target.
-      ControlDestination dest(&body, node->break_target(), true);
-      LoadCondition(node->cond(), &dest, true);
-
-      if (dest.false_was_fall_through()) {
-        // If we got the break target as fall-through, the test may have
-        // been unconditionally false (if there are no jumps to the
-        // body).
-        if (!body.is_linked()) {
-          DecrementLoopNesting();
-          return;
-        }
-
-        // Otherwise, jump around the body on the fall through and then
-        // bind the body target.
-        node->break_target()->Unuse();
-        node->break_target()->Jump();
-        body.Bind();
-      }
-      break;
-    }
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-
-  Visit(node->body());
-
-  // If there is an update expression, compile it if necessary.
-  if (node->next() != NULL) {
-    if (node->continue_target()->is_linked()) {
-      node->continue_target()->Bind();
-    }
-
-    // Control can reach the update by falling out of the body or by a
-    // continue.
-    if (has_valid_frame()) {
-      // Record the source position of the statement: this code comes
-      // after the code for the body but actually belongs to the loop
-      // statement, not the body.
-      CodeForStatementPosition(node);
-      Visit(node->next());
-    }
-  }
-
-  // Based on the condition analysis, compile the backward jump as
-  // necessary.
-  switch (info) {
-    case ALWAYS_TRUE:
-      if (has_valid_frame()) {
-        if (node->next() == NULL) {
-          node->continue_target()->Jump();
-        } else {
-          loop.Jump();
-        }
-      }
-      break;
-    case DONT_KNOW:
-      if (test_at_bottom) {
-        if (node->continue_target()->is_linked()) {
-          // We can have dangling jumps to the continue target if there
-          // was no update expression.
-          node->continue_target()->Bind();
-        }
-        // Control can reach the test at the bottom by falling out of
-        // the body, by a continue in the body, or from the update
-        // expression.
-        if (has_valid_frame()) {
-          // The break target is the fall-through (body is a backward
-          // jump from here).
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), &dest, true);
-        }
-      } else {
-        // Otherwise, jump back to the test at the top.
-        if (has_valid_frame()) {
-          if (node->next() == NULL) {
-            node->continue_target()->Jump();
-          } else {
-            loop.Jump();
-          }
-        }
-      }
-      break;
-    case ALWAYS_FALSE:
-      UNREACHABLE();
-      break;
-  }
-
-  // The break target may be already bound (by the condition), or there
-  // may not be a valid frame.  Bind it only if needed.
-  if (node->break_target()->is_linked()) {
-    node->break_target()->Bind();
-  }
-  DecrementLoopNesting();
-}
-
-
-void CodeGenerator::VisitForInStatement(ForInStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ ForInStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget primitive;
-  JumpTarget jsobject;
-  JumpTarget fixed_array;
-  JumpTarget entry(JumpTarget::BIDIRECTIONAL);
-  JumpTarget end_del_check;
-  JumpTarget exit;
-
-  // Get the object to enumerate over (converted to JSObject).
-  LoadAndSpill(node->enumerable());
-
-  // Both SpiderMonkey and kjs ignore null and undefined in contrast
-  // to the specification.  12.6.4 mandates a call to ToObject.
-  frame_->EmitPop(rax);
-
-  // rax: value to be iterated over
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  exit.Branch(equal);
-  __ CompareRoot(rax, Heap::kNullValueRootIndex);
-  exit.Branch(equal);
-
-  // Stack layout in body:
-  // [iteration counter (smi)] <- slot 0
-  // [length of array]         <- slot 1
-  // [FixedArray]              <- slot 2
-  // [Map or 0]                <- slot 3
-  // [Object]                  <- slot 4
-
-  // Check if enumerable is already a JSObject
-  // rax: value to be iterated over
-  Condition is_smi = masm_->CheckSmi(rax);
-  primitive.Branch(is_smi);
-  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
-  jsobject.Branch(above_equal);
-
-  primitive.Bind();
-  frame_->EmitPush(rax);
-  frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
-  // The function call returns the value in rax, which is where we
-  // want it below.
-
-  jsobject.Bind();
-  // Get the set of properties (as a FixedArray or Map).
-  // rax: value to be iterated over
-  frame_->EmitPush(rax);  // Push the object being iterated over.
-
-  // Check cache validity in generated code. This is a fast case for
-  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
-  // guarantee cache validity, call the runtime system to check cache
-  // validity or get the property names in a fixed array.
-  JumpTarget call_runtime;
-  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-  JumpTarget check_prototype;
-  JumpTarget use_cache;
-  __ movq(rcx, rax);
-  loop.Bind();
-  // Check that there are no elements.
-  __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset));
-  __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
-  call_runtime.Branch(not_equal);
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in rbx for the subsequent
-  // prototype load.
-  __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
-  __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex);
-  call_runtime.Branch(equal);
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors.  This is the case if the next enumeration index
-  // field does not contain a smi.
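-  // (That field holds either the next enumeration index as a smi or a
-  // bridge array containing the enum cache.)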
-  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
-  is_smi = masm_->CheckSmi(rdx);
-  call_runtime.Branch(is_smi);
-  // For all objects but the receiver, check that the cache is empty.
-  __ cmpq(rcx, rax);
-  check_prototype.Branch(equal);
-  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex);
-  call_runtime.Branch(not_equal);
-  check_prototype.Bind();
-  // Load the prototype from the map and loop if non-null.
-  __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
-  __ CompareRoot(rcx, Heap::kNullValueRootIndex);
-  loop.Branch(not_equal);
-  // The enum cache is valid.  Load the map of the object being
-  // iterated over and use the cache for the iteration.
-  __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
-  use_cache.Jump();
-
-  call_runtime.Bind();
-  // Call the runtime to get the property names for the object.
-  frame_->EmitPush(rax);  // push the Object (slot 4) for the runtime call
-  frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
-
-  // If we got a Map, we can do a fast modification check.
-  // Otherwise, we got a FixedArray, and we have to do a slow check.
-  // rax: map or fixed array (result from call to
-  // Runtime::kGetPropertyNamesFast)
-  __ movq(rdx, rax);
-  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
-  __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
-  fixed_array.Branch(not_equal);
-
-  use_cache.Bind();
-  // Get enum cache
-  // rax: map (either the result from a call to
-  // Runtime::kGetPropertyNamesFast or has been fetched directly from
-  // the object)
-  __ movq(rcx, rax);
-  __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
-  // Get the bridge array held in the enumeration index field.
-  __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
-  // Get the cache from the bridge array.
-  __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-
-  frame_->EmitPush(rax);  // <- slot 3
-  frame_->EmitPush(rdx);  // <- slot 2
-  __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
-  frame_->EmitPush(rax);  // <- slot 1
-  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
-  entry.Jump();
-
-  fixed_array.Bind();
-  // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
-  frame_->EmitPush(Smi::FromInt(0));  // <- slot 3
-  frame_->EmitPush(rax);  // <- slot 2
-
-  // Push the length of the array and the initial index onto the stack.
-  __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
-  frame_->EmitPush(rax);  // <- slot 1
-  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
-
-  // Condition.
-  entry.Bind();
-  // Grab the current frame's height for the break and continue
-  // targets only after all the state is pushed on the frame.
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-
-  __ movq(rax, frame_->ElementAt(0));  // load the current count
-  __ SmiCompare(frame_->ElementAt(1), rax);  // compare to the array length
-  node->break_target()->Branch(below_equal);
-
-  // Get the i'th entry of the array.
-  __ movq(rdx, frame_->ElementAt(2));
-  SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2);
-  __ movq(rbx,
-          FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize));
-
-  // Get the expected map from the stack or a zero map in the
-  // permanent slow case.
-  // rax: current iteration count
-  // rbx: i'th entry of the enum cache
-  __ movq(rdx, frame_->ElementAt(3));
-  // Check if the expected map still matches that of the enumerable.
-  // If not, we have to filter the key.
-  // rax: current iteration count
-  // rbx: i'th entry of the enum cache
-  // rdx: expected map value
-  __ movq(rcx, frame_->ElementAt(4));
-  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ cmpq(rcx, rdx);
-  end_del_check.Branch(equal);
-
-  // Convert the entry to a string (or null if it isn't a property anymore).
-  frame_->EmitPush(frame_->ElementAt(4));  // push enumerable
-  frame_->EmitPush(rbx);  // push entry
-  frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
-  __ movq(rbx, rax);
-
-  // If the property has been removed while iterating, we just skip it.
-  __ Cmp(rbx, Smi::FromInt(0));
-  node->continue_target()->Branch(equal);
-
-  end_del_check.Bind();
-  // Store the entry in the 'each' expression and take another spin in the
-  // loop.  rbx: i'th entry of the enum cache (or string thereof)
-  frame_->EmitPush(rbx);
-  { Reference each(this, node->each());
-    // Loading a reference may leave the frame in an unspilled state.
-    frame_->SpillAll();
-    if (!each.is_illegal()) {
-      if (each.size() > 0) {
-        frame_->EmitPush(frame_->ElementAt(each.size()));
-        each.SetValue(NOT_CONST_INIT);
-        frame_->Drop(2);  // Drop the original and the copy of the element.
-      } else {
-        // If the reference has size zero then we can use the value
-        // below the reference as if it were above it, instead of
-        // pushing a new copy above the reference.
-        each.SetValue(NOT_CONST_INIT);
-        frame_->Drop();  // Drop the original of the element.
-      }
-    }
-  }
-  // Unloading a reference may leave the frame in an unspilled state.
-  frame_->SpillAll();
-
-  // Body.
-  CheckStack();  // TODO(1222600): ignore if body contains calls.
-  VisitAndSpill(node->body());
-
-  // Next.  Reestablish a spilled frame in case we are coming here via
-  // a continue in the body.
-  node->continue_target()->Bind();
-  frame_->SpillAll();
-  frame_->EmitPop(rax);
-  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
-  frame_->EmitPush(rax);
-  entry.Jump();
-
-  // Cleanup.  No need to spill because VirtualFrame::Drop is safe for
-  // any frame.
-  node->break_target()->Bind();
-  frame_->Drop(5);
-
-  // Exit.
-  exit.Bind();
-
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
-}
-
-
-void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryCatchStatement");
-  CodeForStatementPosition(node);
-
-  JumpTarget try_block;
-  JumpTarget exit;
-
-  try_block.Call();
-  // --- Catch block ---
-  frame_->EmitPush(rax);
-
-  // Store the caught exception in the catch variable.
-  Variable* catch_var = node->catch_var()->var();
-  ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL);
-  StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT);
-
-  // Remove the exception from the stack.
-  frame_->Drop();
-
-  VisitStatementsAndSpill(node->catch_block()->statements());
-  if (has_valid_frame()) {
-    exit.Jump();
-  }
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_CATCH_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the jump targets for all escapes from the try block, including
-  // returns.  During shadowing, the original target is hidden as the
-  // ShadowTarget and operations on the original actually affect the
-  // shadowing target.
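-  // (For example, a return inside the try block jumps to the shadow
-  // target, so the try handler can be unlinked before the actual
-  // return is taken.)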
-  //
-  // We should probably try to unify the escaping targets and the return
-  // target.
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  VisitStatementsAndSpill(node->try_block()->statements());
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original targets are unshadowed and the
-  // ShadowTargets represent the formerly shadowing targets.
-  bool has_unlinks = false;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    has_unlinks = has_unlinks || shadows[i]->is_linked();
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
-  // Make sure that there's nothing left on the stack above the
-  // handler structure.
-  if (FLAG_debug_code) {
-    __ movq(kScratchRegister, handler_address);
-    __ cmpq(rsp, Operand(kScratchRegister, 0));
-    __ Assert(equal, "stack pointer should point to top handler");
-  }
-
-  // If we can fall off the end of the try block, unlink from try chain.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.  Unlink from
-    // the handler list and drop the rest of this handler from the
-    // frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    __ movq(kScratchRegister, handler_address);
-    frame_->EmitPop(Operand(kScratchRegister, 0));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-    if (has_unlinks) {
-      exit.Jump();
-    }
-  }
-
-  // Generate unlink code for the (formerly) shadowing targets that
-  // have been jumped to.  Deallocate each shadow target.
-  Result return_value;
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // Unlink from try chain; be careful not to destroy the TOS if
-      // there is one.
-      if (i == kReturnShadowIndex) {
-        shadows[i]->Bind(&return_value);
-        return_value.ToRegister(rax);
-      } else {
-        shadows[i]->Bind();
-      }
-      // Because we can be jumping here (to spilled code) from
-      // unspilled code, we need to reestablish a spilled frame at
-      // this block.
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that we
-      // break from (e.g., for...in) may have left stuff on the stack.
-      __ movq(kScratchRegister, handler_address);
-      __ movq(rsp, Operand(kScratchRegister, 0));
-      frame_->Forget(frame_->height() - handler_height);
-
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      __ movq(kScratchRegister, handler_address);
-      frame_->EmitPop(Operand(kScratchRegister, 0));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (i == kReturnShadowIndex) {
-        if (!function_return_is_shadowed_) frame_->PrepareForReturn();
-        shadows[i]->other_target()->Jump(&return_value);
-      } else {
-        shadows[i]->other_target()->Jump();
-      }
-    }
-  }
-
-  exit.Bind();
-}
-
-
-void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
-  ASSERT(!in_spilled_code());
-  VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryFinallyStatement");
-  CodeForStatementPosition(node);
-
-  // State: used to keep track of the reason for entering the finally
-  // block. Should probably be extended to hold information for
-  // break/continue from within the try block.
-  enum { FALLING, THROWING, JUMPING };
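-  // The state is kept in rcx: FALLING and THROWING are used as is,
-  // while a jump through shadowed escape target i is encoded as
-  // JUMPING + i and dispatched after the finally block.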
-
-  JumpTarget try_block;
-  JumpTarget finally_block;
-
-  try_block.Call();
-
-  frame_->EmitPush(rax);
-  // In case of thrown exceptions, this is where we continue.
-  __ Move(rcx, Smi::FromInt(THROWING));
-  finally_block.Jump();
-
-  // --- Try block ---
-  try_block.Bind();
-
-  frame_->PushTryHandler(TRY_FINALLY_HANDLER);
-  int handler_height = frame_->height();
-
-  // Shadow the jump targets for all escapes from the try block, including
-  // returns.  During shadowing, the original target is hidden as the
-  // ShadowTarget and operations on the original actually affect the
-  // shadowing target.
-  //
-  // We should probably try to unify the escaping targets and the return
-  // target.
-  int nof_escapes = node->escaping_targets()->length();
-  List<ShadowTarget*> shadows(1 + nof_escapes);
-
-  // Add the shadow target for the function return.
-  static const int kReturnShadowIndex = 0;
-  shadows.Add(new ShadowTarget(&function_return_));
-  bool function_return_was_shadowed = function_return_is_shadowed_;
-  function_return_is_shadowed_ = true;
-  ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
-
-  // Add the remaining shadow targets.
-  for (int i = 0; i < nof_escapes; i++) {
-    shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
-  }
-
-  // Generate code for the statements in the try block.
-  VisitStatementsAndSpill(node->try_block()->statements());
-
-  // Stop the introduced shadowing and count the number of required unlinks.
-  // After shadowing stops, the original targets are unshadowed and the
-  // ShadowTargets represent the formerly shadowing targets.
-  int nof_unlinks = 0;
-  for (int i = 0; i < shadows.length(); i++) {
-    shadows[i]->StopShadowing();
-    if (shadows[i]->is_linked()) nof_unlinks++;
-  }
-  function_return_is_shadowed_ = function_return_was_shadowed;
-
-  // Get an external reference to the handler address.
-  ExternalReference handler_address(Isolate::k_handler_address, isolate());
-
-  // If we can fall off the end of the try block, unlink from the try
-  // chain and set the state on the frame to FALLING.
-  if (has_valid_frame()) {
-    // The next handler address is on top of the frame.
-    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-    __ movq(kScratchRegister, handler_address);
-    frame_->EmitPop(Operand(kScratchRegister, 0));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-    // Fake a top of stack value (unneeded when FALLING) and set the
-    // state in rcx, then jump around the unlink blocks if any.
-    frame_->EmitPush(Heap::kUndefinedValueRootIndex);
-    __ Move(rcx, Smi::FromInt(FALLING));
-    if (nof_unlinks > 0) {
-      finally_block.Jump();
-    }
-  }
-
-  // Generate code to unlink and set the state for the (formerly)
-  // shadowing targets that have been jumped to.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (shadows[i]->is_linked()) {
-      // If we have come from the shadowed return, the return value is
-      // on the virtual frame.  We must preserve it until it is
-      // pushed.
-      if (i == kReturnShadowIndex) {
-        Result return_value;
-        shadows[i]->Bind(&return_value);
-        return_value.ToRegister(rax);
-      } else {
-        shadows[i]->Bind();
-      }
-      // Because we can be jumping here (to spilled code) from
-      // unspilled code, we need to reestablish a spilled frame at
-      // this block.
-      frame_->SpillAll();
-
-      // Reload sp from the top handler, because some statements that
-      // we break from (e.g., for...in) may have left stuff on the
-      // stack.
-      __ movq(kScratchRegister, handler_address);
-      __ movq(rsp, Operand(kScratchRegister, 0));
-      frame_->Forget(frame_->height() - handler_height);
-
-      // Unlink this handler and drop it from the frame.
-      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-      __ movq(kScratchRegister, handler_address);
-      frame_->EmitPop(Operand(kScratchRegister, 0));
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-
-      if (i == kReturnShadowIndex) {
-        // If this target shadowed the function return, materialize
-        // the return value on the stack.
-        frame_->EmitPush(rax);
-      } else {
-        // Fake TOS for targets that shadowed breaks and continues.
-        frame_->EmitPush(Heap::kUndefinedValueRootIndex);
-      }
-      __ Move(rcx, Smi::FromInt(JUMPING + i));
-      if (--nof_unlinks > 0) {
-        // If this is not the last unlink block, jump around the next.
-        finally_block.Jump();
-      }
-    }
-  }
-
-  // --- Finally block ---
-  finally_block.Bind();
-
-  // Push the state on the stack.
-  frame_->EmitPush(rcx);
-
-  // We keep two elements on the stack - the (possibly faked) result
-  // and the state - while evaluating the finally block.
-  //
-  // Generate code for the statements in the finally block.
-  VisitStatementsAndSpill(node->finally_block()->statements());
-
-  if (has_valid_frame()) {
-    // Restore state and return value or faked TOS.
-    frame_->EmitPop(rcx);
-    frame_->EmitPop(rax);
-  }
-
-  // Generate code to jump to the right destination for all used
-  // formerly shadowing targets.  Deallocate each shadow target.
-  for (int i = 0; i < shadows.length(); i++) {
-    if (has_valid_frame() && shadows[i]->is_bound()) {
-      BreakTarget* original = shadows[i]->other_target();
-      __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
-      if (i == kReturnShadowIndex) {
-        // The return value is (already) in rax.
-        Result return_value = allocator_->Allocate(rax);
-        ASSERT(return_value.is_valid());
-        if (function_return_is_shadowed_) {
-          original->Branch(equal, &return_value);
-        } else {
-          // Branch around the preparation for return which may emit
-          // code.
-          JumpTarget skip;
-          skip.Branch(not_equal);
-          frame_->PrepareForReturn();
-          original->Jump(&return_value);
-          skip.Bind();
-        }
-      } else {
-        original->Branch(equal);
-      }
-    }
-  }
-
-  if (has_valid_frame()) {
-    // Check if we need to rethrow the exception.
-    JumpTarget exit;
-    __ SmiCompare(rcx, Smi::FromInt(THROWING));
-    exit.Branch(not_equal);
-
-    // Rethrow exception.
-    frame_->EmitPush(rax);  // undo pop from above
-    frame_->CallRuntime(Runtime::kReThrow, 1);
-
-    // Done.
-    exit.Bind();
-  }
-}
-
-
-void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
-  ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ DebuggerStatement");
-  CodeForStatementPosition(node);
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  // Spill everything, even constants, to the frame.
-  frame_->SpillAll();
-
-  frame_->DebugBreak();
-  // Ignore the return value.
-#endif
-}
-
-
-void CodeGenerator::InstantiateFunction(
-    Handle<SharedFunctionInfo> function_info,
-    bool pretenure) {
-  // The inevitable call will sync frame elements to memory anyway, so
-  // we do it eagerly to allow us to push the arguments directly into
-  // place.
-  frame_->SyncRange(0, frame_->element_count() - 1);
-
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning.
-  if (!pretenure &&
-      scope()->is_function_scope() &&
-      function_info->num_literals() == 0) {
-    FastNewClosureStub stub(
-        function_info->strict_mode() ? kStrictMode : kNonStrictMode);
-    frame_->Push(function_info);
-    Result answer = frame_->CallStub(&stub, 1);
-    frame_->Push(&answer);
-  } else {
-    // Call the runtime to instantiate the function based on the
-    // shared function info.
-    frame_->EmitPush(rsi);
-    frame_->EmitPush(function_info);
-    frame_->EmitPush(pretenure
-                     ? FACTORY->true_value()
-                     : FACTORY->false_value());
-    Result result = frame_->CallRuntime(Runtime::kNewClosure, 3);
-    frame_->Push(&result);
-  }
-}
-
-
-void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
-  Comment cmnt(masm_, "[ FunctionLiteral");
-
-  // Build the function info and instantiate it.
-  Handle<SharedFunctionInfo> function_info =
-      Compiler::BuildFunctionInfo(node, script());
-  // Check for stack-overflow exception.
-  if (function_info.is_null()) {
-    SetStackOverflow();
-    return;
-  }
-  InstantiateFunction(function_info, node->pretenure());
-}
-
-
-void CodeGenerator::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
-  InstantiateFunction(node->shared_function_info(), false);
-}
-
-
-void CodeGenerator::VisitConditional(Conditional* node) {
-  Comment cmnt(masm_, "[ Conditional");
-  JumpTarget then;
-  JumpTarget else_;
-  JumpTarget exit;
-  ControlDestination dest(&then, &else_, true);
-  LoadCondition(node->condition(), &dest, true);
-
-  if (dest.false_was_fall_through()) {
-    // The else target was bound, so we compile the else part first.
-    Load(node->else_expression());
-
-    if (then.is_linked()) {
-      exit.Jump();
-      then.Bind();
-      Load(node->then_expression());
-    }
-  } else {
-    // The then target was bound, so we compile the then part first.
-    Load(node->then_expression());
-
-    if (else_.is_linked()) {
-      exit.Jump();
-      else_.Bind();
-      Load(node->else_expression());
-    }
-  }
-
-  exit.Bind();
-}
-
-
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-
-    JumpTarget slow;
-    JumpTarget done;
-    Result value;
-
-    // Generate fast case for loading from slots that correspond to
-    // local/global variables or arguments unless they are shadowed by
-    // eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(slot,
-                                    typeof_state,
-                                    &value,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    // A runtime call is inevitable.  We eagerly sync frame elements
-    // to memory so that we can push the arguments directly into place
-    // on top of the frame.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(rsi);
-    __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
-    frame_->EmitPush(kScratchRegister);
-    if (typeof_state == INSIDE_TYPEOF) {
-      value =
-          frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
-    } else {
-      value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
-    }
-
-    done.Bind(&value);
-    frame_->Push(&value);
-
-  } else if (slot->var()->mode() == Variable::CONST) {
-    // Const slots may contain 'the hole' value (the constant hasn't been
-    // initialized yet) which needs to be converted into the 'undefined'
-    // value.
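-    // (E.g. reading c before "const c = 1;" has executed must yield
-    // undefined rather than the hole.)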
-    //
-    // We currently spill the virtual frame because constants use the
-    // potentially unsafe direct-frame access of SlotOperand.
-    VirtualFrame::SpilledScope spilled_scope;
-    Comment cmnt(masm_, "[ Load const");
-    JumpTarget exit;
-    __ movq(rcx, SlotOperand(slot, rcx));
-    __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
-    exit.Branch(not_equal);
-    __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
-    exit.Bind();
-    frame_->EmitPush(rcx);
-
-  } else if (slot->type() == Slot::PARAMETER) {
-    frame_->PushParameterAt(slot->index());
-
-  } else if (slot->type() == Slot::LOCAL) {
-    frame_->PushLocalAt(slot->index());
-
-  } else {
-    // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
-    // here.
-    //
-    // The use of SlotOperand below is safe for an unspilled frame
-    // because it will always be a context slot.
-    ASSERT(slot->type() == Slot::CONTEXT);
-    Result temp = allocator_->Allocate();
-    ASSERT(temp.is_valid());
-    __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
-    frame_->Push(&temp);
-  }
-}
-
-
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
-                                                  TypeofState state) {
-  LoadFromSlot(slot, state);
-
-  // Bail out quickly if we're not using lazy arguments allocation.
-  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
-
-  // ... or if the slot isn't a non-parameter arguments slot.
-  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
-  // Pop the loaded value from the stack.
-  Result value = frame_->Pop();
-
-  // If the loaded value is a constant, we know if the arguments
-  // object has been lazily loaded yet.
-  if (value.is_constant()) {
-    if (value.handle()->IsArgumentsMarker()) {
-      Result arguments = StoreArgumentsObject(false);
-      frame_->Push(&arguments);
-    } else {
-      frame_->Push(&value);
-    }
-    return;
-  }
-
-  // The loaded value is in a register. If it is the sentinel that
-  // indicates that we haven't loaded the arguments object yet, we
-  // need to do it now.
-  JumpTarget exit;
-  __ CompareRoot(value.reg(), Heap::kArgumentsMarkerRootIndex);
-  frame_->Push(&value);
-  exit.Branch(not_equal);
-  Result arguments = StoreArgumentsObject(false);
-  frame_->SetElementAt(0, &arguments);
-  exit.Bind();
-}
-
-
-Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
-    Slot* slot,
-    TypeofState typeof_state,
-    JumpTarget* slow) {
-  // Check that no extension objects have been created by calls to
-  // eval from the current scope to the global scope.
-  Register context = rsi;
-  Result tmp = allocator_->Allocate();
-  ASSERT(tmp.is_valid());  // All non-reserved registers were available.
-
-  Scope* s = scope();
-  while (s != NULL) {
-    if (s->num_heap_slots() > 0) {
-      if (s->calls_eval()) {
-        // Check that extension is NULL.
-        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
-                Immediate(0));
-        slow->Branch(not_equal, not_taken);
-      }
-      // Load next context in chain.
-      __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
-      __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-      context = tmp.reg();
-    }
-    // If no outer scope calls eval, we do not need to check more
-    // context extensions.  If we have reached an eval scope, we check
-    // all extensions from this point.
-    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
-    s = s->outer_scope();
-  }
-
-  if (s->is_eval_scope()) {
-    // Loop up the context chain.  There is no frame effect so it is
-    // safe to use raw labels here.
-    Label next, fast;
-    if (!context.is(tmp.reg())) {
-      __ movq(tmp.reg(), context);
-    }
-    // Load map for comparison into register, outside loop.
-    __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
-    __ bind(&next);
-    // Terminate at global context.
-    __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
-    __ j(equal, &fast);
-    // Check that extension is NULL.
-    __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
-    slow->Branch(not_equal);
-    // Load next context in chain.
-    __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
-    __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
-    __ jmp(&next);
-    __ bind(&fast);
-  }
-  tmp.Unuse();
-
-  // All extension objects were empty and it is safe to use a global
-  // load IC call.
-  LoadGlobal();
-  frame_->Push(slot->var()->name());
-  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
-                         ? RelocInfo::CODE_TARGET
-                         : RelocInfo::CODE_TARGET_CONTEXT;
-  Result answer = frame_->CallLoadIC(mode);
-  // A test rax instruction following the call signals that the inobject
-  // property case was inlined.  Ensure that there is not a test rax
-  // instruction here.
-  masm_->nop();
-  return answer;
-}
-
-
-void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                                    TypeofState typeof_state,
-                                                    Result* result,
-                                                    JumpTarget* slow,
-                                                    JumpTarget* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
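-  // (For instance, eval('x + 1') introduces no new bindings, so loads
-  // in the surrounding scope can keep the fast path.)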
-  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-    *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
-    done->Jump(result);
-
-  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-    Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot();
-    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
-    if (potential_slot != NULL) {
-      // Generate fast case for locals that rewrite to slots.
-      // Allocate a fresh register to use as a temp in
-      // ContextSlotOperandCheckExtensions and to hold the result
-      // value.
-      *result = allocator_->Allocate();
-      ASSERT(result->is_valid());
-      __ movq(result->reg(),
-              ContextSlotOperandCheckExtensions(potential_slot,
-                                                *result,
-                                                slow));
-      if (potential_slot->var()->mode() == Variable::CONST) {
-        __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
-        done->Branch(not_equal, result);
-        __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
-      }
-      done->Jump(result);
-    } else if (rewrite != NULL) {
-      // Generate fast case for argument loads.
-      Property* property = rewrite->AsProperty();
-      if (property != NULL) {
-        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
-        Literal* key_literal = property->key()->AsLiteral();
-        if (obj_proxy != NULL &&
-            key_literal != NULL &&
-            obj_proxy->IsArguments() &&
-            key_literal->handle()->IsSmi()) {
-          // Load arguments object if there are no eval-introduced
-          // variables. Then load the argument from the arguments
-          // object using keyed load.
-          Result arguments = allocator()->Allocate();
-          ASSERT(arguments.is_valid());
-          __ movq(arguments.reg(),
-                  ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(),
-                                                    arguments,
-                                                    slow));
-          frame_->Push(&arguments);
-          frame_->Push(key_literal->handle());
-          *result = EmitKeyedLoad();
-          done->Jump(result);
-        }
-      }
-    }
-  }
-}
-
-
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
-  if (slot->type() == Slot::LOOKUP) {
-    ASSERT(slot->var()->is_dynamic());
-
-    // For now, just do a runtime call.  Since the call is inevitable,
-    // we eagerly sync the virtual frame so we can directly push the
-    // arguments into place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-
-    frame_->EmitPush(rsi);
-    frame_->EmitPush(slot->var()->name());
-
-    Result value;
-    if (init_state == CONST_INIT) {
-      // Same as the case for a normal store, but ignores attribute
-      // (e.g. READ_ONLY) of context slot so that we can initialize const
-      // properties (introduced via eval("const foo = (some expr);")). Also,
-      // uses the current function context instead of the top context.
-      //
-      // Note that we must declare foo upon entry of eval(), via a
-      // context slot declaration, but we cannot initialize it at the same
-      // time, because the const declaration may be at the end of the eval
-      // code (sigh...) and the const variable may have been used before
-      // (where its value is 'undefined'). Thus, we can only do the
-      // initialization when we actually encounter the expression and when
-      // the expression operands are defined and valid, and thus we need the
-      // split into 2 operations: declaration of the context slot followed
-      // by initialization.
-      value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
-    } else {
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-      value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
-    }
-    // Storing a variable must keep the (new) value on the expression
-    // stack. This is necessary for compiling chained assignment
-    // expressions.
-    frame_->Push(&value);
-  } else {
-    ASSERT(!slot->var()->is_dynamic());
-
-    JumpTarget exit;
-    if (init_state == CONST_INIT) {
-      ASSERT(slot->var()->mode() == Variable::CONST);
-      // Only the first const initialization must be executed (the slot
-      // still contains 'the hole' value). When the assignment is executed,
-      // the code is identical to a normal store (see below).
-      //
-      // We spill the frame in the code below because the direct-frame
-      // access of SlotOperand is potentially unsafe with an unspilled
-      // frame.
-      VirtualFrame::SpilledScope spilled_scope;
-      Comment cmnt(masm_, "[ Init const");
-      __ movq(rcx, SlotOperand(slot, rcx));
-      __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
-      exit.Branch(not_equal);
-    }
-
-    // We must execute the store.  Storing a variable must keep the (new)
-    // value on the stack. This is necessary for compiling assignment
-    // expressions.
-    //
-    // Note: We will reach here even with slot->var()->mode() ==
-    // Variable::CONST because const declarations initialize consts to
-    // 'the hole' value and, by doing so, end up calling this code.
-    if (slot->type() == Slot::PARAMETER) {
-      frame_->StoreToParameterAt(slot->index());
-    } else if (slot->type() == Slot::LOCAL) {
-      frame_->StoreToLocalAt(slot->index());
-    } else {
-      // The other slot types (LOOKUP and GLOBAL) cannot reach here.
-      //
-      // The use of SlotOperand below is safe for an unspilled frame
-      // because the slot is a context slot.
-      ASSERT(slot->type() == Slot::CONTEXT);
-      frame_->Dup();
-      Result value = frame_->Pop();
-      value.ToRegister();
-      Result start = allocator_->Allocate();
-      ASSERT(start.is_valid());
-      __ movq(SlotOperand(slot, start.reg()), value.reg());
-      // RecordWrite may destroy the value registers.
-      //
-      // TODO(204): Avoid actually spilling when the value is not
-      // needed (probably the common case).
-      frame_->Spill(value.reg());
-      int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
-      // The results start, value, and temp are released by going out of
-      // scope.
-    }
-
-    exit.Bind();
-  }
-}
-
-
-void CodeGenerator::VisitSlot(Slot* node) {
-  Comment cmnt(masm_, "[ Slot");
-  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
-}
-
-
-void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-  Comment cmnt(masm_, "[ VariableProxy");
-  Variable* var = node->var();
-  Expression* expr = var->rewrite();
-  if (expr != NULL) {
-    Visit(expr);
-  } else {
-    ASSERT(var->is_global());
-    Reference ref(this, node);
-    ref.GetValue();
-  }
-}
-
-
-void CodeGenerator::VisitLiteral(Literal* node) {
-  Comment cmnt(masm_, "[ Literal");
-  frame_->Push(node->handle());
-}
-
-
-void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
-  UNIMPLEMENTED();
-  // TODO(X64): Implement security policy for loads of smis.
-}
-
-
-bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
-  return false;
-}
-
-
-// Materialize the regexp literal 'node' in the literals array
-// 'literals' of the function.  Leave the regexp boilerplate in
-// 'boilerplate'.
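-//
-// Illustrative note (added commentary): for a literal such as /ab+/g the
-// literals-array entry starts out undefined; the deferred code below calls
-// %MaterializeRegExpLiteral once, and later executions reuse the cached
-// boilerplate and merely clone it.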
-class DeferredRegExpLiteral: public DeferredCode {
- public:
-  DeferredRegExpLiteral(Register boilerplate,
-                        Register literals,
-                        RegExpLiteral* node)
-      : boilerplate_(boilerplate), literals_(literals), node_(node) {
-    set_comment("[ DeferredRegExpLiteral");
-  }
-
-  void Generate();
-
- private:
-  Register boilerplate_;
-  Register literals_;
-  RegExpLiteral* node_;
-};
-
-
-void DeferredRegExpLiteral::Generate() {
-  // Since the entry is undefined, we call the runtime system to
-  // compute the literal.
-  // Literal array (0).
-  __ push(literals_);
-  // Literal index (1).
-  __ Push(Smi::FromInt(node_->literal_index()));
-  // RegExp pattern (2).
-  __ Push(node_->pattern());
-  // RegExp flags (3).
-  __ Push(node_->flags());
-  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
-  if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
-}
-
-
-class DeferredAllocateInNewSpace: public DeferredCode {
- public:
-  DeferredAllocateInNewSpace(int size,
-                             Register target,
-                             int registers_to_save = 0)
-    : size_(size), target_(target), registers_to_save_(registers_to_save) {
-    ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace());
-    set_comment("[ DeferredAllocateInNewSpace");
-  }
-  void Generate();
-
- private:
-  int size_;
-  Register target_;
-  int registers_to_save_;
-};
-
-
-void DeferredAllocateInNewSpace::Generate() {
-  for (int i = 0; i < kNumRegs; i++) {
-    if (registers_to_save_ & (1 << i)) {
-      Register save_register = { i };
-      __ push(save_register);
-    }
-  }
-  __ Push(Smi::FromInt(size_));
-  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
-  if (!target_.is(rax)) {
-    __ movq(target_, rax);
-  }
-  for (int i = kNumRegs - 1; i >= 0; i--) {
-    if (registers_to_save_ & (1 << i)) {
-      Register save_register = { i };
-      __ pop(save_register);
-    }
-  }
-}
-
-
-void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
-  Comment cmnt(masm_, "[ RegExp Literal");
-
-  // Retrieve the literals array and check the allocated entry.  Begin
-  // with a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ movq(literals.reg(),
-          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
-  // Load the literal at the ast saved index.
-  Result boilerplate = allocator_->Allocate();
-  ASSERT(boilerplate.is_valid());
-  int literal_offset =
-      FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
-  __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
-  // Check whether we need to materialize the RegExp object.  If so,
-  // jump to the deferred code passing the literals array.
-  DeferredRegExpLiteral* deferred =
-      new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
-  __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
-  deferred->Branch(equal);
-  deferred->BindExit();
-
-  // The boilerplate register now contains the RegExp object.
-
-  Result tmp = allocator()->Allocate();
-  ASSERT(tmp.is_valid());
-
-  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-
-  DeferredAllocateInNewSpace* allocate_fallback =
-      new DeferredAllocateInNewSpace(size, literals.reg());
-  frame_->Push(&boilerplate);
-  frame_->SpillTop();
-  __ AllocateInNewSpace(size,
-                        literals.reg(),
-                        tmp.reg(),
-                        no_reg,
-                        allocate_fallback->entry_label(),
-                        TAG_OBJECT);
-  allocate_fallback->BindExit();
-  boilerplate = frame_->Pop();
-  // Copy from boilerplate to clone and return clone.
-
-  for (int i = 0; i < size; i += kPointerSize) {
-    __ movq(tmp.reg(), FieldOperand(boilerplate.reg(), i));
-    __ movq(FieldOperand(literals.reg(), i), tmp.reg());
-  }
-  frame_->Push(&literals);
-}
-
-
-void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
-  Comment cmnt(masm_, "[ ObjectLiteral");
-
-  // Load a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ movq(literals.reg(),
-          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-  // Literal array.
-  frame_->Push(&literals);
-  // Literal index.
-  frame_->Push(Smi::FromInt(node->literal_index()));
-  // Constant properties.
-  frame_->Push(node->constant_properties());
-  // Should the object literal have fast elements?
-  frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0));
-  Result clone;
-  if (node->depth() > 1) {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else {
-    clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
-  }
-  frame_->Push(&clone);
-
-  // Mark all computed expressions that are bound to a key that
-  // is shadowed by a later occurrence of the same key. For the
-  // marked expressions, no store code is emitted.
-  node->CalculateEmitStore();
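-  // For instance (illustrative JS): in { x: f(), x: g() } the first 'x'
-  // is marked, so f() is still evaluated for its side effects but its
-  // value is dropped, and only the value of g() is stored.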
-
-  for (int i = 0; i < node->properties()->length(); i++) {
-    ObjectLiteral::Property* property = node->properties()->at(i);
-    switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-        break;
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
-        // else fall through.
-      case ObjectLiteral::Property::COMPUTED: {
-        Handle<Object> key(property->key()->handle());
-        if (key->IsSymbol()) {
-          // Duplicate the object as the IC receiver.
-          frame_->Dup();
-          Load(property->value());
-          if (property->emit_store()) {
-            Result ignored =
-                frame_->CallStoreIC(Handle<String>::cast(key), false,
-                                    strict_mode_flag());
-            // A test rax instruction following the store IC call would
-            // indicate the presence of an inlined version of the
-            // store. Add a nop to indicate that there is no such
-            // inlined version.
-            __ nop();
-          } else {
-            frame_->Drop(2);
-          }
-          break;
-        }
-        // Fall through
-      }
-      case ObjectLiteral::Property::PROTOTYPE: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
-        Load(property->value());
-        if (property->emit_store()) {
-          frame_->Push(Smi::FromInt(NONE));   // PropertyAttributes
-          // Ignore the result.
-          Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
-        } else {
-          frame_->Drop(3);
-        }
-        break;
-      }
-      case ObjectLiteral::Property::SETTER: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
-        frame_->Push(Smi::FromInt(1));
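-        // The smi flag pushed above distinguishes the accessor kind for
-        // the runtime call: 1 for a setter here, 0 for a getter in the
-        // case below (descriptive note).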
-        Load(property->value());
-        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        // Ignore the result.
-        break;
-      }
-      case ObjectLiteral::Property::GETTER: {
-        // Duplicate the object as an argument to the runtime call.
-        frame_->Dup();
-        Load(property->key());
-        frame_->Push(Smi::FromInt(0));
-        Load(property->value());
-        Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4);
-        // Ignore the result.
-        break;
-      }
-      default: UNREACHABLE();
-    }
-  }
-}
-
-
-void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
-  Comment cmnt(masm_, "[ ArrayLiteral");
-
-  // Load a writable copy of the function of this activation in a
-  // register.
-  frame_->PushFunction();
-  Result literals = frame_->Pop();
-  literals.ToRegister();
-  frame_->Spill(literals.reg());
-
-  // Load the literals array of the function.
-  __ movq(literals.reg(),
-          FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
-  frame_->Push(&literals);
-  frame_->Push(Smi::FromInt(node->literal_index()));
-  frame_->Push(node->constant_elements());
-  int length = node->values()->length();
-  Result clone;
-  if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
-    clone = frame_->CallStub(&stub, 3);
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->cow_arrays_created_stub(), 1);
-  } else if (node->depth() > 1) {
-    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
-    clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
-  } else {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
-    clone = frame_->CallStub(&stub, 3);
-  }
-  frame_->Push(&clone);
-
-  // Generate code to set the elements in the array that are not
-  // literals.
-  for (int i = 0; i < length; i++) {
-    Expression* value = node->values()->at(i);
-
-    if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) {
-      continue;
-    }
-
-    // The property must be set by generated code.
-    Load(value);
-
-    // Get the property value off the stack.
-    Result prop_value = frame_->Pop();
-    prop_value.ToRegister();
-
-    // Fetch the array literal while leaving a copy on the stack and
-    // use it to get the elements array.
-    frame_->Dup();
-    Result elements = frame_->Pop();
-    elements.ToRegister();
-    frame_->Spill(elements.reg());
-    // Get the elements FixedArray.
-    __ movq(elements.reg(),
-            FieldOperand(elements.reg(), JSObject::kElementsOffset));
-
-    // Write to the indexed properties array.
-    int offset = i * kPointerSize + FixedArray::kHeaderSize;
-    __ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
-
-    // Update the write barrier for the array address.
-    frame_->Spill(prop_value.reg());  // Overwritten by the write barrier.
-    Result scratch = allocator_->Allocate();
-    ASSERT(scratch.is_valid());
-    __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg());
-  }
-}
-
-
-void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
-  ASSERT(!in_spilled_code());
-  // Call runtime routine to allocate the catch extension object and
-  // assign the exception value to the catch variable.
-  Comment cmnt(masm_, "[ CatchExtensionObject");
-  Load(node->key());
-  Load(node->value());
-  Result result =
-      frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::EmitSlotAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm(), "[ Variable Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  ASSERT(var != NULL);
-  Slot* slot = var->AsSlot();
-  ASSERT(slot != NULL);
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-    Load(node->value());
-
-    // Perform the binary operation.
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    // Construct the implicit binary operation.
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Perform the assignment.
-  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
-    CodeForSourcePosition(node->position());
-    StoreToSlot(slot,
-                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
-  }
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm(), "[ Named Property Assignment");
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-  ASSERT(var == NULL || (prop == NULL && var->is_global()));
-
-  // Initialize name and evaluate the receiver sub-expression if necessary. If
-  // the receiver is trivial it is not placed on the stack at this point, but
-  // loaded whenever actually needed.
-  Handle<String> name;
-  bool is_trivial_receiver = false;
-  if (var != NULL) {
-    name = var->name();
-  } else {
-    Literal* lit = prop->key()->AsLiteral();
-    ASSERT_NOT_NULL(lit);
-    name = Handle<String>::cast(lit->handle());
-    // Do not materialize the receiver on the frame if it is trivial.
-    is_trivial_receiver = prop->obj()->IsTrivial();
-    if (!is_trivial_receiver) Load(prop->obj());
-  }
-
-  // Change to slow case in the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
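-  // Illustrative background (added note): each assignment in a block like
-  // 'this.a = ...; this.b = ...;' would otherwise extend the fast-case
-  // backing store one property at a time; switching to slow properties
-  // first and back afterwards avoids that repeated copying.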
-  if (node->starts_initialization_block()) {
-    // An initialization block consists of assignments of the form
-    // expr.x = ..., so it is never an assignment to a variable; hence
-    // there must be a receiver object.
-    ASSERT_EQ(NULL, var);
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else {
-      frame()->Dup();
-    }
-    Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare for
-  // that add an extra copy of the receiver to the frame, so that it can be
-  // converted back to fast case after the assignment.
-  if (node->ends_initialization_block() && !is_trivial_receiver) {
-    frame()->Dup();
-  }
-
-  // Stack layout:
-  // [tos]   : receiver (only materialized if non-trivial)
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else if (var != NULL) {
-      // The LoadIC stub expects the object in rax.
-      // Freeing rax causes the code generator to load the global into it.
-      frame_->Spill(rax);
-      LoadGlobal();
-    } else {
-      frame()->Dup();
-    }
-    Result value = EmitNamedLoad(name, var != NULL);
-    frame()->Push(&value);
-    Load(node->value());
-
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    // Construct the implicit binary operation.
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : receiver (only materialized if non-trivial)
-  // [tos+2] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(var == NULL || var->mode() != Variable::CONST);
-  ASSERT_NE(Token::INIT_CONST, node->op());
-  if (is_trivial_receiver) {
-    Result value = frame()->Pop();
-    frame()->Push(prop->obj());
-    frame()->Push(&value);
-  }
-  CodeForSourcePosition(node->position());
-  bool is_contextual = (var != NULL);
-  Result answer = EmitNamedStore(name, is_contextual);
-  frame()->Push(&answer);
-
-  // Stack layout:
-  // [tos]   : result
-  // [tos+1] : receiver if at the end of an initialization block
-
-  if (node->ends_initialization_block()) {
-    ASSERT_EQ(NULL, var);
-    // The argument to the runtime call is the receiver.
-    if (is_trivial_receiver) {
-      frame()->Push(prop->obj());
-    } else {
-      // A copy of the receiver is below the value of the assignment.  Swap
-      // the receiver and the value of the assignment expression.
-      Result result = frame()->Pop();
-      Result receiver = frame()->Pop();
-      frame()->Push(&result);
-      frame()->Push(&receiver);
-    }
-    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT_EQ(frame()->height(), original_height + 1);
-}
-
-
-void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Comment cmnt(masm_, "[ Keyed Property Assignment");
-  Property* prop = node->target()->AsProperty();
-  ASSERT_NOT_NULL(prop);
-
-  // Evaluate the receiver subexpression.
-  Load(prop->obj());
-
-  // Change to slow case in the beginning of an initialization block to
-  // avoid the quadratic behavior of repeatedly adding fast properties.
-  if (node->starts_initialization_block()) {
-    frame_->Dup();
-    Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
-  }
-
-  // Change to fast case at the end of an initialization block. To prepare for
-  // that add an extra copy of the receiver to the frame, so that it can be
-  // converted back to fast case after the assignment.
-  if (node->ends_initialization_block()) {
-    frame_->Dup();
-  }
-
-  // Evaluate the key subexpression.
-  Load(prop->key());
-
-  // Stack layout:
-  // [tos]   : key
-  // [tos+1] : receiver
-  // [tos+2] : receiver if at the end of an initialization block
-
-  // Evaluate the right-hand side.
-  if (node->is_compound()) {
-    // For a compound assignment the right-hand side is a binary operation
-    // between the current property value and the actual right-hand side.
-    // Duplicate receiver and key for loading the current property value.
-    frame()->PushElementAt(1);
-    frame()->PushElementAt(1);
-    Result value = EmitKeyedLoad();
-    frame()->Push(&value);
-    Load(node->value());
-
-    // Perform the binary operation.
-    bool overwrite_value = node->value()->ResultOverwriteAllowed();
-    BinaryOperation expr(node);
-    GenericBinaryOperation(&expr,
-                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-  } else {
-    // For non-compound assignment just load the right-hand side.
-    Load(node->value());
-  }
-
-  // Stack layout:
-  // [tos]   : value
-  // [tos+1] : key
-  // [tos+2] : receiver
-  // [tos+3] : receiver if at the end of an initialization block
-
-  // Perform the assignment.  It is safe to ignore constants here.
-  ASSERT(node->op() != Token::INIT_CONST);
-  CodeForSourcePosition(node->position());
-  Result answer = EmitKeyedStore(prop->key()->type());
-  frame()->Push(&answer);
-
-  // Stack layout:
-  // [tos]   : result
-  // [tos+1] : receiver if at the end of an initialization block
-
-  // Change to fast case at the end of an initialization block.
-  if (node->ends_initialization_block()) {
-    // The argument to the runtime call is the extra copy of the receiver,
-    // which is below the value of the assignment.  Swap the receiver and
-    // the value of the assignment expression.
-    Result result = frame()->Pop();
-    Result receiver = frame()->Pop();
-    frame()->Push(&result);
-    frame()->Push(&receiver);
-    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
-  }
-
-  // Stack layout:
-  // [tos]   : result
-
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitAssignment(Assignment* node) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Variable* var = node->target()->AsVariableProxy()->AsVariable();
-  Property* prop = node->target()->AsProperty();
-
-  if (var != NULL && !var->is_global()) {
-    EmitSlotAssignment(node);
-
-  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
-             (var != NULL && var->is_global())) {
-    // Properties whose keys are property names and global variables are
-    // treated as named property references.  We do not need to consider
-    // global 'this' because it is not a valid left-hand side.
-    EmitNamedPropertyAssignment(node);
-
-  } else if (prop != NULL) {
-    // Other properties (including rewritten parameters for a function that
-    // uses arguments) are keyed property assignments.
-    EmitKeyedPropertyAssignment(node);
-
-  } else {
-    // Invalid left-hand side.
-    Load(node->target());
-    Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
-    // The runtime call doesn't actually return but the code generator will
-    // still generate code and expects a certain frame height.
-    frame()->Push(&result);
-  }
-
-  ASSERT(frame()->height() == original_height + 1);
-}
-
-
-void CodeGenerator::VisitThrow(Throw* node) {
-  Comment cmnt(masm_, "[ Throw");
-  Load(node->exception());
-  Result result = frame_->CallRuntime(Runtime::kThrow, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::VisitProperty(Property* node) {
-  Comment cmnt(masm_, "[ Property");
-  Reference property(this, node);
-  property.GetValue();
-}
-
-
-void CodeGenerator::VisitCall(Call* node) {
-  Comment cmnt(masm_, "[ Call");
-
-  ZoneList<Expression*>* args = node->arguments();
-
-  // Check if the function is a variable or a property.
-  Expression* function = node->expression();
-  Variable* var = function->AsVariableProxy()->AsVariable();
-  Property* property = function->AsProperty();
-
-  // ------------------------------------------------------------------------
-  // Fast-case: Use inline caching.
-  // ---
-  // According to ECMA-262, section 11.2.3, page 44, the function to call
-  // must be resolved after the arguments have been evaluated. The IC code
-  // automatically handles this by loading the arguments before the function
-  // is resolved in cache misses (this also holds for megamorphic calls).
-  // ------------------------------------------------------------------------
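-  // Illustrative consequence (added note): in 'f(g())', if g() reassigns
-  // the variable 'f', the reassigned function is the one that gets
-  // invoked, since the callee is resolved only after the arguments.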
-
-  if (var != NULL && var->is_possibly_eval()) {
-    // ----------------------------------
-    // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
-    // ----------------------------------
-
-    // In a call to eval, we first call %ResolvePossiblyDirectEval to
-    // resolve the function we need to call and the receiver of the
-    // call.  Then we call the resolved function using the given
-    // arguments.
-
-    // Prepare the stack for the call to the resolved function.
-    Load(function);
-
-    // Allocate a frame slot for the receiver.
-    frame_->Push(FACTORY->undefined_value());
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-      frame_->SpillTop();
-    }
-
-    // Result to hold the result of the function resolution and the
-    // final result of the eval call.
-    Result result;
-
-    // If we know that eval can only be shadowed by eval-introduced
-    // variables we attempt to load the global eval function directly
-    // in generated code. If we succeed, there is no need to perform a
-    // context lookup in the runtime system.
-    JumpTarget done;
-    if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
-      ASSERT(var->AsSlot()->type() == Slot::LOOKUP);
-      JumpTarget slow;
-      // Prepare the stack for the call to
-      // ResolvePossiblyDirectEvalNoLookup by pushing the loaded
-      // function, the first argument to the eval call and the
-      // receiver.
-      Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(),
-                                                     NOT_INSIDE_TYPEOF,
-                                                     &slow);
-      frame_->Push(&fun);
-      if (arg_count > 0) {
-        frame_->PushElementAt(arg_count);
-      } else {
-        frame_->Push(FACTORY->undefined_value());
-      }
-      frame_->PushParameterAt(-1);
-
-      // Push the strict mode flag.
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-
-      // Resolve the call.
-      result =
-          frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);
-
-      done.Jump(&result);
-      slow.Bind();
-    }
-
-    // Prepare the stack for the call to ResolvePossiblyDirectEval by
-    // pushing the loaded function, the first argument to the eval
-    // call and the receiver.
-    frame_->PushElementAt(arg_count + 1);
-    if (arg_count > 0) {
-      frame_->PushElementAt(arg_count);
-    } else {
-      frame_->Push(FACTORY->undefined_value());
-    }
-    frame_->PushParameterAt(-1);
-
-    // Push the strict mode flag.
-    frame_->Push(Smi::FromInt(strict_mode_flag()));
-
-    // Resolve the call.
-    result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
-
-    // If we generated fast-case code bind the jump-target where fast
-    // and slow case merge.
-    if (done.is_linked()) done.Bind(&result);
-
-    // The runtime call returns a pair of values in rax (function) and
-    // rdx (receiver). Touch up the stack with the right values.
-    Result receiver = allocator_->Allocate(rdx);
-    frame_->SetElementAt(arg_count + 1, &result);
-    frame_->SetElementAt(arg_count, &receiver);
-    receiver.Unuse();
-
-    // Call the function.
-    CodeForSourcePosition(node->position());
-    InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
-    CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
-    result = frame_->CallStub(&call_function, arg_count + 1);
-
-    // Restore the context and overwrite the function on the stack with
-    // the result.
-    frame_->RestoreContextRegister();
-    frame_->SetElementAt(0, &result);
-
-  } else if (var != NULL && !var->is_this() && var->is_global()) {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
-    // ----------------------------------
-
-    // Pass the global object as the receiver and let the IC stub
-    // patch the stack to use the global proxy as 'this' in the
-    // invoked function.
-    LoadGlobal();
-
-    // Load the arguments.
-    int arg_count = args->length();
-    for (int i = 0; i < arg_count; i++) {
-      Load(args->at(i));
-      frame_->SpillTop();
-    }
-
-    // Push the name of the function on the frame.
-    frame_->Push(var->name());
-
-    // Call the IC initialization code.
-    CodeForSourcePosition(node->position());
-    Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT,
-                                       arg_count,
-                                       loop_nesting());
-    frame_->RestoreContextRegister();
-    // Replace the function on the stack with the result.
-    frame_->Push(&result);
-
-  } else if (var != NULL && var->AsSlot() != NULL &&
-             var->AsSlot()->type() == Slot::LOOKUP) {
-    // ----------------------------------
-    // JavaScript examples:
-    //
-    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
-    //
-    //  function f() {};
-    //  function g() {
-    //    eval(...);
-    //    f();  // f could be in extension object.
-    //  }
-    // ----------------------------------
-
-    JumpTarget slow, done;
-    Result function;
-
-    // Generate fast case for loading functions from slots that
-    // correspond to local/global variables or arguments unless they
-    // are shadowed by eval-introduced bindings.
-    EmitDynamicLoadFromSlotFastCase(var->AsSlot(),
-                                    NOT_INSIDE_TYPEOF,
-                                    &function,
-                                    &slow,
-                                    &done);
-
-    slow.Bind();
-    // Load the function from the context.  Sync the frame so we can
-    // push the arguments directly into place.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(rsi);
-    frame_->EmitPush(var->name());
-    frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
-    // The runtime call returns a pair of values in rax and rdx.  The
-    // looked-up function is in rax and the receiver is in rdx.  These
-    // register references are not ref counted here.  We spill them
-    // eagerly since they are arguments to an inevitable call (and
-    // cannot be shared with the arguments).
-    ASSERT(!allocator()->is_used(rax));
-    frame_->EmitPush(rax);
-
-    // Load the receiver.
-    ASSERT(!allocator()->is_used(rdx));
-    frame_->EmitPush(rdx);
-
-    // If fast case code has been generated, emit code to push the
-    // function and receiver and have the slow path jump around this
-    // code.
-    if (done.is_linked()) {
-      JumpTarget call;
-      call.Jump();
-      done.Bind(&function);
-      frame_->Push(&function);
-      LoadGlobalReceiver();
-      call.Bind();
-    }
-
-    // Call the function.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-
-  } else if (property != NULL) {
-    // Check if the key is a literal string.
-    Literal* literal = property->key()->AsLiteral();
-
-    if (literal != NULL && literal->handle()->IsSymbol()) {
-      // ------------------------------------------------------------------
-      // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
-      // ------------------------------------------------------------------
-
-      Handle<String> name = Handle<String>::cast(literal->handle());
-
-      if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
-          name->IsEqualTo(CStrVector("apply")) &&
-          args->length() == 2 &&
-          args->at(1)->AsVariableProxy() != NULL &&
-          args->at(1)->AsVariableProxy()->IsArguments()) {
-        // Use the optimized Function.prototype.apply that avoids
-        // allocating lazily allocated arguments objects.
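-        // The guarded pattern is, illustratively, a call such as
-        // 'receiver.fn.apply(x, arguments)' in a function whose
-        // arguments object has not been materialized.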
-        CallApplyLazy(property->obj(),
-                      args->at(0),
-                      args->at(1)->AsVariableProxy(),
-                      node->position());
-
-      } else {
-        // Push the receiver onto the frame.
-        Load(property->obj());
-
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-          frame_->SpillTop();
-        }
-
-        // Push the name of the function onto the frame.
-        frame_->Push(name);
-
-        // Call the IC initialization code.
-        CodeForSourcePosition(node->position());
-        Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
-                                           arg_count,
-                                           loop_nesting());
-        frame_->RestoreContextRegister();
-        frame_->Push(&result);
-      }
-
-    } else {
-      // -------------------------------------------
-      // JavaScript example: 'array[index](1, 2, 3)'
-      // -------------------------------------------
-
-      // Load the function to call from the property through a reference.
-      if (property->is_synthetic()) {
-        Reference ref(this, property, false);
-        ref.GetValue();
-        // Use global object as receiver.
-        LoadGlobalReceiver();
-        // Call the function.
-        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
-      } else {
-        // Push the receiver onto the frame.
-        Load(property->obj());
-
-        // Load the name of the function.
-        Load(property->key());
-
-        // Swap the name of the function and the receiver on the stack to follow
-        // the calling convention for call ICs.
-        Result key = frame_->Pop();
-        Result receiver = frame_->Pop();
-        frame_->Push(&key);
-        frame_->Push(&receiver);
-        key.Unuse();
-        receiver.Unuse();
-
-        // Load the arguments.
-        int arg_count = args->length();
-        for (int i = 0; i < arg_count; i++) {
-          Load(args->at(i));
-          frame_->SpillTop();
-        }
-
-        // Place the key on top of stack and call the IC initialization code.
-        frame_->PushElementAt(arg_count + 1);
-        CodeForSourcePosition(node->position());
-        Result result = frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
-                                                arg_count,
-                                                loop_nesting());
-        frame_->Drop();  // Drop the key still on the stack.
-        frame_->RestoreContextRegister();
-        frame_->Push(&result);
-      }
-    }
-  } else {
-    // ----------------------------------
-    // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
-    // ----------------------------------
-
-    // Load the function.
-    Load(function);
-
-    // Pass the global proxy as the receiver.
-    LoadGlobalReceiver();
-
-    // Call the function.
-    CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
-  }
-}
-
-
-void CodeGenerator::VisitCallNew(CallNew* node) {
-  Comment cmnt(masm_, "[ CallNew");
-
-  // According to ECMA-262, section 11.2.2, page 44, the function
-  // expression in new calls must be evaluated before the
-  // arguments. This is different from ordinary calls, where the
-  // actual function to call is resolved after the arguments have been
-  // evaluated.
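-  // Illustratively: in 'new f(g())', 'f' is fetched before g() runs, so
-  // a reassignment of 'f' inside g() does not affect which constructor
-  // is invoked (added note).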
-
-  // Push constructor on the stack.  If it's not a function it's used as
-  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
-  // ignored.
-  Load(node->expression());
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = node->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  CodeForSourcePosition(node->position());
-  Result result = frame_->CallConstructor(arg_count);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition is_smi = masm_->CheckSmi(value.reg());
-  value.Unuse();
-  destination()->Split(is_smi);
-}
-
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  // Conditionally generate a log call.
-  // Args:
-  //   0 (literal string): The type of logging (corresponds to the flags).
-  //     This is used to determine whether or not to generate the log call.
-  //   1 (string): Format string.  Access the string at argument index 2
-  //     with '%2s' (see Logger::LogRuntime for all the formats).
-  //   2 (array): Arguments to the format string.
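-  //
-  // A call might look like (hypothetical example):
-  //   %_Log('timer', 'log-message,%2s', [a, b, c])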
-  ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
-  if (ShouldGenerateLog(args->at(0))) {
-    Load(args->at(1));
-    Load(args->at(2));
-    frame_->CallRuntime(Runtime::kLog, 2);
-  }
-#endif
-  // Finally, we're expected to leave a value on the top of the stack.
-  frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition non_negative_smi = masm_->CheckNonNegativeSmi(value.reg());
-  value.Unuse();
-  destination()->Split(non_negative_smi);
-}
-
-
-class DeferredStringCharCodeAt : public DeferredCode {
- public:
-  DeferredStringCharCodeAt(Register object,
-                           Register index,
-                           Register scratch,
-                           Register result)
-      : result_(result),
-        char_code_at_generator_(object,
-                                index,
-                                scratch,
-                                result,
-                                &need_conversion_,
-                                &need_conversion_,
-                                &index_out_of_range_,
-                                STRING_INDEX_IS_NUMBER) {}
-
-  StringCharCodeAtGenerator* fast_case_generator() {
-    return &char_code_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_code_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move the undefined value into the result register, which will
-    // trigger conversion.
-    __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // NaN.
-    __ LoadRoot(result_, Heap::kNanValueRootIndex);
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharCodeAtGenerator char_code_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charCodeAt() call
-// or returns a smi in order to trigger conversion.
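-// Illustratively: for 'abc'.charCodeAt(1) the fast path produces the smi
-// 98; the deferred code handles the conversion cases and returns NaN for
-// an out-of-range index (added note).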
-void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharCodeAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Result index = frame_->Pop();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  index.ToRegister();
-  // We might mutate the object register.
-  frame_->Spill(object.reg());
-
-  // We need two extra registers.
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-  Result scratch = allocator()->Allocate();
-  ASSERT(scratch.is_valid());
-
-  DeferredStringCharCodeAt* deferred =
-      new DeferredStringCharCodeAt(object.reg(),
-                                   index.reg(),
-                                   scratch.reg(),
-                                   result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
-
-class DeferredStringCharFromCode : public DeferredCode {
- public:
-  DeferredStringCharFromCode(Register code,
-                             Register result)
-      : char_from_code_generator_(code, result) {}
-
-  StringCharFromCodeGenerator* fast_case_generator() {
-    return &char_from_code_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_from_code_generator_.GenerateSlow(masm(), call_helper);
-  }
-
- private:
-  StringCharFromCodeGenerator char_from_code_generator_;
-};
-
-
-// Generates code for creating a one-char string from a char code.
-void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharFromCode");
-  ASSERT(args->length() == 1);
-
-  Load(args->at(0));
-
-  Result code = frame_->Pop();
-  code.ToRegister();
-  ASSERT(code.is_valid());
-
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-
-  DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
-      code.reg(), result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
-
-class DeferredStringCharAt : public DeferredCode {
- public:
-  DeferredStringCharAt(Register object,
-                       Register index,
-                       Register scratch1,
-                       Register scratch2,
-                       Register result)
-      : result_(result),
-        char_at_generator_(object,
-                           index,
-                           scratch1,
-                           scratch2,
-                           result,
-                           &need_conversion_,
-                           &need_conversion_,
-                           &index_out_of_range_,
-                           STRING_INDEX_IS_NUMBER) {}
-
-  StringCharAtGenerator* fast_case_generator() {
-    return &char_at_generator_;
-  }
-
-  virtual void Generate() {
-    VirtualFrameRuntimeCallHelper call_helper(frame_state());
-    char_at_generator_.GenerateSlow(masm(), call_helper);
-
-    __ bind(&need_conversion_);
-    // Move smi zero into the result register, which will trigger
-    // conversion.
-    __ Move(result_, Smi::FromInt(0));
-    __ jmp(exit_label());
-
-    __ bind(&index_out_of_range_);
-    // When the index is out of range, the spec requires us to return
-    // the empty string.
-    __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
-    __ jmp(exit_label());
-  }
-
- private:
-  Register result_;
-
-  Label need_conversion_;
-  Label index_out_of_range_;
-
-  StringCharAtGenerator char_at_generator_;
-};
-
-
-// This generates code that performs a String.prototype.charAt() call
-// or returns a smi in order to trigger conversion.
-void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
-  Comment(masm_, "[ GenerateStringCharAt");
-  ASSERT(args->length() == 2);
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Result index = frame_->Pop();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  index.ToRegister();
-  // We might mutate the object register.
-  frame_->Spill(object.reg());
-
-  // We need three extra registers.
-  Result result = allocator()->Allocate();
-  ASSERT(result.is_valid());
-  Result scratch1 = allocator()->Allocate();
-  ASSERT(scratch1.is_valid());
-  Result scratch2 = allocator()->Allocate();
-  ASSERT(scratch2.is_valid());
-
-  DeferredStringCharAt* deferred =
-      new DeferredStringCharAt(object.reg(),
-                               index.reg(),
-                               scratch1.reg(),
-                               scratch2.reg(),
-                               result.reg());
-  deferred->fast_case_generator()->GenerateFast(masm_);
-  deferred->BindExit();
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition is_smi = masm_->CheckSmi(value.reg());
-  destination()->false_target()->Branch(is_smi);
-  // It is a heap object - get map.
-  // Check if the object is a JS array or not.
-  __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
-  value.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition is_smi = masm_->CheckSmi(value.reg());
-  destination()->false_target()->Branch(is_smi);
-  // It is a heap object - get map.
-  // Check if the object is a regexp.
-  __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
-  value.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
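-  // So, illustratively, %_IsObject(null) is true while %_IsObject('x'),
-  // %_IsObject(undefined) and %_IsObject(function(){}) are false
-  // (added note).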
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  Condition is_smi = masm_->CheckSmi(obj.reg());
-  destination()->false_target()->Branch(is_smi);
-
-  __ Move(kScratchRegister, FACTORY->null_value());
-  __ cmpq(obj.reg(), kScratchRegister);
-  destination()->true_target()->Branch(equal);
-
-  __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  // Undetectable objects behave like undefined when tested with typeof.
-  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsUndetectable));
-  destination()->false_target()->Branch(not_zero);
-  __ movzxbq(kScratchRegister,
-             FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
-  __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
-  destination()->false_target()->Branch(below);
-  __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
-  obj.Unuse();
-  destination()->Split(below_equal);
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' ||
-  // typeof(arg) === 'function').
-  // It includes undetectable objects (as opposed to IsObject).
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  Condition is_smi = masm_->CheckSmi(value.reg());
-  destination()->false_target()->Branch(is_smi);
-  // Check that this is an object.
-  __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
-  value.Unuse();
-  destination()->Split(above_equal);
-}
-
-
-// Deferred code to check whether the String JavaScript object is safe to use
-// with the default valueOf. This code is called after the bit that caches
-// this information in the map has been checked; on entry the object's map is
-// in the map_result_ register. On return the register map_result_ contains 1
-// for true and 0 for false.
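-//
-// Illustratively (added note): a wrapper created by new String("x") is safe
-// as long as neither it nor String.prototype defines its own 'valueOf';
-// the slow path below verifies exactly that before setting the cache bit.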
-class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
- public:
-  DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
-                                               Register map_result,
-                                               Register scratch1,
-                                               Register scratch2)
-      : object_(object),
-        map_result_(map_result),
-        scratch1_(scratch1),
-        scratch2_(scratch2) { }
-
-  virtual void Generate() {
-    Label false_result;
-
-    // Check that map is loaded as expected.
-    if (FLAG_debug_code) {
-      __ cmpq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-      __ Assert(equal, "Map not in expected register");
-    }
-
-    // Check for fast case object. Generate false result for slow case object.
-    __ movq(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset));
-    __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
-    __ CompareRoot(scratch1_, Heap::kHashTableMapRootIndex);
-    __ j(equal, &false_result);
-
-    // Look for valueOf symbol in the descriptor array, and indicate false if
-    // found. The type is not checked, so if it is a transition it is a false
-    // negative.
-    __ movq(map_result_,
-            FieldOperand(map_result_, Map::kInstanceDescriptorsOffset));
-    __ movq(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset));
-    // map_result_: descriptor array
-    // scratch1_: length of descriptor array
-    // Calculate the end of the descriptor array.
-    SmiIndex index = masm_->SmiToIndex(scratch2_, scratch1_, kPointerSizeLog2);
-    __ lea(scratch1_,
-           Operand(
-               map_result_, index.reg, index.scale, FixedArray::kHeaderSize));
-    // Calculate location of the first key name.
-    __ addq(map_result_,
-            Immediate(FixedArray::kHeaderSize +
-                      DescriptorArray::kFirstIndex * kPointerSize));
-    // Loop through all the keys in the descriptor array. If one of these
-    // is the symbol valueOf, the result is false.
-    Label entry, loop;
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ movq(scratch2_, FieldOperand(map_result_, 0));
-    __ Cmp(scratch2_, FACTORY->value_of_symbol());
-    __ j(equal, &false_result);
-    __ addq(map_result_, Immediate(kPointerSize));
-    __ bind(&entry);
-    __ cmpq(map_result_, scratch1_);
-    __ j(not_equal, &loop);
-
-    // Reload map as register map_result_ was used as temporary above.
-    __ movq(map_result_, FieldOperand(object_, HeapObject::kMapOffset));
-
-    // If a valueOf property is not found on the object, check that its
-    // prototype is the unmodified String prototype. If not, the result is
-    // false.
-    __ movq(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset));
-    __ testq(scratch1_, Immediate(kSmiTagMask));
-    __ j(zero, &false_result);
-    __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset));
-    __ movq(scratch2_,
-            Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ movq(scratch2_,
-            FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset));
-    __ cmpq(scratch1_,
-            ContextOperand(
-                scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
-    __ j(not_equal, &false_result);
-    // Set the bit in the map to indicate that it has been checked and is
-    // safe for the default valueOf, and set a true result.
-    __ or_(FieldOperand(map_result_, Map::kBitField2Offset),
-           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
-    __ Set(map_result_, 1);
-    __ jmp(exit_label());
-    __ bind(&false_result);
-    // Set false result.
-    __ Set(map_result_, 0);
-  }
-
- private:
-  Register object_;
-  Register map_result_;
-  Register scratch1_;
-  Register scratch2_;
-};
-
-
-void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
-    ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();  // Pop the string wrapper.
-  obj.ToRegister();
-  ASSERT(obj.is_valid());
-  if (FLAG_debug_code) {
-    __ AbortIfSmi(obj.reg());
-  }
-
-  // Check whether this map has already been checked to be safe for default
-  // valueOf.
-  Result map_result = allocator()->Allocate();
-  ASSERT(map_result.is_valid());
-  __ movq(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  __ testb(FieldOperand(map_result.reg(), Map::kBitField2Offset),
-           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
-  destination()->true_target()->Branch(not_zero);
-
-  // We need an additional two scratch registers for the deferred code.
-  Result temp1 = allocator()->Allocate();
-  ASSERT(temp1.is_valid());
-  Result temp2 = allocator()->Allocate();
-  ASSERT(temp2.is_valid());
-
-  DeferredIsStringWrapperSafeForDefaultValueOf* deferred =
-      new DeferredIsStringWrapperSafeForDefaultValueOf(
-          obj.reg(), map_result.reg(), temp1.reg(), temp2.reg());
-  deferred->Branch(zero);
-  deferred->BindExit();
-  __ testq(map_result.reg(), map_result.reg());
-  obj.Unuse();
-  map_result.Unuse();
-  temp1.Unuse();
-  temp2.Unuse();
-  destination()->Split(not_equal);
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
-  // This generates a fast version of:
-  // (%_ClassOf(arg) === 'Function')
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  Condition is_smi = masm_->CheckSmi(obj.reg());
-  destination()->false_target()->Branch(is_smi);
-  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
-  obj.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  Condition is_smi = masm_->CheckSmi(obj.reg());
-  destination()->false_target()->Branch(is_smi);
-  __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
-  __ movzxbl(kScratchRegister,
-             FieldOperand(kScratchRegister, Map::kBitFieldOffset));
-  __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
-  obj.Unuse();
-  destination()->Split(not_zero);
-}
-
-
-void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
-  // Get the frame pointer for the calling frame.
-  Result fp = allocator()->Allocate();
-  __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-
-  // Skip the arguments adaptor frame if it exists.
-  Label check_frame_marker;
-  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &check_frame_marker);
-  __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
-
-  // Check the marker in the calling frame.
-  __ bind(&check_frame_marker);
-  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
-         Smi::FromInt(StackFrame::CONSTRUCT));
-  fp.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-
-  Result fp = allocator_->Allocate();
-  Result result = allocator_->Allocate();
-  ASSERT(fp.is_valid() && result.is_valid());
-
-  Label exit;
-
-  // Get the number of formal parameters.
-  __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
-
-  // Check if the calling frame is an arguments adaptor frame.
-  __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &exit);
-
-  // Arguments adaptor case: Read the arguments length from the
-  // adaptor frame.
-  __ movq(result.reg(),
-          Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
-
-  __ bind(&exit);
-  result.set_type_info(TypeInfo::Smi());
-  if (FLAG_debug_code) {
-    __ AbortIfNotSmi(result.reg());
-  }
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  JumpTarget leave, null, function, non_function_constructor;
-  Load(args->at(0));  // Load the object.
-  Result obj = frame_->Pop();
-  obj.ToRegister();
-  frame_->Spill(obj.reg());
-
-  // If the object is a smi, we return null.
-  Condition is_smi = masm_->CheckSmi(obj.reg());
-  null.Branch(is_smi);
-
-  // Check that the object is a JS object but take special care of JS
-  // functions to make sure they have 'Function' as their class.
-
-  __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
-  null.Branch(below);
-
-  // As long as JS_FUNCTION_TYPE is the last instance type and it is
-  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
-  // LAST_JS_OBJECT_TYPE.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
-  function.Branch(equal);
-
-  // Check if the constructor in the map is a function.
-  __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
-  __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
-  non_function_constructor.Branch(not_equal);
-
-  // The obj register now contains the constructor function. Grab the
-  // instance class name from there.
-  __ movq(obj.reg(),
-          FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
-  __ movq(obj.reg(),
-          FieldOperand(obj.reg(),
-                       SharedFunctionInfo::kInstanceClassNameOffset));
-  frame_->Push(&obj);
-  leave.Jump();
-
-  // Functions have class 'Function'.
-  function.Bind();
-  frame_->Push(FACTORY->function_class_symbol());
-  leave.Jump();
-
-  // Objects with a non-function constructor have class 'Object'.
-  non_function_constructor.Bind();
-  frame_->Push(FACTORY->Object_symbol());
-  leave.Jump();
-
-  // Non-JS objects have class null.
-  null.Bind();
-  frame_->Push(FACTORY->null_value());
-
-  // All done.
-  leave.Bind();
-}
-
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  JumpTarget leave;
-  Load(args->at(0));  // Load the object.
-  frame_->Dup();
-  Result object = frame_->Pop();
-  object.ToRegister();
-  ASSERT(object.is_valid());
-  // if (object->IsSmi()) return object.
-  Condition is_smi = masm_->CheckSmi(object.reg());
-  leave.Branch(is_smi);
-  // It is a heap object - get map.
-  Result temp = allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  // if (!object->IsJSValue()) return object.
-  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
-  leave.Branch(not_equal);
-  __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
-  object.Unuse();
-  frame_->SetElementAt(0, &temp);
-  leave.Bind();
-}
-
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  JumpTarget leave;
-  Load(args->at(0));  // Load the object.
-  Load(args->at(1));  // Load the value.
-  Result value = frame_->Pop();
-  Result object = frame_->Pop();
-  value.ToRegister();
-  object.ToRegister();
-
-  // if (object->IsSmi()) return value.
-  Condition is_smi = masm_->CheckSmi(object.reg());
-  leave.Branch(is_smi, &value);
-
-  // It is a heap object - get its map.
-  Result scratch = allocator_->Allocate();
-  ASSERT(scratch.is_valid());
-  // if (!object->IsJSValue()) return value.
-  __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
-  leave.Branch(not_equal, &value);
-
-  // Store the value.
-  __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
-  // Update the write barrier.  Save the value as it will be
-  // overwritten by the write barrier code and is needed afterward.
-  Result duplicate_value = allocator_->Allocate();
-  ASSERT(duplicate_value.is_valid());
-  __ movq(duplicate_value.reg(), value.reg());
-  // The object register is also overwritten by the write barrier and
-  // possibly aliased in the frame.
-  frame_->Spill(object.reg());
-  __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
-                 scratch.reg());
-  object.Unuse();
-  scratch.Unuse();
-  duplicate_value.Unuse();
-
-  // Leave.
-  leave.Bind(&value);
-  frame_->Push(&value);
-}
-
-
-void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-
-  // ArgumentsAccessStub expects the key in rdx and the formal
-  // parameter count in rax.
-  Load(args->at(0));
-  Result key = frame_->Pop();
-  // Explicitly create a constant result.
-  Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
-  // Call the shared stub to get to arguments[key].
-  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
-  Result result = frame_->CallStub(&stub, &key, &count);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-
-  // Load the two objects into registers and perform the comparison.
-  Load(args->at(0));
-  Load(args->at(1));
-  Result right = frame_->Pop();
-  Result left = frame_->Pop();
-  right.ToRegister();
-  left.ToRegister();
-  __ cmpq(right.reg(), left.reg());
-  right.Unuse();
-  left.Unuse();
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-  // The RBP value is aligned, so it carries a smi tag (it is not an actual
-  // smi, though, so it must not be treated as one).
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  Result rbp_as_smi = allocator_->Allocate();
-  ASSERT(rbp_as_smi.is_valid());
-  __ movq(rbp_as_smi.reg(), rbp);
-  frame_->Push(&rbp_as_smi);
-}
-
-
-void CodeGenerator::GenerateRandomHeapNumber(
-    ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 0);
-  frame_->SpillAll();
-
-  Label slow_allocate_heapnumber;
-  Label heapnumber_allocated;
-  __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
-  __ jmp(&heapnumber_allocated);
-
-  __ bind(&slow_allocate_heapnumber);
-  // Allocate a heap number.
-  __ CallRuntime(Runtime::kNumberAlloc, 0);
-  __ movq(rbx, rax);
-
-  __ bind(&heapnumber_allocated);
-
-  // Return a random uint32 number in rax.
-  // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
-  __ PrepareCallCFunction(0);
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
-
-  // Convert 32 random bits in rax to 0.(32 random bits) in a double
-  // by computing:
-  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
-  __ movl(rcx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
-  __ movd(xmm1, rcx);
-  __ movd(xmm0, rax);
-  __ cvtss2sd(xmm1, xmm1);
-  __ xorpd(xmm0, xmm1);
-  __ subsd(xmm0, xmm1);
-  __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
-
-  __ movq(rax, rbx);
-  Result result = allocator_->Allocate(rax);
-  frame_->Push(&result);
-}
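
The xmm sequence above is a classic bit trick: plant the 32 random bits in the low mantissa of 1.0 x 2^20, then subtract 1.0 x 2^20, leaving random_bits * 2^-32 in [0, 1). A minimal scalar C++ sketch of the same computation, independent of any V8 types:

    #include <cstdint>
    #include <cstring>

    // Convert 32 random bits into a double in [0, 1), mirroring the
    // movd/cvtss2sd/xorpd/subsd sequence above.
    double RandomBitsToDouble(uint32_t random_bits) {
      const double base = 1048576.0;  // 1.0 x 2^20, i.e. 0x4130000000000000.
      uint64_t bits;
      std::memcpy(&bits, &base, sizeof(bits));
      bits ^= random_bits;  // Low 32 mantissa bits become the random bits.
      double combined;
      std::memcpy(&combined, &bits, sizeof(combined));
      // combined == 2^20 + random_bits * 2^-32, so the difference is
      // exactly the random fraction.
      return combined - base;
    }
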
-
-
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringAddStub stub(NO_STRING_ADD_FLAGS);
-  Result answer = frame_->CallStub(&stub, 2);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  SubStringStub stub;
-  Result answer = frame_->CallStub(&stub, 3);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-
-  StringCompareStub stub;
-  Result answer = frame_->CallStub(&stub, 2);
-  frame_->Push(&answer);
-}
-
-
-void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 4);
-
-  // Load the arguments on the stack and call the runtime system.
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-  Load(args->at(3));
-  RegExpExecStub stub;
-  Result result = frame_->CallStub(&stub, 4);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
-  ASSERT_EQ(3, args->length());
-  Load(args->at(0));  // Size of array, smi.
-  Load(args->at(1));  // "index" property value.
-  Load(args->at(2));  // "input" property value.
-  RegExpConstructResultStub stub;
-  Result result = frame_->CallStub(&stub, 3);
-  frame_->Push(&result);
-}
-
-
-class DeferredSearchCache: public DeferredCode {
- public:
-  DeferredSearchCache(Register dst,
-                      Register cache,
-                      Register key,
-                      Register scratch)
-      : dst_(dst), cache_(cache), key_(key), scratch_(scratch) {
-    set_comment("[ DeferredSearchCache");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;    // On invocation holds the finger index (as int32);
-                    // on exit holds the value that was looked up.
-  Register cache_;  // instance of JSFunctionResultCache.
-  Register key_;    // key being looked up.
-  Register scratch_;
-};
-
-
-// Returns the operand of the element at |index| + |additional_offset| in the
-// FixedArray whose pointer is held in |array|.  |index| is an int32.
-static Operand ArrayElement(Register array,
-                            Register index,
-                            int additional_offset = 0) {
-  int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
-  return FieldOperand(array, index, times_pointer_size, offset);
-}
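
For reference, the effective address this operand resolves to can be written out as plain integer arithmetic. A sketch; the tag and header constants below are the usual V8 values but should be read as assumptions here:

    #include <cstdint>

    // Effective address of ArrayElement(array, index, additional_offset).
    uintptr_t ArrayElementAddress(uintptr_t array, int32_t index,
                                  int additional_offset) {
      const int kHeapObjectTag = 1;                        // assumption
      const int kPointerSize = 8;                          // x64
      const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
      return array - kHeapObjectTag   // Untag (what FieldOperand does).
             + kFixedArrayHeaderSize  // Skip the header.
             + (index + additional_offset) * kPointerSize;
    }
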
-
-
-void DeferredSearchCache::Generate() {
-  Label first_loop, search_further, second_loop, cache_miss;
-
-  Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
-  Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
-
-  // Check the cache from finger to start of the cache.
-  __ bind(&first_loop);
-  __ subl(dst_, kEntrySizeImm);
-  __ cmpl(dst_, kEntriesIndexImm);
-  __ j(less, &search_further);
-
-  __ cmpq(ArrayElement(cache_, dst_), key_);
-  __ j(not_equal, &first_loop);
-
-  __ Integer32ToSmiField(
-      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
-  __ movq(dst_, ArrayElement(cache_, dst_, 1));
-  __ jmp(exit_label());
-
-  __ bind(&search_further);
-
-  // Check the cache from end of cache up to finger.
-  __ SmiToInteger32(dst_,
-                    FieldOperand(cache_,
-                                 JSFunctionResultCache::kCacheSizeOffset));
-  __ SmiToInteger32(scratch_,
-                    FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
-
-  __ bind(&second_loop);
-  __ subl(dst_, kEntrySizeImm);
-  __ cmpl(dst_, scratch_);
-  __ j(less_equal, &cache_miss);
-
-  __ cmpq(ArrayElement(cache_, dst_), key_);
-  __ j(not_equal, &second_loop);
-
-  __ Integer32ToSmiField(
-      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
-  __ movq(dst_, ArrayElement(cache_, dst_, 1));
-  __ jmp(exit_label());
-
-  __ bind(&cache_miss);
-  __ push(cache_);  // Store a reference to the cache.
-  __ push(key_);  // Store the key.
-  __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ push(key_);
-  // On x64 the function must be in rdi.
-  __ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
-  ParameterCount expected(1);
-  __ InvokeFunction(rdi, expected, CALL_FUNCTION);
-
-  // Find a place to put the new cached value.
-  Label add_new_entry, update_cache;
-  __ movq(rcx, Operand(rsp, kPointerSize));  // restore the cache
-  // Possible optimization: the cache size is constant for a given cache,
-  // so technically we could use a constant here.  However, on a cache
-  // miss this optimization would hardly matter.
-
-  // Check whether a new entry can be added to the cache.
-  __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ SmiToInteger32(r9,
-                    FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
-  __ cmpl(rbx, r9);
-  __ j(greater, &add_new_entry);
-
-  // Check whether the entry after the finger can be evicted.
-  __ SmiToInteger32(rdx,
-                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ addl(rdx, kEntrySizeImm);
-  Label forward;
-  __ cmpl(rbx, rdx);
-  __ j(greater, &forward);
-  // Need to wrap around to the start of the cache.
-  __ movl(rdx, kEntriesIndexImm);
-  __ bind(&forward);
-  __ movl(r9, rdx);
-  __ jmp(&update_cache);
-
-  __ bind(&add_new_entry);
-  // r9 holds cache size as int32.
-  __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
-  __ Integer32ToSmiField(
-      FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
-
-  // Update the cache itself.
-  // r9 holds the index as int32.
-  __ bind(&update_cache);
-  __ pop(rbx);  // restore the key
-  __ Integer32ToSmiField(
-      FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
-  // Store key.
-  __ movq(ArrayElement(rcx, r9), rbx);
-  __ RecordWrite(rcx, 0, rbx, r9);
-
-  // Store value.
-  __ pop(rcx);  // restore the cache.
-  __ SmiToInteger32(rdx,
-                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ incl(rdx);
-  // Back up rax, because the RecordWrite macro clobbers its arguments.
-  __ movq(rbx, rax);
-  __ movq(ArrayElement(rcx, rdx), rax);
-  __ RecordWrite(rcx, 0, rbx, rdx);
-
-  if (!dst_.is(rax)) {
-    __ movq(dst_, rax);
-  }
-}
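
Stripped of registers and smi tags, the deferred lookup above is two backward scans over a flat key/value array, anchored at a moving "finger". A sketch with hypothetical names; the layout constants stand in for the JSFunctionResultCache ones and are assumptions:

    #include <cstdint>

    struct FlatCache {
      static const int kEntriesIndex = 4;  // assumption: first key slot
      static const int kEntrySize = 2;     // key followed by value
      intptr_t* slots;
      int size;    // One past the last used slot.
      int finger;  // Slot index of the most recently hit key.
    };

    // Returns the slot index of the value on a hit, or -1 on a miss.
    int Lookup(FlatCache* cache, intptr_t key) {
      // The finger entry itself was already checked inline, so start below it.
      for (int i = cache->finger - FlatCache::kEntrySize;
           i >= FlatCache::kEntriesIndex; i -= FlatCache::kEntrySize) {
        if (cache->slots[i] == key) { cache->finger = i; return i + 1; }
      }
      // Then scan from the end of the cache down to (excluding) the finger.
      for (int i = cache->size - FlatCache::kEntrySize; i > cache->finger;
           i -= FlatCache::kEntrySize) {
        if (cache->slots[i] == key) { cache->finger = i; return i + 1; }
      }
      return -1;  // Miss: the real code calls the factory, then caches.
    }
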
-
-
-void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-
-  ASSERT_NE(NULL, args->at(0)->AsLiteral());
-  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
-
-  Handle<FixedArray> jsfunction_result_caches(
-      Isolate::Current()->global_context()->jsfunction_result_caches());
-  if (jsfunction_result_caches->length() <= cache_id) {
-    __ Abort("Attempt to use undefined cache.");
-    frame_->Push(FACTORY->undefined_value());
-    return;
-  }
-
-  Load(args->at(1));
-  Result key = frame_->Pop();
-  key.ToRegister();
-
-  Result cache = allocator()->Allocate();
-  ASSERT(cache.is_valid());
-  __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX));
-  __ movq(cache.reg(),
-          FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset));
-  __ movq(cache.reg(),
-          ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX));
-  __ movq(cache.reg(),
-          FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id)));
-
-  Result tmp = allocator()->Allocate();
-  ASSERT(tmp.is_valid());
-
-  Result scratch = allocator()->Allocate();
-  ASSERT(scratch.is_valid());
-
-  DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(),
-                                                          cache.reg(),
-                                                          key.reg(),
-                                                          scratch.reg());
-
-  const int kFingerOffset =
-      FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
-  // Load the finger index (stored as a smi) into tmp.reg() as an int32.
-  __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
-  __ cmpq(key.reg(), FieldOperand(cache.reg(),
-                                  tmp.reg(), times_pointer_size,
-                                  FixedArray::kHeaderSize));
-  deferred->Branch(not_equal);
-  __ movq(tmp.reg(), FieldOperand(cache.reg(),
-                                  tmp.reg(), times_pointer_size,
-                                  FixedArray::kHeaderSize + kPointerSize));
-
-  deferred->BindExit();
-  frame_->Push(&tmp);
-}
-
-
-void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-
-  // Load the argument on the stack and jump to the runtime.
-  Load(args->at(0));
-
-  NumberToStringStub stub;
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-class DeferredSwapElements: public DeferredCode {
- public:
-  DeferredSwapElements(Register object, Register index1, Register index2)
-      : object_(object), index1_(index1), index2_(index2) {
-    set_comment("[ DeferredSwapElements");
-  }
-
-  virtual void Generate();
-
- private:
-  Register object_, index1_, index2_;
-};
-
-
-void DeferredSwapElements::Generate() {
-  __ push(object_);
-  __ push(index1_);
-  __ push(index2_);
-  __ CallRuntime(Runtime::kSwapElements, 3);
-}
-
-
-void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
-  Comment cmnt(masm_, "[ GenerateSwapElements");
-
-  ASSERT_EQ(3, args->length());
-
-  Load(args->at(0));
-  Load(args->at(1));
-  Load(args->at(2));
-
-  Result index2 = frame_->Pop();
-  index2.ToRegister();
-
-  Result index1 = frame_->Pop();
-  index1.ToRegister();
-
-  Result object = frame_->Pop();
-  object.ToRegister();
-
-  Result tmp1 = allocator()->Allocate();
-  tmp1.ToRegister();
-  Result tmp2 = allocator()->Allocate();
-  tmp2.ToRegister();
-
-  frame_->Spill(object.reg());
-  frame_->Spill(index1.reg());
-  frame_->Spill(index2.reg());
-
-  DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(),
-                                                            index1.reg(),
-                                                            index2.reg());
-
-  // Fetch the map and check whether the array is in the fast case.
-  // Check that the object doesn't require security checks and
-  // has no indexed interceptor.
-  __ CmpObjectType(object.reg(), JS_ARRAY_TYPE, tmp1.reg());
-  deferred->Branch(not_equal);
-  __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
-           Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
-  deferred->Branch(not_zero);
-
-  // Check the object's elements are in fast case and writable.
-  __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset));
-  __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  deferred->Branch(not_equal);
-
-  // Check that both indices are smis.
-  Condition both_smi = masm()->CheckBothSmi(index1.reg(), index2.reg());
-  deferred->Branch(NegateCondition(both_smi));
-
-  // Check that both indices are valid.
-  __ movq(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset));
-  __ SmiCompare(tmp2.reg(), index1.reg());
-  deferred->Branch(below_equal);
-  __ SmiCompare(tmp2.reg(), index2.reg());
-  deferred->Branch(below_equal);
-
-  // Bring addresses into index1 and index2.
-  __ SmiToInteger32(index1.reg(), index1.reg());
-  __ lea(index1.reg(), FieldOperand(tmp1.reg(),
-                                    index1.reg(),
-                                    times_pointer_size,
-                                    FixedArray::kHeaderSize));
-  __ SmiToInteger32(index2.reg(), index2.reg());
-  __ lea(index2.reg(), FieldOperand(tmp1.reg(),
-                                    index2.reg(),
-                                    times_pointer_size,
-                                    FixedArray::kHeaderSize));
-
-  // Swap elements.
-  __ movq(object.reg(), Operand(index1.reg(), 0));
-  __ movq(tmp2.reg(), Operand(index2.reg(), 0));
-  __ movq(Operand(index2.reg(), 0), object.reg());
-  __ movq(Operand(index1.reg(), 0), tmp2.reg());
-
-  Label done;
-  __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
-  // Possible optimization: check that both values are smis by OR-ing
-  // them together and testing the result against the smi tag mask.
-
-  __ movq(tmp2.reg(), tmp1.reg());
-  __ RecordWriteHelper(tmp1.reg(), index1.reg(), object.reg());
-  __ RecordWriteHelper(tmp2.reg(), index2.reg(), object.reg());
-  __ bind(&done);
-
-  deferred->BindExit();
-  frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
-  Comment cmnt(masm_, "[ GenerateCallFunction");
-
-  ASSERT(args->length() >= 2);
-
-  int n_args = args->length() - 2;  // Exclude the receiver and function.
-  Load(args->at(0));  // receiver
-  for (int i = 0; i < n_args; i++) {
-    Load(args->at(i + 1));
-  }
-  Load(args->at(n_args + 1));  // function
-  Result result = frame_->CallJSFunction(n_args);
-  frame_->Push(&result);
-}
-
-
-// Generates the Math.pow method. Only handles special cases and
-// branches to the runtime system for everything else. Please note
-// that this function assumes that the callsite has executed ToNumber
-// on both arguments.
-void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 2);
-  Load(args->at(0));
-  Load(args->at(1));
-
-  Label allocate_return;
-  // Load the two operands while leaving the values on the frame.
-  frame()->Dup();
-  Result exponent = frame()->Pop();
-  exponent.ToRegister();
-  frame()->Spill(exponent.reg());
-  frame()->PushElementAt(1);
-  Result base = frame()->Pop();
-  base.ToRegister();
-  frame()->Spill(base.reg());
-
-  Result answer = allocator()->Allocate();
-  ASSERT(answer.is_valid());
-  ASSERT(!exponent.reg().is(base.reg()));
-  JumpTarget call_runtime;
-
-  // Save 1 in xmm3 - we need this several times later on.
-  __ movl(answer.reg(), Immediate(1));
-  __ cvtlsi2sd(xmm3, answer.reg());
-
-  Label exponent_nonsmi;
-  Label base_nonsmi;
-  // If the exponent is a heap number go to that specific case.
-  __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
-  __ JumpIfNotSmi(base.reg(), &base_nonsmi);
-
-  // Optimized version when y is an integer.
-  Label powi;
-  __ SmiToInteger32(base.reg(), base.reg());
-  __ cvtlsi2sd(xmm0, base.reg());
-  __ jmp(&powi);
-  // The exponent is a smi and the base is a heap number.
-  __ bind(&base_nonsmi);
-  __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  call_runtime.Branch(not_equal);
-
-  __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
-  // Optimized version of pow if y is an integer.
-  __ bind(&powi);
-  __ SmiToInteger32(exponent.reg(), exponent.reg());
-
-  // Save exponent in base as we need to check if exponent is negative later.
-  // We know that base and exponent are in different registers.
-  __ movl(base.reg(), exponent.reg());
-
-  // Get absolute value of exponent.
-  Label no_neg;
-  __ cmpl(exponent.reg(), Immediate(0));
-  __ j(greater_equal, &no_neg);
-  __ negl(exponent.reg());
-  __ bind(&no_neg);
-
-  // Load xmm1 with 1.
-  __ movsd(xmm1, xmm3);
-  Label while_true;
-  Label no_multiply;
-
-  __ bind(&while_true);
-  __ shrl(exponent.reg(), Immediate(1));
-  __ j(not_carry, &no_multiply);
-  __ mulsd(xmm1, xmm0);
-  __ bind(&no_multiply);
-  __ testl(exponent.reg(), exponent.reg());
-  __ mulsd(xmm0, xmm0);
-  __ j(not_zero, &while_true);
-
-  // base.reg() holds the original exponent (y); if y is negative,
-  // return 1/result.
-  __ testl(base.reg(), base.reg());
-  __ j(positive, &allocate_return);
-  // Special case if xmm1 has reached infinity.
-  __ movl(answer.reg(), Immediate(0x7FB00000));
-  __ movd(xmm0, answer.reg());
-  __ cvtss2sd(xmm0, xmm0);
-  __ ucomisd(xmm0, xmm1);
-  call_runtime.Branch(equal);
-  __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ jmp(&allocate_return);
-
-  // The exponent (or both operands) is a heap number - either way, from
-  // here on we work on doubles.
-  __ bind(&exponent_nonsmi);
-  __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  call_runtime.Branch(not_equal);
-  __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
-  // Test whether the exponent is NaN.
-  __ ucomisd(xmm1, xmm1);
-  call_runtime.Branch(parity_even);
-
-  Label base_not_smi;
-  Label handle_special_cases;
-  __ JumpIfNotSmi(base.reg(), &base_not_smi);
-  __ SmiToInteger32(base.reg(), base.reg());
-  __ cvtlsi2sd(xmm0, base.reg());
-  __ jmp(&handle_special_cases);
-  __ bind(&base_not_smi);
-  __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  call_runtime.Branch(not_equal);
-  __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
-  __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
-  __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
-  // base is NaN or +/-Infinity
-  call_runtime.Branch(greater_equal);
-  __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
-
-  // base is in xmm0 and exponent is in xmm1.
-  __ bind(&handle_special_cases);
-  Label not_minus_half;
-  // Test for -0.5.
-  // Load xmm2 with -0.5.
-  __ movl(answer.reg(), Immediate(0xBF000000));
-  __ movd(xmm2, answer.reg());
-  __ cvtss2sd(xmm2, xmm2);
-  // xmm2 now has -0.5.
-  __ ucomisd(xmm2, xmm1);
-  __ j(not_equal, &not_minus_half);
-
-  // Calculates reciprocal of square root.
-  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
-  __ sqrtsd(xmm1, xmm1);
-  __ divsd(xmm3, xmm1);
-  __ movsd(xmm1, xmm3);
-  __ jmp(&allocate_return);
-
-  // Test for 0.5.
-  __ bind(&not_minus_half);
-  // Load xmm2 with 0.5.
-  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
-  __ addsd(xmm2, xmm3);
-  // xmm2 now has 0.5.
-  __ ucomisd(xmm2, xmm1);
-  call_runtime.Branch(not_equal);
-
-  // Calculates square root.
-  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-  __ xorpd(xmm1, xmm1);
-  __ addsd(xmm1, xmm0);
-  __ sqrtsd(xmm1, xmm1);
-
-  JumpTarget done;
-  Label failure, success;
-  __ bind(&allocate_return);
-  // Make a copy of the frame to enable us to handle allocation
-  // failure after the JumpTarget jump.
-  VirtualFrame* clone = new VirtualFrame(frame());
-  __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
-  __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
-  // Remove the two original values from the frame - we only need those
-  // in the case where we branch to runtime.
-  frame()->Drop(2);
-  exponent.Unuse();
-  base.Unuse();
-  done.Jump(&answer);
-  // Use the copy of the original frame as our current frame.
-  RegisterFile empty_regs;
-  SetFrame(clone, &empty_regs);
-  // If we experience an allocation failure we branch to runtime.
-  __ bind(&failure);
-  call_runtime.Bind();
-  answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
-
-  done.Bind(&answer);
-  frame()->Push(&answer);
-}
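
The powi loop above is standard binary exponentiation (square-and-multiply), driven by shifting the exponent right one bit per iteration. The same computation in plain C++, as a sketch of what the generated code does on its integer fast path:

    // pow(base, exponent) for an int32 exponent, mirroring the powi loop:
    // multiply the accumulator in when the shifted-out bit is 1 (the carry
    // branch after shrl above), and square the base every iteration.
    double PowInteger(double base, int exponent) {
      bool negative = exponent < 0;
      unsigned e = negative ? 0u - static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
      double result = 1.0;  // xmm1 above (seeded from xmm3 == 1.0).
      double x = base;      // xmm0 above.
      while (e != 0) {
        if (e & 1) result *= x;
        x *= x;
        e >>= 1;
      }
      // For a negative exponent the generated code returns 1/result, after
      // bailing out to the runtime if result overflowed to infinity.
      return negative ? 1.0 / result : result;
    }
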
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::SIN,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::COS,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
-  ASSERT_EQ(args->length(), 1);
-  Load(args->at(0));
-  TranscendentalCacheStub stub(TranscendentalCache::LOG,
-                               TranscendentalCacheStub::TAGGED);
-  Result result = frame_->CallStub(&stub, 1);
-  frame_->Push(&result);
-}
-
-
-// Generates the Math.sqrt method. Please note - this function assumes that
-// the callsite has executed ToNumber on the argument.
-void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-
-  // Leave original value on the frame if we need to call runtime.
-  frame()->Dup();
-  Result result = frame()->Pop();
-  result.ToRegister();
-  frame()->Spill(result.reg());
-  Label runtime;
-  Label non_smi;
-  Label load_done;
-  JumpTarget end;
-
-  __ JumpIfNotSmi(result.reg(), &non_smi);
-  __ SmiToInteger32(result.reg(), result.reg());
-  __ cvtlsi2sd(xmm0, result.reg());
-  __ jmp(&load_done);
-  __ bind(&non_smi);
-  __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
-                 Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &runtime);
-  __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
-
-  __ bind(&load_done);
-  __ sqrtsd(xmm0, xmm0);
-  // Make a copy of the virtual frame so that we can still reach the
-  // runtime call after the JumpTarget jump.
-  Result scratch = allocator()->Allocate();
-  VirtualFrame* clone = new VirtualFrame(frame());
-  __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
-
-  __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
-  frame()->Drop(1);
-  scratch.Unuse();
-  end.Jump(&result);
-  // We only branch to runtime if we have an allocation error.
-  // Use the copy of the original frame as our current frame.
-  RegisterFile empty_regs;
-  SetFrame(clone, &empty_regs);
-  __ bind(&runtime);
-  result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
-
-  end.Bind(&result);
-  frame()->Push(&result);
-}
-
-
-void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
-  ASSERT_EQ(2, args->length());
-  Load(args->at(0));
-  Load(args->at(1));
-  Result right_res = frame_->Pop();
-  Result left_res = frame_->Pop();
-  right_res.ToRegister();
-  left_res.ToRegister();
-  Result tmp_res = allocator()->Allocate();
-  ASSERT(tmp_res.is_valid());
-  Register right = right_res.reg();
-  Register left = left_res.reg();
-  Register tmp = tmp_res.reg();
-  right_res.Unuse();
-  left_res.Unuse();
-  tmp_res.Unuse();
-  __ cmpq(left, right);
-  destination()->true_target()->Branch(equal);
-  // Fail if either is a non-HeapObject.
-  Condition either_smi =
-      masm()->CheckEitherSmi(left, right, tmp);
-  destination()->false_target()->Branch(either_smi);
-  __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset));
-  __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset),
-          Immediate(JS_REGEXP_TYPE));
-  destination()->false_target()->Branch(not_equal);
-  __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset));
-  destination()->false_target()->Branch(not_equal);
-  __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset));
-  __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset));
-  destination()->Split(equal);
-}
-
-
-void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result value = frame_->Pop();
-  value.ToRegister();
-  ASSERT(value.is_valid());
-  __ testl(FieldOperand(value.reg(), String::kHashFieldOffset),
-           Immediate(String::kContainsCachedArrayIndexMask));
-  value.Unuse();
-  destination()->Split(zero);
-}
-
-
-void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
-  ASSERT(args->length() == 1);
-  Load(args->at(0));
-  Result string = frame_->Pop();
-  string.ToRegister();
-
-  Result number = allocator()->Allocate();
-  ASSERT(number.is_valid());
-  __ movl(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset));
-  __ IndexFromHash(number.reg(), number.reg());
-  string.Unuse();
-  frame_->Push(&number);
-}
-
-
-void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
-  frame_->Push(FACTORY->undefined_value());
-}
-
-
-void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
-  if (CheckForInlineRuntimeCall(node)) {
-    return;
-  }
-
-  ZoneList<Expression*>* args = node->arguments();
-  Comment cmnt(masm_, "[ CallRuntime");
-  const Runtime::Function* function = node->function();
-
-  if (function == NULL) {
-    // Push the builtins object found in the current global object.
-    Result temp = allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ movq(temp.reg(), GlobalObjectOperand());
-    __ movq(temp.reg(),
-            FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
-    frame_->Push(&temp);
-  }
-
-  // Push the arguments ("left-to-right").
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    Load(args->at(i));
-  }
-
-  if (function == NULL) {
-    // Call the JS runtime function.
-    frame_->Push(node->name());
-    Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
-                                       arg_count,
-                                       loop_nesting_);
-    frame_->RestoreContextRegister();
-    frame_->Push(&answer);
-  } else {
-    // Call the C runtime function.
-    Result answer = frame_->CallRuntime(function, arg_count);
-    frame_->Push(&answer);
-  }
-}
-
-
-void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
-  Comment cmnt(masm_, "[ UnaryOperation");
-
-  Token::Value op = node->op();
-
-  if (op == Token::NOT) {
-    // Swap the true and false targets but keep the same actual label
-    // as the fall through.
-    destination()->Invert();
-    LoadCondition(node->expression(), destination(), true);
-    // Swap the labels back.
-    destination()->Invert();
-
-  } else if (op == Token::DELETE) {
-    Property* property = node->expression()->AsProperty();
-    if (property != NULL) {
-      Load(property->obj());
-      Load(property->key());
-      frame_->Push(Smi::FromInt(strict_mode_flag()));
-      Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3);
-      frame_->Push(&answer);
-      return;
-    }
-
-    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
-    if (variable != NULL) {
-      // Deleting an unqualified identifier is disallowed in strict mode,
-      // but "delete this" is allowed.
-      ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
-      Slot* slot = variable->AsSlot();
-      if (variable->is_global()) {
-        LoadGlobal();
-        frame_->Push(variable->name());
-        frame_->Push(Smi::FromInt(kNonStrictMode));
-        Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
-                                              CALL_FUNCTION, 3);
-        frame_->Push(&answer);
-
-      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-        // Call the runtime to delete from the context holding the named
-        // variable.  Sync the virtual frame eagerly so we can push the
-        // arguments directly into place.
-        frame_->SyncRange(0, frame_->element_count() - 1);
-        frame_->EmitPush(rsi);
-        frame_->EmitPush(variable->name());
-        Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
-        frame_->Push(&answer);
-      } else {
-        // Default: Result of deleting non-global, not dynamically
-        // introduced variables is false.
-        frame_->Push(FACTORY->false_value());
-      }
-    } else {
-      // Default: Result of deleting expressions is true.
-      Load(node->expression());  // may have side-effects
-      frame_->SetElementAt(0, FACTORY->true_value());
-    }
-
-  } else if (op == Token::TYPEOF) {
-    // Special case for loading the typeof expression; see comment on
-    // LoadTypeofExpression().
-    LoadTypeofExpression(node->expression());
-    Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
-    frame_->Push(&answer);
-
-  } else if (op == Token::VOID) {
-    Expression* expression = node->expression();
-    if (expression && expression->AsLiteral() && (
-        expression->AsLiteral()->IsTrue() ||
-        expression->AsLiteral()->IsFalse() ||
-        expression->AsLiteral()->handle()->IsNumber() ||
-        expression->AsLiteral()->handle()->IsString() ||
-        expression->AsLiteral()->handle()->IsJSRegExp() ||
-        expression->AsLiteral()->IsNull())) {
-      // Omit evaluating the value of the primitive literal.
-      // It will be discarded anyway, and can have no side effect.
-      frame_->Push(FACTORY->undefined_value());
-    } else {
-      Load(node->expression());
-      frame_->SetElementAt(0, FACTORY->undefined_value());
-    }
-
-  } else {
-    bool can_overwrite = node->expression()->ResultOverwriteAllowed();
-    UnaryOverwriteMode overwrite =
-        can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
-    bool no_negative_zero = node->expression()->no_negative_zero();
-    Load(node->expression());
-    switch (op) {
-      case Token::NOT:
-      case Token::DELETE:
-      case Token::TYPEOF:
-        UNREACHABLE();  // handled above
-        break;
-
-      case Token::SUB: {
-        GenericUnaryOpStub stub(
-            Token::SUB,
-            overwrite,
-            NO_UNARY_FLAGS,
-            no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
-        Result operand = frame_->Pop();
-        Result answer = frame_->CallStub(&stub, &operand);
-        answer.set_type_info(TypeInfo::Number());
-        frame_->Push(&answer);
-        break;
-      }
-
-      case Token::BIT_NOT: {
-        // Smi check.
-        JumpTarget smi_label;
-        JumpTarget continue_label;
-        Result operand = frame_->Pop();
-        operand.ToRegister();
-
-        Condition is_smi = masm_->CheckSmi(operand.reg());
-        smi_label.Branch(is_smi, &operand);
-
-        GenericUnaryOpStub stub(Token::BIT_NOT,
-                                overwrite,
-                                NO_UNARY_SMI_CODE_IN_STUB);
-        Result answer = frame_->CallStub(&stub, &operand);
-        continue_label.Jump(&answer);
-
-        smi_label.Bind(&answer);
-        answer.ToRegister();
-        frame_->Spill(answer.reg());
-        __ SmiNot(answer.reg(), answer.reg());
-        continue_label.Bind(&answer);
-        answer.set_type_info(TypeInfo::Smi());
-        frame_->Push(&answer);
-        break;
-      }
-
-      case Token::ADD: {
-        // Smi check.
-        JumpTarget continue_label;
-        Result operand = frame_->Pop();
-        TypeInfo operand_info = operand.type_info();
-        operand.ToRegister();
-        Condition is_smi = masm_->CheckSmi(operand.reg());
-        continue_label.Branch(is_smi, &operand);
-        frame_->Push(&operand);
-        Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
-                                              CALL_FUNCTION, 1);
-
-        continue_label.Bind(&answer);
-        if (operand_info.IsSmi()) {
-          answer.set_type_info(TypeInfo::Smi());
-        } else if (operand_info.IsInteger32()) {
-          answer.set_type_info(TypeInfo::Integer32());
-        } else {
-          answer.set_type_info(TypeInfo::Number());
-        }
-        frame_->Push(&answer);
-        break;
-      }
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged.  Call into the runtime
-// to convert the argument to a number, and call the specialized add
-// or subtract stub.  The result is left in dst.
-class DeferredPrefixCountOperation: public DeferredCode {
- public:
-  DeferredPrefixCountOperation(Register dst,
-                               bool is_increment,
-                               TypeInfo input_type)
-      : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
-    set_comment("[ DeferredCountOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  bool is_increment_;
-  TypeInfo input_type_;
-};
-
-
-void DeferredPrefixCountOperation::Generate() {
-  Register left;
-  if (input_type_.IsNumber()) {
-    left = dst_;
-  } else {
-    __ push(dst_);
-    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-    left = rax;
-  }
-
-  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS,
-                           TypeInfo::Number());
-  stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-// The value in dst was optimistically incremented or decremented.
-// The result overflowed or was not smi tagged.  Call into the runtime
-// to convert the argument to a number.  Update the original value in
-// old.  Call the specialized add or subtract stub.  The result is
-// left in dst.
-class DeferredPostfixCountOperation: public DeferredCode {
- public:
-  DeferredPostfixCountOperation(Register dst,
-                                Register old,
-                                bool is_increment,
-                                TypeInfo input_type)
-      : dst_(dst),
-        old_(old),
-        is_increment_(is_increment),
-        input_type_(input_type) {
-    set_comment("[ DeferredCountOperation");
-  }
-
-  virtual void Generate();
-
- private:
-  Register dst_;
-  Register old_;
-  bool is_increment_;
-  TypeInfo input_type_;
-};
-
-
-void DeferredPostfixCountOperation::Generate() {
-  Register left;
-  if (input_type_.IsNumber()) {
-    __ push(dst_);  // Save the input to use as the old value.
-    left = dst_;
-  } else {
-    __ push(dst_);
-    __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-    __ push(rax);  // Save the result of ToNumber to use as the old value.
-    left = rax;
-  }
-
-  GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
-                           NO_OVERWRITE,
-                           NO_GENERIC_BINARY_FLAGS,
-                           TypeInfo::Number());
-  stub.GenerateCall(masm_, left, Smi::FromInt(1));
-
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-  __ pop(old_);
-}
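
Both deferred classes back up the same optimistic fast path: assume the operand is a smi, do the tagged add or subtract directly, and only fall into deferred code on a non-smi input or an overflow. A sketch of that shape under a hypothetical 31-bit smi encoding (tag bit 0 == 0, as on ia32); this is an assumption, not V8's x64 representation:

    #include <cstdint>
    #include <cstdlib>

    inline bool IsSmi(int32_t v) { return (v & 1) == 0; }

    int32_t SlowPathIncrement(int32_t value) {
      // Stand-in for DeferredPrefixCountOperation::Generate: ToNumber the
      // input, then call the generic add stub.  Out of scope for this sketch.
      std::abort();
    }

    int32_t Increment(int32_t value) {
      int32_t result;
      // Optimistic tagged add: adding smi(1), i.e. raw 2, increments the
      // untagged payload.  A non-smi input or an overflow takes the slow
      // path, like the JumpIfNotSmi / SmiAddConstant pair above.
      if (IsSmi(value) && !__builtin_add_overflow(value, 2, &result)) {
        return result;
      }
      return SlowPathIncrement(value);
    }
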
-
-
-void CodeGenerator::VisitCountOperation(CountOperation* node) {
-  Comment cmnt(masm_, "[ CountOperation");
-
-  bool is_postfix = node->is_postfix();
-  bool is_increment = node->op() == Token::INC;
-
-  Variable* var = node->expression()->AsVariableProxy()->AsVariable();
-  bool is_const = (var != NULL && var->mode() == Variable::CONST);
-
-  // Postfix operations need a stack slot under the reference to hold
-  // the old value while the new value is being stored.  This is so that
-  // in the case that storing the new value requires a call, the old
-  // value will be in the frame to be spilled.
-  if (is_postfix) frame_->Push(Smi::FromInt(0));
-
-  // A constant reference is never stored to, so the reference is not
-  // a compound assignment reference.
-  { Reference target(this, node->expression(), !is_const);
-    if (target.is_illegal()) {
-      // Spoof the virtual frame to have the expected height (one higher
-      // than on entry).
-      if (!is_postfix) frame_->Push(Smi::FromInt(0));
-      return;
-    }
-    target.TakeValue();
-
-    Result new_value = frame_->Pop();
-    new_value.ToRegister();
-
-    Result old_value;  // Only allocated in the postfix case.
-    if (is_postfix) {
-      // Allocate a temporary to preserve the old value.
-      old_value = allocator_->Allocate();
-      ASSERT(old_value.is_valid());
-      __ movq(old_value.reg(), new_value.reg());
-
-      // The return value for postfix operations is ToNumber(input).
-      // Keep more precise type info if the input is some kind of
-      // number already. If the input is not a number we have to wait
-      // for the deferred code to convert it.
-      if (new_value.type_info().IsNumber()) {
-        old_value.set_type_info(new_value.type_info());
-      }
-    }
-    // Ensure the new value is writable.
-    frame_->Spill(new_value.reg());
-
-    DeferredCode* deferred = NULL;
-    if (is_postfix) {
-      deferred = new DeferredPostfixCountOperation(new_value.reg(),
-                                                   old_value.reg(),
-                                                   is_increment,
-                                                   new_value.type_info());
-    } else {
-      deferred = new DeferredPrefixCountOperation(new_value.reg(),
-                                                  is_increment,
-                                                  new_value.type_info());
-    }
-
-    if (new_value.is_smi()) {
-      if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
-    } else {
-      __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
-    }
-    if (is_increment) {
-      __ SmiAddConstant(new_value.reg(),
-                        new_value.reg(),
-                        Smi::FromInt(1),
-                        deferred->entry_label());
-    } else {
-      __ SmiSubConstant(new_value.reg(),
-                        new_value.reg(),
-                        Smi::FromInt(1),
-                        deferred->entry_label());
-    }
-    deferred->BindExit();
-
-    // Postfix count operations return their input converted to
-    // number. The case when the input is already a number is covered
-    // above in the allocation code for old_value.
-    if (is_postfix && !new_value.type_info().IsNumber()) {
-      old_value.set_type_info(TypeInfo::Number());
-    }
-
-    new_value.set_type_info(TypeInfo::Number());
-
-    // Postfix: store the old value in the allocated slot under the
-    // reference.
-    if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
-
-    frame_->Push(&new_value);
-    // Non-constant: update the reference.
-    if (!is_const) target.SetValue(NOT_CONST_INIT);
-  }
-
-  // Postfix: drop the new value and use the old.
-  if (is_postfix) frame_->Drop();
-}
-
-
-void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
-  // According to ECMA-262 section 11.11, page 58, the binary logical
-  // operators must yield the result of one of the two expressions
-  // before any ToBoolean() conversions. This means that the value
-  // produced by a && or || operator is not necessarily a boolean.
-
-  // NOTE: If the left hand side produces a materialized value (not
-  // control flow), we force the right hand side to do the same. This
-  // is necessary because we assume that if we get control flow on the
-  // last path out of an expression we got it on all paths.
-  if (node->op() == Token::AND) {
-    JumpTarget is_true;
-    ControlDestination dest(&is_true, destination()->false_target(), true);
-    LoadCondition(node->left(), &dest, false);
-
-    if (dest.false_was_fall_through()) {
-      // The current false target was used as the fall-through.  If
-      // there are no dangling jumps to is_true then the left
-      // subexpression was unconditionally false.  Otherwise we have
-      // paths where we do have to evaluate the right subexpression.
-      if (is_true.is_linked()) {
-        // We need to compile the right subexpression.  If the jump to
-        // the current false target was a forward jump then we have a
-        // valid frame, we have just bound the false target, and we
-        // have to jump around the code for the right subexpression.
-        if (has_valid_frame()) {
-          destination()->false_target()->Unuse();
-          destination()->false_target()->Jump();
-        }
-        is_true.Bind();
-        // The left subexpression compiled to control flow, so the
-        // right one is free to do so as well.
-        LoadCondition(node->right(), destination(), false);
-      } else {
-        // We have actually just jumped to or bound the current false
-        // target but the current control destination is not marked as
-        // used.
-        destination()->Use(false);
-      }
-
-    } else if (dest.is_used()) {
-      // The left subexpression compiled to control flow (and is_true
-      // was just bound), so the right is free to do so as well.
-      LoadCondition(node->right(), destination(), false);
-
-    } else {
-      // We have a materialized value on the frame, so we exit with
-      // one on all paths.  There are possibly also jumps to is_true
-      // from nested subexpressions.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      // Avoid popping the result if it converts to 'false' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      //
-      // Duplicate the TOS value. The duplicate will be popped by
-      // ToBoolean.
-      frame_->Dup();
-      ControlDestination dest(&pop_and_continue, &exit, true);
-      ToBoolean(&dest);
-
-      // Pop the result of evaluating the first part.
-      frame_->Drop();
-
-      // Compile right side expression.
-      is_true.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    }
-
-  } else {
-    ASSERT(node->op() == Token::OR);
-    JumpTarget is_false;
-    ControlDestination dest(destination()->true_target(), &is_false, false);
-    LoadCondition(node->left(), &dest, false);
-
-    if (dest.true_was_fall_through()) {
-      // The current true target was used as the fall-through.  If
-      // there are no dangling jumps to is_false then the left
-      // subexpression was unconditionally true.  Otherwise we have
-      // paths where we do have to evaluate the right subexpression.
-      if (is_false.is_linked()) {
-        // We need to compile the right subexpression.  If the jump to
-        // the current true target was a forward jump then we have a
-        // valid frame, we have just bound the true target, and we
-        // have to jump around the code for the right subexpression.
-        if (has_valid_frame()) {
-          destination()->true_target()->Unuse();
-          destination()->true_target()->Jump();
-        }
-        is_false.Bind();
-        // The left subexpression compiled to control flow, so the
-        // right one is free to do so as well.
-        LoadCondition(node->right(), destination(), false);
-      } else {
-        // We have just jumped to or bound the current true target but
-        // the current control destination is not marked as used.
-        destination()->Use(true);
-      }
-
-    } else if (dest.is_used()) {
-      // The left subexpression compiled to control flow (and is_false
-      // was just bound), so the right is free to do so as well.
-      LoadCondition(node->right(), destination(), false);
-
-    } else {
-      // We have a materialized value on the frame, so we exit with
-      // one on all paths.  There are possibly also jumps to is_false
-      // from nested subexpressions.
-      JumpTarget pop_and_continue;
-      JumpTarget exit;
-
-      // Avoid popping the result if it converts to 'true' using the
-      // standard ToBoolean() conversion as described in ECMA-262,
-      // section 9.2, page 30.
-      //
-      // Duplicate the TOS value. The duplicate will be popped by
-      // ToBoolean.
-      frame_->Dup();
-      ControlDestination dest(&exit, &pop_and_continue, false);
-      ToBoolean(&dest);
-
-      // Pop the result of evaluating the first part.
-      frame_->Drop();
-
-      // Compile right side expression.
-      is_false.Bind();
-      Load(node->right());
-
-      // Exit (always with a materialized value).
-      exit.Bind();
-    }
-  }
-}
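
The ECMA-262 note at the top of this function is the key subtlety: '&&' and '||' select one of their operand values; they do not produce a boolean. A value-level sketch using a hypothetical tagged Value type:

    #include <string>

    // Hypothetical tagged value, just enough to express the semantics.
    struct Value {
      bool is_truthy;       // Result of the ToBoolean conversion.
      std::string payload;  // The actual JavaScript value.
    };

    // 'a && b' yields a when ToBoolean(a) is false, otherwise b;
    // 'a || b' yields a when ToBoolean(a) is true, otherwise b.
    // The right operand is evaluated only when its value is needed.
    Value LogicalAnd(const Value& left, Value (*right)()) {
      return left.is_truthy ? right() : left;
    }

    Value LogicalOr(const Value& left, Value (*right)()) {
      return left.is_truthy ? left : right();
    }
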
-
-
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-  Comment cmnt(masm_, "[ BinaryOperation");
-
-  if (node->op() == Token::AND || node->op() == Token::OR) {
-    GenerateLogicalBooleanOperation(node);
-  } else {
-    // NOTE: The code below assumes that the slow cases (calls to runtime)
-    // never return a constant/immutable object.
-    OverwriteMode overwrite_mode = NO_OVERWRITE;
-    if (node->left()->ResultOverwriteAllowed()) {
-      overwrite_mode = OVERWRITE_LEFT;
-    } else if (node->right()->ResultOverwriteAllowed()) {
-      overwrite_mode = OVERWRITE_RIGHT;
-    }
-
-    if (node->left()->IsTrivial()) {
-      Load(node->right());
-      Result right = frame_->Pop();
-      frame_->Push(node->left());
-      frame_->Push(&right);
-    } else {
-      Load(node->left());
-      Load(node->right());
-    }
-    GenericBinaryOperation(node, overwrite_mode);
-  }
-}
-
-
-void CodeGenerator::VisitThisFunction(ThisFunction* node) {
-  frame_->PushFunction();
-}
-
-
-void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
-  Comment cmnt(masm_, "[ CompareOperation");
-
-  // Get the expressions from the node.
-  Expression* left = node->left();
-  Expression* right = node->right();
-  Token::Value op = node->op();
-  // To make typeof testing for natives implemented in JavaScript really
-  // efficient, we generate special code for expressions of the form:
-  // 'typeof <expression> == <string>'.
-  UnaryOperation* operation = left->AsUnaryOperation();
-  if ((op == Token::EQ || op == Token::EQ_STRICT) &&
-      (operation != NULL && operation->op() == Token::TYPEOF) &&
-      (right->AsLiteral() != NULL &&
-       right->AsLiteral()->handle()->IsString())) {
-    Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
-
-    // Load the operand and move it to a register.
-    LoadTypeofExpression(operation->expression());
-    Result answer = frame_->Pop();
-    answer.ToRegister();
-
-    if (check->Equals(HEAP->number_symbol())) {
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->true_target()->Branch(is_smi);
-      frame_->Spill(answer.reg());
-      __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
-      answer.Unuse();
-      destination()->Split(equal);
-
-    } else if (check->Equals(HEAP->string_symbol())) {
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->false_target()->Branch(is_smi);
-
-      // It can be an undetectable string object.
-      __ movq(kScratchRegister,
-              FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
-               Immediate(1 << Map::kIsUndetectable));
-      destination()->false_target()->Branch(not_zero);
-      __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
-      answer.Unuse();
-      destination()->Split(below);  // Unsigned byte comparison needed.
-
-    } else if (check->Equals(HEAP->boolean_symbol())) {
-      __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
-      destination()->true_target()->Branch(equal);
-      __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
-      answer.Unuse();
-      destination()->Split(equal);
-
-    } else if (check->Equals(HEAP->undefined_symbol())) {
-      __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
-      destination()->true_target()->Branch(equal);
-
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->false_target()->Branch(is_smi);
-
-      // It can be an undetectable object.
-      __ movq(kScratchRegister,
-              FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
-               Immediate(1 << Map::kIsUndetectable));
-      answer.Unuse();
-      destination()->Split(not_zero);
-
-    } else if (check->Equals(HEAP->function_symbol())) {
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->false_target()->Branch(is_smi);
-      frame_->Spill(answer.reg());
-      __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
-      destination()->true_target()->Branch(equal);
-      // Regular expressions are callable so typeof == 'function'.
-      __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
-      answer.Unuse();
-      destination()->Split(equal);
-
-    } else if (check->Equals(HEAP->object_symbol())) {
-      Condition is_smi = masm_->CheckSmi(answer.reg());
-      destination()->false_target()->Branch(is_smi);
-      __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
-      destination()->true_target()->Branch(equal);
-
-      // Regular expressions are typeof == 'function', not 'object'.
-      __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
-      destination()->false_target()->Branch(equal);
-
-      // It can be an undetectable object.
-      __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
-               Immediate(1 << Map::kIsUndetectable));
-      destination()->false_target()->Branch(not_zero);
-      __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
-      destination()->false_target()->Branch(below);
-      __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
-      answer.Unuse();
-      destination()->Split(below_equal);
-    } else {
-      // Uncommon case: typeof testing against a string literal that is
-      // never returned from the typeof operator.
-      answer.Unuse();
-      destination()->Goto(false);
-    }
-    return;
-  }
-
-  Condition cc = no_condition;
-  bool strict = false;
-  switch (op) {
-    case Token::EQ_STRICT:
-      strict = true;
-      // Fall through
-    case Token::EQ:
-      cc = equal;
-      break;
-    case Token::LT:
-      cc = less;
-      break;
-    case Token::GT:
-      cc = greater;
-      break;
-    case Token::LTE:
-      cc = less_equal;
-      break;
-    case Token::GTE:
-      cc = greater_equal;
-      break;
-    case Token::IN: {
-      Load(left);
-      Load(right);
-      Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
-      frame_->Push(&answer);  // push the result
-      return;
-    }
-    case Token::INSTANCEOF: {
-      Load(left);
-      Load(right);
-      InstanceofStub stub(InstanceofStub::kNoFlags);
-      Result answer = frame_->CallStub(&stub, 2);
-      answer.ToRegister();
-      __ testq(answer.reg(), answer.reg());
-      answer.Unuse();
-      destination()->Split(zero);
-      return;
-    }
-    default:
-      UNREACHABLE();
-  }
-
-  if (left->IsTrivial()) {
-    Load(right);
-    Result right_result = frame_->Pop();
-    frame_->Push(left);
-    frame_->Push(&right_result);
-  } else {
-    Load(left);
-    Load(right);
-  }
-
-  Comparison(node, cc, strict, destination());
-}
-
-
-void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
-  Comment cmnt(masm_, "[ CompareToNull");
-
-  Load(node->expression());
-  Result operand = frame_->Pop();
-  operand.ToRegister();
-  __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
-  if (node->is_strict()) {
-    operand.Unuse();
-    destination()->Split(equal);
-  } else {
-    // The 'null' value is only equal to 'undefined' if using non-strict
-    // comparisons.
-    destination()->true_target()->Branch(equal);
-    __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
-    destination()->true_target()->Branch(equal);
-    Condition is_smi = masm_->CheckSmi(operand.reg());
-    destination()->false_target()->Branch(is_smi);
-
-    // It can be an undetectable object.
-    // Use a scratch register in preference to spilling operand.reg().
-    Result temp = allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    __ movq(temp.reg(),
-            FieldOperand(operand.reg(), HeapObject::kMapOffset));
-    __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
-             Immediate(1 << Map::kIsUndetectable));
-    temp.Unuse();
-    operand.Unuse();
-    destination()->Split(not_zero);
-  }
-}
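// -----------------------------------------------------------------------
// [Editor's note; illustrative sketch, not part of this diff.]  A minimal
// model of the branches above: under non-strict '==', null matches null,
// undefined, and undetectable objects (e.g. document.all); everything
// else, including smis, is unequal.  The enum is a hypothetical stand-in.
enum NullCompareKind { kIsNull, kIsUndefined, kIsUndetectable, kIsOther };

static bool EqualsNullNonStrict(NullCompareKind kind) {
  return kind == kIsNull || kind == kIsUndefined || kind == kIsUndetectable;
}

static bool EqualsNullStrict(NullCompareKind kind) {
  return kind == kIsNull;  // Strict comparison accepts only null itself.
}
// -----------------------------------------------------------------------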
-
-
-#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() {
-  return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
-      && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
-      && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
-      && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
-      && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
-      && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
-      && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
-      && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
-      && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
-      && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0));
-}
-#endif
-
-
-// Emit a LoadIC call to get the value from receiver and leave it in
-// dst.  The receiver register is restored after the call.
-class DeferredReferenceGetNamedValue: public DeferredCode {
- public:
-  DeferredReferenceGetNamedValue(Register dst,
-                                 Register receiver,
-                                 Handle<String> name)
-      : dst_(dst), receiver_(receiver), name_(name) {
-    set_comment("[ DeferredReferenceGetNamedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
- private:
-  Label patch_site_;
-  Register dst_;
-  Register receiver_;
-  Handle<String> name_;
-};
-
-
-void DeferredReferenceGetNamedValue::Generate() {
-  if (!receiver_.is(rax)) {
-    __ movq(rax, receiver_);
-  }
-  __ Move(rcx, name_);
-  Handle<Code> ic = Isolate::Current()->builtins()->LoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // The call must be followed by a test rax instruction to indicate
-  // that the inobject property case was inlined.
-  //
-  // Store the delta to the map check instruction here in the test
-  // instruction.  Use masm_-> instead of the __ macro since the
-  // latter can't return a value.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  masm_->testl(rax, Immediate(-delta_to_patch_site));
-  Counters* counters = masm()->isolate()->counters();
-  __ IncrementCounter(counters->named_load_inline_miss(), 1);
-
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
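// -----------------------------------------------------------------------
// [Editor's sketch; not part of this diff.]  How the 'test rax' marker is
// consumed: the IC miss handler inspects the instruction that follows the
// call.  0xA9 is the x86 'test eax, imm32' opcode, and the imm32 holds the
// negated distance back to the inlined map-check movq.  This is a rough
// model, not the real IC-patching code.
#include <cstdint>
#include <cstring>

static const uint8_t* FindInlinedMapCheck(const uint8_t* test_instruction) {
  if (*test_instruction != 0xA9) return nullptr;  // Nothing was inlined.
  int32_t negated_delta;
  std::memcpy(&negated_delta, test_instruction + 1, sizeof(negated_delta));
  // negated_delta is -delta_to_patch_site, so adding it walks backwards
  // in the code stream to the map-check instruction.
  return test_instruction + negated_delta;
}
// -----------------------------------------------------------------------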
-
-
-class DeferredReferenceGetKeyedValue: public DeferredCode {
- public:
-  explicit DeferredReferenceGetKeyedValue(Register dst,
-                                          Register receiver,
-                                          Register key)
-      : dst_(dst), receiver_(receiver), key_(key) {
-    set_comment("[ DeferredReferenceGetKeyedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
- private:
-  Label patch_site_;
-  Register dst_;
-  Register receiver_;
-  Register key_;
-};
-
-
-void DeferredReferenceGetKeyedValue::Generate() {
-  if (receiver_.is(rdx)) {
-    if (!key_.is(rax)) {
-      __ movq(rax, key_);
-    }  // else do nothing.
-  } else if (receiver_.is(rax)) {
-    if (key_.is(rdx)) {
-      __ xchg(rax, rdx);
-    } else if (key_.is(rax)) {
-      __ movq(rdx, receiver_);
-    } else {
-      __ movq(rdx, receiver_);
-      __ movq(rax, key_);
-    }
-  } else if (key_.is(rax)) {
-    __ movq(rdx, receiver_);
-  } else {
-    __ movq(rax, key_);
-    __ movq(rdx, receiver_);
-  }
-  // Calculate the delta from the IC call instruction to the map check
-  // movq instruction in the inlined version.  This delta is stored in
-  // a test(rax, delta) instruction after the call so that we can find
-  // it in the IC initialization code and patch the movq instruction.
-  // This means that we cannot allow test instructions after calls to
-  // KeyedLoadIC stubs in other places.
-  Handle<Code> ic = Isolate::Current()->builtins()->KeyedLoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // The delta from the start of the map-compare instruction to the
-  // test instruction.  We use masm_-> directly here instead of the __
-  // macro because the macro sometimes uses macro expansion to turn
-  // into something that can't return a value.  This is encountered
-  // when doing generated code coverage tests.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  // TODO(X64): Consider whether it's worth switching the test to a
-  // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
-  // be generated normally.
-  masm_->testl(rax, Immediate(-delta_to_patch_site));
-  Counters* counters = masm()->isolate()->counters();
-  __ IncrementCounter(counters->keyed_load_inline_miss(), 1);
-
-  if (!dst_.is(rax)) __ movq(dst_, rax);
-}
-
-
-class DeferredReferenceSetKeyedValue: public DeferredCode {
- public:
-  DeferredReferenceSetKeyedValue(Register value,
-                                 Register key,
-                                 Register receiver,
-                                 StrictModeFlag strict_mode)
-      : value_(value),
-        key_(key),
-        receiver_(receiver),
-        strict_mode_(strict_mode) {
-    set_comment("[ DeferredReferenceSetKeyedValue");
-  }
-
-  virtual void Generate();
-
-  Label* patch_site() { return &patch_site_; }
-
- private:
-  Register value_;
-  Register key_;
-  Register receiver_;
-  Label patch_site_;
-  StrictModeFlag strict_mode_;
-};
-
-
-void DeferredReferenceSetKeyedValue::Generate() {
-  Counters* counters = masm()->isolate()->counters();
-  __ IncrementCounter(counters->keyed_store_inline_miss(), 1);
-  // Move value, receiver, and key to registers rax, rdx, and rcx, as
-  // the IC stub expects.
-  // Move value to rax, using xchg if the receiver or key is in rax.
-  if (!value_.is(rax)) {
-    if (!receiver_.is(rax) && !key_.is(rax)) {
-      __ movq(rax, value_);
-    } else {
-      __ xchg(rax, value_);
-      // Update receiver_ and key_ if they are affected by the swap.
-      if (receiver_.is(rax)) {
-        receiver_ = value_;
-      } else if (receiver_.is(value_)) {
-        receiver_ = rax;
-      }
-      if (key_.is(rax)) {
-        key_ = value_;
-      } else if (key_.is(value_)) {
-        key_ = rax;
-      }
-    }
-  }
-  // Value is now in rax. Its original location is remembered in value_,
-  // and the value is restored to value_ before returning.
-  // The variables receiver_ and key_ are not preserved.
-  // Move receiver and key to rdx and rcx, swapping if necessary.
-  if (receiver_.is(rdx)) {
-    if (!key_.is(rcx)) {
-      __ movq(rcx, key_);
-    }  // Else everything is already in the right place.
-  } else if (receiver_.is(rcx)) {
-    if (key_.is(rdx)) {
-      __ xchg(rcx, rdx);
-    } else if (key_.is(rcx)) {
-      __ movq(rdx, receiver_);
-    } else {
-      __ movq(rdx, receiver_);
-      __ movq(rcx, key_);
-    }
-  } else if (key_.is(rcx)) {
-    __ movq(rdx, receiver_);
-  } else {
-    __ movq(rcx, key_);
-    __ movq(rdx, receiver_);
-  }
-
-  // Call the IC stub.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode_ == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
-                                    : Builtins::kKeyedStoreIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // The delta from the start of the map-compare instructions (initial movq)
-  // to the test instruction.  We use masm_-> directly here instead of the
-  // __ macro because the macro sometimes uses macro expansion to turn
-  // into something that can't return a value.  This is encountered
-  // when doing generated code coverage tests.
-  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
-  // Here we use masm_-> instead of the __ macro because this is the
-  // instruction that gets patched and coverage code gets in the way.
-  masm_->testl(rax, Immediate(-delta_to_patch_site));
-  // Restore value (returned from store IC).
-  if (!value_.is(rax)) __ movq(value_, rax);
-}
-
-
-Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Result result;
-  // Do not inline the inobject property case for loads from the global
-  // object.  Also do not inline for unoptimized code.  This saves time
-  // in the code generator.  Unoptimized code is toplevel code or code
-  // that is not in a loop.
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
-    Comment cmnt(masm(), "[ Load from named Property");
-    frame()->Push(name);
-
-    RelocInfo::Mode mode = is_contextual
-        ? RelocInfo::CODE_TARGET_CONTEXT
-        : RelocInfo::CODE_TARGET;
-    result = frame()->CallLoadIC(mode);
-    // A test rax instruction following the call signals that the
-    // inobject property case was inlined.  Ensure that there is not
-    // a test rax instruction here.
-    __ nop();
-  } else {
-    // Inline the inobject property case.
-    Comment cmnt(masm(), "[ Inlined named property load");
-    Result receiver = frame()->Pop();
-    receiver.ToRegister();
-    result = allocator()->Allocate();
-    ASSERT(result.is_valid());
-
-    // r12 is now a reserved register, so it cannot be the receiver.
-    // If it was, the distance to the fixup location would not be constant.
-    ASSERT(!receiver.reg().is(r12));
-
-    DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
-
-    // Check that the receiver is a heap object.
-    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
-    __ bind(deferred->patch_site());
-    // This is the map check instruction that will be patched (so we can't
-    // use the double underscore macro that may insert instructions).
-    // Initially use an invalid map to force a failure.
-    masm()->movq(kScratchRegister, FACTORY->null_value(),
-                 RelocInfo::EMBEDDED_OBJECT);
-    masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                 kScratchRegister);
-    // This branch is always a forwards branch so it's always a fixed
-    // size which allows the assert below to succeed and patching to work.
-    // Don't use deferred->Branch(...), since that might add coverage code.
-    masm()->j(not_equal, deferred->entry_label());
-
-    // The delta from the patch label to the load offset must be
-    // statically known.
-    ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
-           LoadIC::kOffsetToLoadInstruction);
-    // The initial (invalid) offset has to be large enough to force
-    // a 32-bit instruction encoding to allow patching with an
-    // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
-    int offset = kMaxInt;
-    masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
-
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->named_load_inline(), 1);
-    deferred->BindExit();
-  }
-  ASSERT(frame()->height() == original_height - 1);
-  return result;
-}
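// -----------------------------------------------------------------------
// [Editor's sketch; not part of this diff.]  The inlined fast path above,
// modelled in plain C++.  'expected_map' and 'field_offset' are the two
// patchable constants: they start out impossible (null map, kMaxInt
// offset), so the first execution always misses into the slow path, which
// then patches in the real map and in-object offset.  The real code also
// rejects smis with JumpIfSmi before touching the map.  All names here are
// hypothetical.
struct Map;
struct Object;
struct HeapObject { Map* map; };

static Object* InlinedNamedLoad(HeapObject* receiver,
                                Map* expected_map,  // Patched by the IC.
                                int field_offset,   // Patched by the IC.
                                Object* (*slow_path)(HeapObject*)) {
  if (receiver->map != expected_map) return slow_path(receiver);
  // The patched movq: load the in-object property at a fixed offset.
  return *reinterpret_cast<Object**>(
      reinterpret_cast<char*>(receiver) + field_offset);
}
// -----------------------------------------------------------------------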
-
-
-Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
-#ifdef DEBUG
-  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
-#endif
-
-  Result result;
-  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
-      result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
-      // A test rax instruction following the call signals that the inobject
-      // property case was inlined.  Ensure that there is not a test rax
-      // instruction here.
-      __ nop();
-  } else {
-    // Inline the in-object property case.
-    JumpTarget slow, done;
-    Label patch_site;
-
-    // Get the value and receiver from the stack.
-    Result value = frame()->Pop();
-    value.ToRegister();
-    Result receiver = frame()->Pop();
-    receiver.ToRegister();
-
-    // Allocate result register.
-    result = allocator()->Allocate();
-    ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
-
-    // Cannot use r12 for receiver, because that changes
-    // the distance between a call and a fixup location,
-    // due to a special encoding of r12 as r/m in a ModR/M byte.
-    if (receiver.reg().is(r12)) {
-      frame()->Spill(receiver.reg());  // It will be overwritten with result.
-      // Swap receiver and value.
-      __ movq(result.reg(), receiver.reg());
-      Result temp = receiver;
-      receiver = result;
-      result = temp;
-    }
-
-    // Check that the receiver is a heap object.
-    Condition is_smi = masm()->CheckSmi(receiver.reg());
-    slow.Branch(is_smi, &value, &receiver);
-
-    // This is the map check instruction that will be patched.
-    // Initially use an invalid map to force a failure. The exact
-    // instruction sequence is important because we use the
-    // kOffsetToStoreInstruction constant for patching. We avoid using
-    // the __ macro for the following two instructions because it
-    // might introduce extra instructions.
-    __ bind(&patch_site);
-    masm()->movq(kScratchRegister, FACTORY->null_value(),
-                 RelocInfo::EMBEDDED_OBJECT);
-    masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                 kScratchRegister);
-    // This branch is always a forwards branch so it's always a fixed size
-    // which allows the assert below to succeed and patching to work.
-    slow.Branch(not_equal, &value, &receiver);
-
-    // The delta from the patch label to the store offset must be
-    // statically known.
-    ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
-           StoreIC::kOffsetToStoreInstruction);
-
-    // The initial (invalid) offset has to be large enough to force a 32-bit
-    // instruction encoding to allow patching with an arbitrary offset.  Use
-    // kMaxInt (minus kHeapObjectTag).
-    int offset = kMaxInt;
-    __ movq(FieldOperand(receiver.reg(), offset), value.reg());
-    __ movq(result.reg(), value.reg());
-
-    // Allocate scratch register for write barrier.
-    Result scratch = allocator()->Allocate();
-    ASSERT(scratch.is_valid());
-
-    // The write barrier clobbers all input registers, so spill the
-    // receiver and the value.
-    frame_->Spill(receiver.reg());
-    frame_->Spill(value.reg());
-
-    // If the receiver and the value share a register allocate a new
-    // register for the receiver.
-    if (receiver.reg().is(value.reg())) {
-      receiver = allocator()->Allocate();
-      ASSERT(receiver.is_valid());
-      __ movq(receiver.reg(), value.reg());
-    }
-
-    // Update the write barrier. To save instructions in the inlined
-    // version we do not filter smis.
-    Label skip_write_barrier;
-    __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
-    int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
-    __ lea(scratch.reg(), Operand(receiver.reg(), offset));
-    __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
-    if (FLAG_debug_code) {
-      __ movq(receiver.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-      __ movq(value.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-      __ movq(scratch.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    }
-    __ bind(&skip_write_barrier);
-    value.Unuse();
-    scratch.Unuse();
-    receiver.Unuse();
-    done.Jump(&result);
-
-    slow.Bind(&value, &receiver);
-    frame()->Push(&receiver);
-    frame()->Push(&value);
-    result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
-    // Encode the offset to the map check instruction and the offset
-    // to the write barrier store address computation in a test rax
-    // instruction.
-    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
-    __ testl(rax,
-             Immediate((delta_to_record_write << 16) | delta_to_patch_site));
-    done.Bind(&result);
-  }
-
-  ASSERT_EQ(expected_height, frame()->height());
-  return result;
-}
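// -----------------------------------------------------------------------
// [Editor's note; illustrative, not part of this diff.]  The final testl
// above packs two code offsets into a single imm32; recovering them is a
// shift and a mask, assuming each delta fits in 16 bits.
#include <cstdint>

static inline uint32_t PackDeltas(uint16_t delta_to_record_write,
                                  uint16_t delta_to_patch_site) {
  return (static_cast<uint32_t>(delta_to_record_write) << 16) |
         delta_to_patch_site;
}

static inline void UnpackDeltas(uint32_t packed,
                                uint16_t* delta_to_record_write,
                                uint16_t* delta_to_patch_site) {
  *delta_to_record_write = static_cast<uint16_t>(packed >> 16);
  *delta_to_patch_site = static_cast<uint16_t>(packed & 0xFFFF);
}
// -----------------------------------------------------------------------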
-
-
-Result CodeGenerator::EmitKeyedLoad() {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Result result;
-  // Inline array load code if inside of a loop.  We do not know
-  // the receiver map yet, so we initially generate the code with
-  // a check against an invalid map.  In the inline cache code, we
-  // patch the map check if appropriate.
-  if (loop_nesting() > 0) {
-    Comment cmnt(masm_, "[ Inlined load from keyed Property");
-
-    // Use a fresh temporary to load the elements without destroying
-    // the receiver which is needed for the deferred slow case.
-    // Allocate the temporary early so that we use rax if it is free.
-    Result elements = allocator()->Allocate();
-    ASSERT(elements.is_valid());
-
-    Result key = frame_->Pop();
-    Result receiver = frame_->Pop();
-    key.ToRegister();
-    receiver.ToRegister();
-
-    // If key and receiver are shared registers on the frame, their values will
-    // be automatically saved and restored when going to deferred code.
-    // The result is returned in elements, which is not shared.
-    DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue(elements.reg(),
-                                           receiver.reg(),
-                                           key.reg());
-
-    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
-    // Check that the receiver has the expected map.
-    // Initially, use an invalid map. The map is patched in the IC
-    // initialization code.
-    __ bind(deferred->patch_site());
-    // Use masm-> here instead of the double underscore macro since extra
-    // coverage code can interfere with the patching.  Do not use a load
-    // from the root array to load null_value, since the load must be patched
-    // with the expected receiver map, which is not in the root array.
-    masm_->movq(kScratchRegister, FACTORY->null_value(),
-                RelocInfo::EMBEDDED_OBJECT);
-    masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                kScratchRegister);
-    deferred->Branch(not_equal);
-
-    __ JumpUnlessNonNegativeSmi(key.reg(), deferred->entry_label());
-
-    // Get the elements array from the receiver.
-    __ movq(elements.reg(),
-            FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-    __ AssertFastElements(elements.reg());
-
-    // Check that key is within bounds.
-    __ SmiCompare(key.reg(),
-                  FieldOperand(elements.reg(), FixedArray::kLengthOffset));
-    deferred->Branch(above_equal);
-
-    // Load and check that the result is not the hole.  We could
-    // reuse the index or elements register for the value.
-    //
-    // TODO(206): Consider whether it makes sense to try some
-    // heuristic about which register to reuse.  For example, if
-    // one is rax, then we can reuse that one because the value
-    // coming from the deferred code will be in rax.
-    SmiIndex index =
-        masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
-    __ movq(elements.reg(),
-            FieldOperand(elements.reg(),
-                         index.reg,
-                         index.scale,
-                         FixedArray::kHeaderSize));
-    result = elements;
-    __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
-    deferred->Branch(equal);
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->keyed_load_inline(), 1);
-
-    deferred->BindExit();
-  } else {
-    Comment cmnt(masm_, "[ Load from keyed Property");
-    result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
-    // Make sure that we do not have a test instruction after the
-    // call.  A test instruction after the call is used to
-    // indicate that we have generated an inline version of the
-    // keyed load.  The explicit nop instruction is here because
-    // the push that follows might be peep-hole optimized away.
-    __ nop();
-  }
-  ASSERT(frame()->height() == original_height - 2);
-  return result;
-}
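// -----------------------------------------------------------------------
// [Editor's sketch; not part of this diff.]  Outline of the inlined keyed
// fast path: a bounds check, then a hole check.  The hole check matters
// because the inline code cannot tell a deleted element apart from a
// present one, so hitting the hole sentinel falls back to the full IC.
// Types and names are hypothetical stand-ins.
struct Object;
extern Object* const kTheHole;  // Sentinel for deleted elements.

struct FastElements { unsigned length; Object** slots; };

static Object* InlinedKeyedLoad(FastElements* elements, unsigned key,
                                Object* (*slow_path)(FastElements*,
                                                     unsigned)) {
  // An unsigned compare also rejects negative keys in a single branch.
  if (key >= elements->length) return slow_path(elements, key);
  Object* value = elements->slots[key];
  if (value == kTheHole) return slow_path(elements, key);
  return value;
}
// -----------------------------------------------------------------------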
-
-
-Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
-#ifdef DEBUG
-  int original_height = frame()->height();
-#endif
-  Result result;
-  // Generate inlined version of the keyed store if the code is in a loop
-  // and the key is likely to be a smi.
-  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
-    Comment cmnt(masm(), "[ Inlined store to keyed Property");
-
-    // Get the receiver, key and value into registers.
-    result = frame()->Pop();
-    Result key = frame()->Pop();
-    Result receiver = frame()->Pop();
-
-    Result tmp = allocator_->Allocate();
-    ASSERT(tmp.is_valid());
-    Result tmp2 = allocator_->Allocate();
-    ASSERT(tmp2.is_valid());
-
-    // Determine whether the value is a constant before putting it in a
-    // register.
-    bool value_is_constant = result.is_constant();
-
-    // Make sure that value, key and receiver are in registers.
-    result.ToRegister();
-    key.ToRegister();
-    receiver.ToRegister();
-
-    DeferredReferenceSetKeyedValue* deferred =
-        new DeferredReferenceSetKeyedValue(result.reg(),
-                                           key.reg(),
-                                           receiver.reg(),
-                                           strict_mode_flag());
-
-    // Check that the receiver is not a smi.
-    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
-    // Check that the key is a smi.
-    if (!key.is_smi()) {
-      __ JumpIfNotSmi(key.reg(), deferred->entry_label());
-    } else if (FLAG_debug_code) {
-      __ AbortIfNotSmi(key.reg());
-    }
-
-    // Check that the receiver is a JSArray.
-    __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
-    deferred->Branch(not_equal);
-
-    // Get the elements array from the receiver and check that it is not a
-    // dictionary.
-    __ movq(tmp.reg(),
-            FieldOperand(receiver.reg(), JSArray::kElementsOffset));
-
-    // Check whether it is possible to omit the write barrier. If the elements
-    // array is in new space or the value written is a smi we can safely update
-    // the elements array without a write barrier.
-    Label in_new_space;
-    __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
-    if (!value_is_constant) {
-      __ JumpIfNotSmi(result.reg(), deferred->entry_label());
-    }
-
-    __ bind(&in_new_space);
-    // Bind the deferred code patch site to be able to locate the fixed
-    // array map comparison.  When debugging, we patch this comparison to
-    // always fail so that we will hit the IC call in the deferred code
-    // which will allow the debugger to break for fast case stores.
-    __ bind(deferred->patch_site());
-    // Avoid using __ to ensure the distance from patch_site
-    // to the map address is always the same.
-    masm()->movq(kScratchRegister, FACTORY->fixed_array_map(),
-                 RelocInfo::EMBEDDED_OBJECT);
-    __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
-            kScratchRegister);
-    deferred->Branch(not_equal);
-
-    // Check that the key is within bounds.  Both the key and the length of
-    // the JSArray are smis (because the fixed array check above ensures the
-    // elements are in fast case). Use unsigned comparison to handle negative
-    // keys.
-    __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
-                  key.reg());
-    deferred->Branch(below_equal);
-
-    // Store the value.
-    SmiIndex index =
-        masm()->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
-    __ movq(FieldOperand(tmp.reg(),
-                         index.reg,
-                         index.scale,
-                         FixedArray::kHeaderSize),
-            result.reg());
-    Counters* counters = masm()->isolate()->counters();
-    __ IncrementCounter(counters->keyed_store_inline(), 1);
-
-    deferred->BindExit();
-  } else {
-    result = frame()->CallKeyedStoreIC(strict_mode_flag());
-    // Make sure that we do not have a test instruction after the
-    // call.  A test instruction after the call is used to
-    // indicate that we have generated an inline version of the
-    // keyed store.
-    __ nop();
-  }
-  ASSERT(frame()->height() == original_height - 3);
-  return result;
-}
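// -----------------------------------------------------------------------
// [Editor's note; illustrative, not part of this diff.]  Why the inline
// store may skip the write barrier: a barrier is only needed when a store
// could create a pointer from an old-space object to a new-space object.
// If the elements array is itself in new space, or the stored value is a
// smi rather than a heap pointer, no such edge can appear.
static bool StoreNeedsWriteBarrier(bool elements_in_new_space,
                                   bool value_is_smi) {
  return !elements_in_new_space && !value_is_smi;
}
// -----------------------------------------------------------------------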
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-Handle<String> Reference::GetName() {
-  ASSERT(type_ == NAMED);
-  Property* property = expression_->AsProperty();
-  if (property == NULL) {
-    // Global variable reference treated as a named property reference.
-    VariableProxy* proxy = expression_->AsVariableProxy();
-    ASSERT(proxy->AsVariable() != NULL);
-    ASSERT(proxy->AsVariable()->is_global());
-    return proxy->name();
-  } else {
-    Literal* raw_name = property->key()->AsLiteral();
-    ASSERT(raw_name != NULL);
-    return Handle<String>(String::cast(*raw_name->handle()));
-  }
-}
-
-
-void Reference::GetValue() {
-  ASSERT(!cgen_->in_spilled_code());
-  ASSERT(cgen_->HasValidEntryRegisters());
-  ASSERT(!is_illegal());
-  MacroAssembler* masm = cgen_->masm();
-
-  // Record the source position for the property load.
-  Property* property = expression_->AsProperty();
-  if (property != NULL) {
-    cgen_->CodeForSourcePosition(property->position());
-  }
-
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Load from Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      ASSERT(slot != NULL);
-      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-      break;
-    }
-
-    case NAMED: {
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      bool is_global = var != NULL;
-      ASSERT(!is_global || var->is_global());
-      if (persist_after_get_) {
-        cgen_->frame()->Dup();
-      }
-      Result result = cgen_->EmitNamedLoad(GetName(), is_global);
-      cgen_->frame()->Push(&result);
-      break;
-    }
-
-    case KEYED: {
-      // A load of a bare identifier (load from global) cannot be keyed.
-      ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
-      if (persist_after_get_) {
-        cgen_->frame()->PushElementAt(1);
-        cgen_->frame()->PushElementAt(1);
-      }
-      Result value = cgen_->EmitKeyedLoad();
-      cgen_->frame()->Push(&value);
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-
-  if (!persist_after_get_) {
-    set_unloaded();
-  }
-}
-
-
-void Reference::TakeValue() {
-  // TODO(X64): This function is completely architecture independent. Move
-  // it somewhere shared.
-
-  // For non-constant frame-allocated slots, we invalidate the value in the
-  // slot.  For all others, we fall back on GetValue.
-  ASSERT(!cgen_->in_spilled_code());
-  ASSERT(!is_illegal());
-  if (type_ != SLOT) {
-    GetValue();
-    return;
-  }
-
-  Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-  ASSERT(slot != NULL);
-  if (slot->type() == Slot::LOOKUP ||
-      slot->type() == Slot::CONTEXT ||
-      slot->var()->mode() == Variable::CONST ||
-      slot->is_arguments()) {
-    GetValue();
-    return;
-  }
-
-  // Only non-constant, frame-allocated parameters and locals can reach
-  // here.  Be careful not to use the optimizations for arguments
-  // object access since it may not have been initialized yet.
-  ASSERT(!slot->is_arguments());
-  if (slot->type() == Slot::PARAMETER) {
-    cgen_->frame()->TakeParameterAt(slot->index());
-  } else {
-    ASSERT(slot->type() == Slot::LOCAL);
-    cgen_->frame()->TakeLocalAt(slot->index());
-  }
-
-  ASSERT(persist_after_get_);
-  // Do not unload the reference, because it is used in SetValue.
-}
-
-
-void Reference::SetValue(InitState init_state) {
-  ASSERT(cgen_->HasValidEntryRegisters());
-  ASSERT(!is_illegal());
-  MacroAssembler* masm = cgen_->masm();
-  switch (type_) {
-    case SLOT: {
-      Comment cmnt(masm, "[ Store to Slot");
-      Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot();
-      ASSERT(slot != NULL);
-      cgen_->StoreToSlot(slot, init_state);
-      set_unloaded();
-      break;
-    }
-
-    case NAMED: {
-      Comment cmnt(masm, "[ Store to named Property");
-      Result answer = cgen_->EmitNamedStore(GetName(), false);
-      cgen_->frame()->Push(&answer);
-      set_unloaded();
-      break;
-    }
-
-    case KEYED: {
-      Comment cmnt(masm, "[ Store to keyed Property");
-      Property* property = expression()->AsProperty();
-      ASSERT(property != NULL);
-
-      Result answer = cgen_->EmitKeyedStore(property->key()->type());
-      cgen_->frame()->Push(&answer);
-      set_unloaded();
-      break;
-    }
-
-    case UNLOADED:
-    case ILLEGAL:
-      UNREACHABLE();
-  }
-}
-
-
-Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
-                                                      Result* left,
-                                                      Result* right) {
-  if (stub->ArgsInRegistersSupported()) {
-    stub->SetArgsInRegisters();
-    return frame_->CallStub(stub, left, right);
-  } else {
-    frame_->Push(left);
-    frame_->Push(right);
-    return frame_->CallStub(stub, 2);
-  }
-}
-
-#undef __
-
 #define __ masm.
 
 #ifdef _WIN64
@@ -8758,7 +58,7 @@
                                                  &actual_size,
                                                  true));
   CHECK(buffer);
-  Assembler masm(buffer, static_cast<int>(actual_size));
+  Assembler masm(NULL, buffer, static_cast<int>(actual_size));
   // Generated code is put into a fixed, unmovable, buffer, and not into
   // the V8 heap. We can't, and don't, refer to any relocatable addresses
   // (e.g. the JavaScript nan-object).
@@ -8832,7 +132,7 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  // Call the function from C++.
+  // Call the function from C++ through this pointer.
   return FUNCTION_CAST<ModuloFunction>(buffer);
 }
 
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 9a70907..94c7850 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -30,270 +30,17 @@
 
 #include "ast.h"
 #include "ic-inl.h"
-#include "jump-target-heavy.h"
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations
 class CompilationInfo;
-class DeferredCode;
-class RegisterAllocator;
-class RegisterFile;
 
-enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 
 
 // -------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that puts a
-// reference on the virtual frame.  The reference may be consumed
-// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
-// When the lifetime (scope) of a valid reference ends, it must have
-// been consumed, and be in state UNLOADED.
-class Reference BASE_EMBEDDED {
- public:
-  // The values of the types are important; see size().
-  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-
-  Reference(CodeGenerator* cgen,
-            Expression* expression,
-            bool persist_after_get = false);
-  ~Reference();
-
-  Expression* expression() const { return expression_; }
-  Type type() const { return type_; }
-  void set_type(Type value) {
-    ASSERT_EQ(ILLEGAL, type_);
-    type_ = value;
-  }
-
-  void set_unloaded() {
-    ASSERT_NE(ILLEGAL, type_);
-    ASSERT_NE(UNLOADED, type_);
-    type_ = UNLOADED;
-  }
-  // The size the reference takes up on the stack.
-  int size() const {
-    return (type_ < SLOT) ? 0 : type_;
-  }
-
-  bool is_illegal() const { return type_ == ILLEGAL; }
-  bool is_slot() const { return type_ == SLOT; }
-  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
-  bool is_unloaded() const { return type_ == UNLOADED; }
-
-  // Return the name.  Only valid for named property references.
-  Handle<String> GetName();
-
-  // Generate code to push the value of the reference on top of the
-  // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is consumed by the call unless the
-  // reference is for a compound assignment.
-  // If the reference is not consumed, it is left in place under its value.
-  void GetValue();
-
-  // Like GetValue except that the slot is expected to be written to before
-  // being read from again.  The value of the reference may be invalidated,
-  // causing subsequent attempts to read it to fail.
-  void TakeValue();
-
-  // Generate code to store the value on top of the expression stack in the
-  // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The value is stored in the location specified
-  // by the reference, and is left on top of the stack, after the reference
-  // is popped from beneath it (unloaded).
-  void SetValue(InitState init_state);
-
- private:
-  CodeGenerator* cgen_;
-  Expression* expression_;
-  Type type_;
-  bool persist_after_get_;
-};
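// -----------------------------------------------------------------------
// [Editor's note; illustrative, not part of this diff.]  Why "the values
// of the types are important": the enum doubles as the stack footprint.
// A SLOT reference keeps nothing on the frame, a NAMED reference keeps the
// receiver (1 slot), and a KEYED reference keeps receiver and key (2
// slots); UNLOADED and ILLEGAL are negative, so size() clamps them to 0.
static int ReferenceSize(int type) {
  return (type < 0) ? 0 : type;  // Mirrors Reference::size() above.
}
// -----------------------------------------------------------------------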
-
-
-// -------------------------------------------------------------------------
-// Control destinations.
-
-// A control destination encapsulates a pair of jump targets and a
-// flag indicating which one is the preferred fall-through.  The
-// preferred fall-through must be unbound, the other may be already
-// bound (ie, a backward target).
-//
-// The true and false targets may be jumped to unconditionally or
-// control may split conditionally.  Unconditional jumping and
-// splitting should be emitted in tail position (as the last thing
-// when compiling an expression) because they can cause either label
-// to be bound or the non-fall through to be jumped to leaving an
-// invalid virtual frame.
-//
-// The labels in the control destination can be extracted and
-// manipulated normally without affecting the state of the
-// destination.
-
-class ControlDestination BASE_EMBEDDED {
- public:
-  ControlDestination(JumpTarget* true_target,
-                     JumpTarget* false_target,
-                     bool true_is_fall_through)
-      : true_target_(true_target),
-        false_target_(false_target),
-        true_is_fall_through_(true_is_fall_through),
-        is_used_(false) {
-    ASSERT(true_is_fall_through ? !true_target->is_bound()
-                                : !false_target->is_bound());
-  }
-
-  // Accessors for the jump targets.  Directly jumping or branching to
-  // or binding the targets will not update the destination's state.
-  JumpTarget* true_target() const { return true_target_; }
-  JumpTarget* false_target() const { return false_target_; }
-
-  // True if the destination has been jumped to unconditionally or
-  // control has been split to both targets.  This predicate does not
-  // test whether the targets have been extracted and manipulated as
-  // raw jump targets.
-  bool is_used() const { return is_used_; }
-
-  // True if the destination is used and the true target (respectively
-  // false target) was the fall through.  If the target is backward,
-  // "fall through" included jumping unconditionally to it.
-  bool true_was_fall_through() const {
-    return is_used_ && true_is_fall_through_;
-  }
-
-  bool false_was_fall_through() const {
-    return is_used_ && !true_is_fall_through_;
-  }
-
-  // Emit a branch to one of the true or false targets, and bind the
-  // other target.  Because this binds the fall-through target, it
-  // should be emitted in tail position (as the last thing when
-  // compiling an expression).
-  void Split(Condition cc) {
-    ASSERT(!is_used_);
-    if (true_is_fall_through_) {
-      false_target_->Branch(NegateCondition(cc));
-      true_target_->Bind();
-    } else {
-      true_target_->Branch(cc);
-      false_target_->Bind();
-    }
-    is_used_ = true;
-  }
-
-  // Emit an unconditional jump in tail position, to the true target
-  // (if the argument is true) or the false target.  The "jump" will
-  // actually bind the jump target if it is forward, jump to it if it
-  // is backward.
-  void Goto(bool where) {
-    ASSERT(!is_used_);
-    JumpTarget* target = where ? true_target_ : false_target_;
-    if (target->is_bound()) {
-      target->Jump();
-    } else {
-      target->Bind();
-    }
-    is_used_ = true;
-    true_is_fall_through_ = where;
-  }
-
-  // Mark this jump target as used as if Goto had been called, but
-  // without generating a jump or binding a label (the control effect
-  // should have already happened).  This is used when the left
-  // subexpression of the short-circuit boolean operators are
-  // compiled.
-  void Use(bool where) {
-    ASSERT(!is_used_);
-    ASSERT((where ? true_target_ : false_target_)->is_bound());
-    is_used_ = true;
-    true_is_fall_through_ = where;
-  }
-
-  // Swap the true and false targets but keep the same actual label as
-  // the fall through.  This is used when compiling negated
-  // expressions, where we want to swap the targets but preserve the
-  // state.
-  void Invert() {
-    JumpTarget* temp_target = true_target_;
-    true_target_ = false_target_;
-    false_target_ = temp_target;
-
-    true_is_fall_through_ = !true_is_fall_through_;
-  }
-
- private:
-  // True and false jump targets.
-  JumpTarget* true_target_;
-  JumpTarget* false_target_;
-
-  // Before using the destination: true if the true target is the
-  // preferred fall through, false if the false target is.  After
-  // using the destination: true if the true target was actually used
-  // as the fall through, false if the false target was.
-  bool true_is_fall_through_;
-
-  // True if the Split or Goto functions have been called.
-  bool is_used_;
-};
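// -----------------------------------------------------------------------
// [Editor's sketch; not part of this diff.]  A toy model of Split() that
// makes the fall-through rule concrete: branch away on the condition that
// belongs to the non-fall-through target, then bind the fall-through
// target so execution drops into it without a jump.  ToyTarget only
// prints; it is not the real JumpTarget API.
#include <cstdio>

struct ToyTarget {
  const char* name;
  void Branch(const char* cc) { std::printf("  j%s %s\n", cc, name); }
  void Bind() { std::printf("%s:\n", name); }
};

static void ToySplit(bool true_is_fall_through,
                     ToyTarget* true_target, ToyTarget* false_target,
                     const char* cc, const char* negated_cc) {
  if (true_is_fall_through) {
    false_target->Branch(negated_cc);  // Jump away on the negated condition.
    true_target->Bind();               // Fall through into the true case.
  } else {
    true_target->Branch(cc);
    false_target->Bind();
  }
}

int main() {
  ToyTarget t = {"is_true"}, f = {"is_false"};
  ToySplit(true, &t, &f, "e", "ne");  // Prints "  jne is_false", "is_true:".
  return 0;
}
// -----------------------------------------------------------------------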
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator (and back up, in
-// the form of the state of the jump target pair).  It is threaded through
-// the call stack.  Constructing a state implicitly pushes it on the owning
-// code generator's stack of states, and destroying one implicitly pops it.
-//
-// The code generator state is only used for expressions, so statements have
-// the initial state.
-
-class CodeGenState BASE_EMBEDDED {
- public:
-  // Create an initial code generator state.  Destroying the initial state
-  // leaves the code generator with a NULL state.
-  explicit CodeGenState(CodeGenerator* owner);
-
-  // Create a code generator state based on a code generator's current
-  // state.  The new state has its own control destination.
-  CodeGenState(CodeGenerator* owner, ControlDestination* destination);
-
-  // Destroy a code generator state and restore the owning code generator's
-  // previous state.
-  ~CodeGenState();
-
-  // Accessors for the state.
-  ControlDestination* destination() const { return destination_; }
-
- private:
-  // The owning code generator.
-  CodeGenerator* owner_;
-
-  // A control destination in case the expression has a control-flow
-  // effect.
-  ControlDestination* destination_;
-
-  // The previous state of the owning code generator, restored when
-  // this state is destroyed.
-  CodeGenState* previous_;
-};
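// -----------------------------------------------------------------------
// [Editor's sketch; not part of this diff.]  The push-on-construct /
// pop-on-destroy behavior described above is plain RAII; a stripped-down
// model with hypothetical names:
struct ToyState;
struct ToyOwner { ToyState* state = nullptr; };

struct ToyState {
  ToyOwner* owner;
  ToyState* previous;
  explicit ToyState(ToyOwner* o) : owner(o), previous(o->state) {
    o->state = this;  // Constructing implicitly pushes this state.
  }
  ~ToyState() { owner->state = previous; }  // Destroying implicitly pops it.
};
// -----------------------------------------------------------------------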
-
-
-// -------------------------------------------------------------------------
-// Arguments allocation mode
-
-enum ArgumentsAllocationMode {
-  NO_ARGUMENTS_ALLOCATION,
-  EAGER_ARGUMENTS_ALLOCATION,
-  LAZY_ARGUMENTS_ALLOCATION
-};
-
-
-// -------------------------------------------------------------------------
 // CodeGenerator
 
 class CodeGenerator: public AstVisitor {
@@ -319,431 +66,7 @@
                               int pos,
                               bool right_here = false);
 
-  // Accessors
-  MacroAssembler* masm() { return masm_; }
-  VirtualFrame* frame() const { return frame_; }
-  inline Handle<Script> script();
-
-  bool has_valid_frame() const { return frame_ != NULL; }
-
-  // Set the virtual frame to be new_frame, with non-frame register
-  // reference counts given by non_frame_registers.  The non-frame
-  // register reference counts of the old frame are returned in
-  // non_frame_registers.
-  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
-
-  void DeleteFrame();
-
-  RegisterAllocator* allocator() const { return allocator_; }
-
-  CodeGenState* state() { return state_; }
-  void set_state(CodeGenState* state) { state_ = state; }
-
-  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
-
-  bool in_spilled_code() const { return in_spilled_code_; }
-  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
  private:
-  // Type of a member function that generates inline code for a native function.
-  typedef void (CodeGenerator::*InlineFunctionGenerator)
-      (ZoneList<Expression*>*);
-
-  static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
-  // Construction/Destruction
-  explicit CodeGenerator(MacroAssembler* masm);
-
-  // Accessors
-  inline bool is_eval();
-  inline Scope* scope();
-  inline bool is_strict_mode();
-  inline StrictModeFlag strict_mode_flag();
-
-  // Generating deferred code.
-  void ProcessDeferred();
-
-  // State
-  ControlDestination* destination() const { return state_->destination(); }
-
-  // Track loop nesting level.
-  int loop_nesting() const { return loop_nesting_; }
-  void IncrementLoopNesting() { loop_nesting_++; }
-  void DecrementLoopNesting() { loop_nesting_--; }
-
-
-  // Node visitors.
-  void VisitStatements(ZoneList<Statement*>* statements);
-
-  virtual void VisitSlot(Slot* node);
-#define DEF_VISIT(type)                         \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  // Visit a statement and then spill the virtual frame if control flow can
-  // reach the end of the statement (ie, it does not exit via break,
-  // continue, return, or throw).  This function is used temporarily while
-  // the code generator is being transformed.
-  void VisitAndSpill(Statement* statement);
-
-  // Visit a list of statements and then spill the virtual frame if control
-  // flow can reach the end of the list.
-  void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
-  // Main code generation function
-  void Generate(CompilationInfo* info);
-
-  // Generate the return sequence code.  Should be called no more than
-  // once per compiled function, immediately after binding the return
-  // target (which can not be done more than once).
-  void GenerateReturnSequence(Result* return_value);
-
-  // Generate code for a fast smi loop.
-  void GenerateFastSmiLoop(ForStatement* node);
-
-  // Returns the arguments allocation mode.
-  ArgumentsAllocationMode ArgumentsMode();
-
-  // Store the arguments object and allocate it if necessary.
-  Result StoreArgumentsObject(bool initial);
-
-  // The following are used by class Reference.
-  void LoadReference(Reference* ref);
-  void UnloadReference(Reference* ref);
-
-  Operand SlotOperand(Slot* slot, Register tmp);
-
-  Operand ContextSlotOperandCheckExtensions(Slot* slot,
-                                            Result tmp,
-                                            JumpTarget* slow);
-
-  // Expressions
-  void LoadCondition(Expression* x,
-                     ControlDestination* destination,
-                     bool force_control);
-  void Load(Expression* expr);
-  void LoadGlobal();
-  void LoadGlobalReceiver();
-
-  // Generate code to push the value of an expression on top of the frame
-  // and then spill the frame fully to memory.  This function is used
-  // temporarily while the code generator is being transformed.
-  void LoadAndSpill(Expression* expression);
-
-  // Read a value from a slot and leave it on top of the expression stack.
-  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
-  Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
-                                           TypeofState typeof_state,
-                                           JumpTarget* slow);
-
-  // Support for loading from local/global variables and arguments
-  // whose location is known unless they are shadowed by
-  // eval-introduced bindings. Generates no code for unsupported slot
-  // types and therefore expects to fall through to the slow jump target.
-  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
-                                       TypeofState typeof_state,
-                                       Result* result,
-                                       JumpTarget* slow,
-                                       JumpTarget* done);
-
-  // Store the value on top of the expression stack into a slot, leaving the
-  // value in place.
-  void StoreToSlot(Slot* slot, InitState init_state);
-
-  // Support for compiling assignment expressions.
-  void EmitSlotAssignment(Assignment* node);
-  void EmitNamedPropertyAssignment(Assignment* node);
-  void EmitKeyedPropertyAssignment(Assignment* node);
-
-  // Receiver is passed on the frame and not consumed.
-  Result EmitNamedLoad(Handle<String> name, bool is_contextual);
-
-  // If the store is contextual, value is passed on the frame and consumed.
-  // Otherwise, receiver and value are passed on the frame and consumed.
-  Result EmitNamedStore(Handle<String> name, bool is_contextual);
-
-  // Load a property of an object, returning it in a Result.
-  // The object and the property name are passed on the stack, and
-  // not changed.
-  Result EmitKeyedLoad();
-
-  // Receiver, key, and value are passed on the frame and consumed.
-  Result EmitKeyedStore(StaticType* key_type);
-
-  // Special code for typeof expressions: Unfortunately, we must
-  // be careful when loading the expression in 'typeof'
-  // expressions. We are not allowed to throw reference errors for
-  // non-existing properties of the global object, so we must make it
-  // look like an explicit property access, instead of an access
-  // through the context chain.
-  void LoadTypeofExpression(Expression* x);
-
-  // Translate the value on top of the frame into control flow to the
-  // control destination.
-  void ToBoolean(ControlDestination* destination);
-
-  // Generate code that computes a shortcutting logical operation.
-  void GenerateLogicalBooleanOperation(BinaryOperation* node);
-
-  void GenericBinaryOperation(BinaryOperation* expr,
-                              OverwriteMode overwrite_mode);
-
-  // Generate a stub call from the virtual frame.
-  Result GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub,
-                                         Result* left,
-                                         Result* right);
-
-  // Emits code sequence that jumps to a JumpTarget if the inputs
-  // are both smis.  Cannot be in MacroAssembler because it takes
-  // advantage of TypeInfo to skip unneeded checks.
-  void JumpIfBothSmiUsingTypeInfo(Result* left,
-                                  Result* right,
-                                  JumpTarget* both_smi);
-
-  // Emits code sequence that jumps to deferred code if the input
-  // is not a smi.  Cannot be in MacroAssembler because it takes
-  // advantage of TypeInfo to skip unneeded checks.
-  void JumpIfNotSmiUsingTypeInfo(Register reg,
-                                 TypeInfo type,
-                                 DeferredCode* deferred);
-
-  // Emits code sequence that jumps to deferred code if the inputs
-  // are not both smis.  Cannot be in MacroAssembler because it takes
-  // advantage of TypeInfo to skip unneeded checks.
-  void JumpIfNotBothSmiUsingTypeInfo(Register left,
-                                     Register right,
-                                     TypeInfo left_info,
-                                     TypeInfo right_info,
-                                     DeferredCode* deferred);
-
-  // If possible, combine two constant smi values using op to produce
-  // a smi result, and push it on the virtual frame, all at compile time.
-  // Returns true if it succeeds.  Otherwise it has no effect.
-  bool FoldConstantSmis(Token::Value op, int left, int right);
-
-  // Emit code to perform a binary operation on a constant
-  // smi and a likely smi.  Consumes the Result *operand.
-  Result ConstantSmiBinaryOperation(BinaryOperation* expr,
-                                    Result* operand,
-                                    Handle<Object> constant_operand,
-                                    bool reversed,
-                                    OverwriteMode overwrite_mode);
-
-  // Emit code to perform a binary operation on two likely smis.
-  // The code to handle smi arguments is produced inline.
-  // Consumes the Results *left and *right.
-  Result LikelySmiBinaryOperation(BinaryOperation* expr,
-                                  Result* left,
-                                  Result* right,
-                                  OverwriteMode overwrite_mode);
-
-  void Comparison(AstNode* node,
-                  Condition cc,
-                  bool strict,
-                  ControlDestination* destination);
-
-  // If at least one of the sides is a constant smi, generate optimized code.
-  void ConstantSmiComparison(Condition cc,
-                             bool strict,
-                             ControlDestination* destination,
-                             Result* left_side,
-                             Result* right_side,
-                             bool left_side_constant_smi,
-                             bool right_side_constant_smi,
-                             bool is_loop_condition);
-
-  void GenerateInlineNumberComparison(Result* left_side,
-                                      Result* right_side,
-                                      Condition cc,
-                                      ControlDestination* dest);
-
-  // To prevent long attacker-controlled byte sequences, integer constants
-  // from the JavaScript source are loaded in two parts if they are larger
-  // than 16 bits.
-  static const int kMaxSmiInlinedBits = 16;
-  bool IsUnsafeSmi(Handle<Object> value);
-  // Load an integer constant x into a register target using
-  // at most 16 bits of user-controlled data per assembly operation.
-  void LoadUnsafeSmi(Register target, Handle<Object> value);
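// -----------------------------------------------------------------------
// [Editor's sketch; not part of this diff.]  One way to satisfy the 16-bit
// rule above (the comment does not pin down the exact instruction sequence
// used): emit the high half, shift, then OR in the low half, so no single
// immediate carries more than 16 attacker-chosen bits.
#include <cstdint>

static uint32_t LoadInSixteenBitParts(uint32_t value) {
  uint32_t reg = value >> 16;   // mov reg, imm16  (high half)
  reg <<= 16;                   // shl reg, 16
  reg |= (value & 0xFFFFu);     // or  reg, imm16  (low half)
  return reg;
}
// -----------------------------------------------------------------------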
-
-  void CallWithArguments(ZoneList<Expression*>* arguments,
-                         CallFunctionFlags flags,
-                         int position);
-
-  // An optimized implementation of expressions of the form
-  // x.apply(y, arguments).  We call x the applicand and y the receiver.
-  // The optimization avoids allocating an arguments object if possible.
-  void CallApplyLazy(Expression* applicand,
-                     Expression* receiver,
-                     VariableProxy* arguments,
-                     int position);
-
-  void CheckStack();
-
-  bool CheckForInlineRuntimeCall(CallRuntime* node);
-
-  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
-  // Declare global variables and functions in the given array of
-  // name/value pairs.
-  void DeclareGlobals(Handle<FixedArray> pairs);
-
-  // Instantiate the function based on the shared function info.
-  void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
-                           bool pretenure);
-
-  // Support for type checks.
-  void GenerateIsSmi(ZoneList<Expression*>* args);
-  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
-  void GenerateIsArray(ZoneList<Expression*>* args);
-  void GenerateIsRegExp(ZoneList<Expression*>* args);
-  void GenerateIsObject(ZoneList<Expression*>* args);
-  void GenerateIsSpecObject(ZoneList<Expression*>* args);
-  void GenerateIsFunction(ZoneList<Expression*>* args);
-  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
-  void GenerateIsStringWrapperSafeForDefaultValueOf(
-      ZoneList<Expression*>* args);
-
-  // Support for construct call checks.
-  void GenerateIsConstructCall(ZoneList<Expression*>* args);
-
-  // Support for arguments.length and arguments[?].
-  void GenerateArgumentsLength(ZoneList<Expression*>* args);
-  void GenerateArguments(ZoneList<Expression*>* args);
-
-  // Support for accessing the class and value fields of an object.
-  void GenerateClassOf(ZoneList<Expression*>* args);
-  void GenerateValueOf(ZoneList<Expression*>* args);
-  void GenerateSetValueOf(ZoneList<Expression*>* args);
-
-  // Fast support for charCodeAt(n).
-  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharFromCode(ZoneList<Expression*>* args);
-
-  // Fast support for string.charAt(n) and string[n].
-  void GenerateStringCharAt(ZoneList<Expression*>* args);
-
-  // Fast support for object equality testing.
-  void GenerateObjectEquals(ZoneList<Expression*>* args);
-
-  void GenerateLog(ZoneList<Expression*>* args);
-
-  void GenerateGetFramePointer(ZoneList<Expression*>* args);
-
-  // Fast support for Math.random().
-  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
-
-  // Fast support for StringAdd.
-  void GenerateStringAdd(ZoneList<Expression*>* args);
-
-  // Fast support for SubString.
-  void GenerateSubString(ZoneList<Expression*>* args);
-
-  // Fast support for StringCompare.
-  void GenerateStringCompare(ZoneList<Expression*>* args);
-
-  // Support for direct calls from JavaScript to native RegExp code.
-  void GenerateRegExpExec(ZoneList<Expression*>* args);
-
-  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
-
-  // Support for fast native caches.
-  void GenerateGetFromCache(ZoneList<Expression*>* args);
-
-  // Fast support for number to string.
-  void GenerateNumberToString(ZoneList<Expression*>* args);
-
-  // Fast swapping of elements. Takes three expressions, the object and two
-  // indices. This should only be used if the indices are known to be
-  // non-negative and within bounds of the elements array at the call site.
-  void GenerateSwapElements(ZoneList<Expression*>* args);
-
-  // Fast call for custom callbacks.
-  void GenerateCallFunction(ZoneList<Expression*>* args);
-
-  // Fast call to math functions.
-  void GenerateMathPow(ZoneList<Expression*>* args);
-  void GenerateMathSin(ZoneList<Expression*>* args);
-  void GenerateMathCos(ZoneList<Expression*>* args);
-  void GenerateMathSqrt(ZoneList<Expression*>* args);
-  void GenerateMathLog(ZoneList<Expression*>* args);
-
-  // Check whether two RegExps are equivalent.
-  void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
-
-  void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
-  void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
-
-  // Simple condition analysis.
-  enum ConditionAnalysis {
-    ALWAYS_TRUE,
-    ALWAYS_FALSE,
-    DONT_KNOW
-  };
-  ConditionAnalysis AnalyzeCondition(Expression* cond);
-
-  // Methods used to indicate the source position for which code is being
-  // generated.  Source positions are collected by the assembler and emitted
-  // with the relocation information.
-  void CodeForFunctionPosition(FunctionLiteral* fun);
-  void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(Statement* node);
-  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
-  void CodeForSourcePosition(int pos);
-
-  void SetTypeForStackSlot(Slot* slot, TypeInfo info);
-
-#ifdef DEBUG
-  // True if the registers are valid for entry to a block.  There should
-  // be no frame-external references to (non-reserved) registers.
-  bool HasValidEntryRegisters();
-#endif
-
-  ZoneList<DeferredCode*> deferred_;
-
-  // Assembler
-  MacroAssembler* masm_;  // to generate code
-
-  CompilationInfo* info_;
-
-  // Code generation state
-  VirtualFrame* frame_;
-  RegisterAllocator* allocator_;
-  CodeGenState* state_;
-  int loop_nesting_;
-
-  // Jump targets.
-  // The target of the return from the function.
-  BreakTarget function_return_;
-
-  // True if the function return is shadowed (ie, jumping to the target
-  // function_return_ does not jump to the true function return, but rather
-  // to some unlinking code).
-  bool function_return_is_shadowed_;
-
-  // True when we are in code that expects the virtual frame to be fully
-  // spilled.  Some virtual frame function are disabled in DEBUG builds when
-  // called from spilled code, because they do not leave the virtual frame
-  // in a spilled state.
-  bool in_spilled_code_;
-
-  friend class VirtualFrame;
-  friend class Isolate;
-  friend class JumpTarget;
-  friend class Reference;
-  friend class Result;
-  friend class FastCodeGenerator;
-  friend class FullCodeGenerator;
-  friend class FullCodeGenSyntaxChecker;
-
-  friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
-  friend class InlineRuntimeFunctionsTable;
-
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index b49fb1c..e637ba1 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -42,10 +42,12 @@
 namespace internal {
 
 void CPU::Setup() {
-  Isolate::Current()->cpu_features()->Probe(true);
-  if (Serializer::enabled()) {
-    V8::DisableCrankshaft();
-  }
+  CpuFeatures::Probe();
+}
+
+
+bool CPU::SupportsCrankshaft() {
+  return true;  // Yay!
 }
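
The rewritten CPU::Setup delegates feature detection to a static CpuFeatures::Probe(), and SupportsCrankshaft() becomes an unconditional yes on x64. For readers unfamiliar with how such a probe works underneath, here is a minimal standalone sketch of CPUID-based detection; the helper name HasSSE41 and the GCC/Clang intrinsic path are illustrative assumptions, not V8's API:

    // Illustrative sketch only (not V8 code): probe SSE4.1 via CPUID.
    #include <cstdio>
    #if defined(__GNUC__)
    #include <cpuid.h>
    static bool HasSSE41() {  // hypothetical helper name
      unsigned eax, ebx, ecx, edx;
      if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false;
      return (ecx & (1u << 19)) != 0;  // CPUID.1:ECX bit 19 = SSE4.1
    }
    #else
    static bool HasSSE41() { return false; }  // unknown toolchain: assume no
    #endif

    int main() {
      std::printf("SSE4.1: %s\n", HasSSE41() ? "yes" : "no");
    }
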
 
 
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 0398465..423e6f2 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -29,7 +29,8 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
-#include "codegen-inl.h"
+#include "assembler.h"
+#include "codegen.h"
 #include "debug.h"
 
 
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 2080c61..abac2b6 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -600,7 +600,6 @@
 
 void Deoptimizer::EntryGenerator::Generate() {
   GeneratePrologue();
-  CpuFeatures::Scope scope(SSE2);
 
   // Save all general purpose registers before messing with them.
   const int kNumberOfRegisters = Register::kNumRegisters;
@@ -663,23 +662,26 @@
   __ neg(arg5);
 
   // Allocate a new deoptimizer object.
-  __ PrepareCallCFunction(5);
+  __ PrepareCallCFunction(6);
   __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ movq(arg1, rax);
-  __ movq(arg2, Immediate(type()));
+  __ Set(arg2, type());
   // Args 3 and 4 are already in the right registers.
 
-  // On windows put the argument on the stack (PrepareCallCFunction have
-  // created space for this). On linux pass the argument in r8.
+  // On Windows, put the arguments on the stack (PrepareCallCFunction
+  // has created space for this). On Linux, pass the arguments in r8 and r9.
 #ifdef _WIN64
   __ movq(Operand(rsp, 4 * kPointerSize), arg5);
+  __ LoadAddress(arg5, ExternalReference::isolate_address());
+  __ movq(Operand(rsp, 5 * kPointerSize), arg5);
 #else
   __ movq(r8, arg5);
+  __ LoadAddress(r9, ExternalReference::isolate_address());
 #endif
 
   Isolate* isolate = masm()->isolate();
 
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 5);
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
   // Preserve deoptimizer object in register rax and get the input
   // frame descriptor pointer.
   __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
@@ -722,10 +724,11 @@
 
   // Compute the output frame in the deoptimizer.
   __ push(rax);
-  __ PrepareCallCFunction(1);
+  __ PrepareCallCFunction(2);
   __ movq(arg1, rax);
+  __ LoadAddress(arg2, ExternalReference::isolate_address());
   __ CallCFunction(
-      ExternalReference::compute_output_frames_function(isolate), 1);
+      ExternalReference::compute_output_frames_function(isolate), 2);
   __ pop(rax);
 
   // Replace the current frame with the output frames.
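
The hunks above widen both runtime calls by one argument so the isolate is passed explicitly. The #ifdef split follows from the two x64 calling conventions: Win64 passes only four integer arguments in registers (rcx, rdx, r8, r9) and spills the fifth and sixth to the stack area PrepareCallCFunction reserved, while the System V ABI used on Linux passes six (rdi, rsi, rdx, rcx, r8, r9). A plain-C++ sketch of the callee's view; the function shape and names are hypothetical stand-ins for the runtime entry:

    // Hypothetical 6-argument runtime entry, mirroring the widened call.
    // Win64:    a1=rcx a2=rdx a3=r8 a4=r9 a5=stack a6=stack
    // System V: a1=rdi a2=rsi a3=rdx a4=rcx a5=r8  a6=r9
    #include <cstdio>
    struct Isolate;  // opaque, as in the patch

    long NewDeoptimizer(void* function, int type, long a3, long a4,
                        long bailout_id, Isolate* isolate) {
      std::printf("type=%d bailout=%ld isolate=%p\n",
                  type, bailout_id, static_cast<void*>(isolate));
      return 0;
    }

    int main() {
      return static_cast<int>(NewDeoptimizer(nullptr, 1, 0, 0, -8, nullptr));
    }
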
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 189ee42..82bc6ef 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -652,6 +652,9 @@
     case 2:
       mnem = "adc";
       break;
+    case 3:
+      mnem = "sbb";
+      break;
     case 4:
       mnem = "and";
       break;
@@ -1018,12 +1021,26 @@
         current += PrintRightOperand(current);
         AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3);
         current += 1;
+      } else if (third_byte == 0x0b) {
+        get_modrm(*current, &mod, &regop, &rm);
+        // roundsd xmm, xmm/m64, imm8
+        AppendToBuffer("roundsd %s, ", NameOfXMMRegister(regop));
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(", %d", (*current) & 3);
+        current += 1;
       } else {
         UnimplementedInstruction();
       }
     } else {
       get_modrm(*current, &mod, &regop, &rm);
-      if (opcode == 0x6E) {
+      if (opcode == 0x28) {
+        AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
+        current += PrintRightXMMOperand(current);
+      } else if (opcode == 0x29) {
+        AppendToBuffer("movapd ");
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(", %s", NameOfXMMRegister(regop));
+      } else if (opcode == 0x6E) {
         AppendToBuffer("mov%c %s,",
                        rex_w() ? 'q' : 'd',
                        NameOfXMMRegister(regop));
@@ -1041,6 +1058,10 @@
         AppendToBuffer("movdqa ");
         current += PrintRightXMMOperand(current);
         AppendToBuffer(", %s", NameOfXMMRegister(regop));
+      } else if (opcode == 0xD6) {
+        AppendToBuffer("movq ");
+        current += PrintRightXMMOperand(current);
+        AppendToBuffer(", %s", NameOfXMMRegister(regop));
       } else {
         const char* mnemonic = "?";
         if (opcode == 0x50) {
@@ -1142,6 +1163,11 @@
       get_modrm(*current, &mod, &regop, &rm);
       AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
       current += PrintRightXMMOperand(current);
+    } else if (opcode == 0x7E) {
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("movq %s, ", NameOfXMMRegister(regop));
+      current += PrintRightXMMOperand(current);
     } else {
       UnimplementedInstruction();
     }
@@ -1159,6 +1185,22 @@
       current += 4;
     }  // else no immediate displacement.
     AppendToBuffer("nop");
+
+  } else if (opcode == 0x28) {
+    // movaps xmm, xmm/m128
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("movaps %s, ", NameOfXMMRegister(regop));
+    current += PrintRightXMMOperand(current);
+
+  } else if (opcode == 0x29) {
+    // movaps xmm/m128, xmm
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("movaps");
+    current += PrintRightXMMOperand(current);
+    AppendToBuffer(", %s", NameOfXMMRegister(regop));
+
   } else if (opcode == 0xA2 || opcode == 0x31) {
     // RDTSC or CPUID
     AppendToBuffer("%s", mnemonic);
@@ -1170,6 +1212,13 @@
     byte_size_operand_ = idesc.byte_size_operation;
     current += PrintOperands(idesc.mnem, idesc.op_order_, current);
 
+  } else if (opcode == 0x57) {
+    // xorps xmm, xmm/m128
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
+    current += PrintRightXMMOperand(current);
+
   } else if ((opcode & 0xF0) == 0x80) {
     // Jcc: Conditional jump (branch).
     current = data + JumpConditional(data);
@@ -1502,7 +1551,39 @@
         data++;
       }
         break;
-
+      case 0xB0:
+      case 0xB1:
+      case 0xB2:
+      case 0xB3:
+      case 0xB4:
+      case 0xB5:
+      case 0xB6:
+      case 0xB7:
+      case 0xB8:
+      case 0xB9:
+      case 0xBA:
+      case 0xBB:
+      case 0xBC:
+      case 0xBD:
+      case 0xBE:
+      case 0xBF: {
+        // mov reg8,imm8 or mov reg32,imm32
+        byte opcode = *data;
+        data++;
+        bool is_32bit = (opcode >= 0xB8);
+        int reg = (opcode & 0x7) | (rex_b() ? 8 : 0);
+        if (is_32bit) {
+          AppendToBuffer("mov%c %s, ",
+                         operand_size_code(),
+                         NameOfCPURegister(reg));
+          data += PrintImmediate(data, DOUBLEWORD_SIZE);
+        } else {
+          AppendToBuffer("movb %s, ",
+                         NameOfByteCPURegister(reg));
+          data += PrintImmediate(data, BYTE_SIZE);
+        }
+        break;
+      }
       case 0xFE: {
         data++;
         int mod, regop, rm;
@@ -1513,9 +1594,8 @@
         } else {
           UnimplementedInstruction();
         }
-      }
         break;
-
+      }
       case 0x68:
         AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
         data += 5;
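
The new 0xB0..0xBF case recovers the destination register from the opcode itself: the low three bits select the register and REX.B supplies bit 3, while opcodes at 0xB8 and above take a full-width immediate. A toy decoder showing just that rule (not the V8 disassembler):

    // Toy decode of "mov reg, imm" opcodes 0xB0..0xBF.
    #include <cstdio>

    static const char* kReg64[16] = {
        "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15"};

    int DecodeMovReg(unsigned char opcode, bool rex_b) {
      return (opcode & 0x7) | (rex_b ? 8 : 0);  // REX.B extends the register
    }

    int main() {
      std::printf("%s\n", kReg64[DecodeMovReg(0xBB, false)]);  // rbx
      std::printf("%s\n", kReg64[DecodeMovReg(0xBB, true)]);   // r11
    }
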
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 81be819..b14267c 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -99,7 +99,7 @@
  public:
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
-  static const int kSavedRegistersOffset = +2 * kPointerSize;
+  static const int kLastParameterOffset = +2 * kPointerSize;
   static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
 
   // Caller SP-relative.
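
The rename reflects what fp+16 actually is on x64: with the saved frame pointer at fp+0 and the return address at fp+8, the first slot above the return address is the last (rightmost) JS parameter, not a saved-registers area. A sketch of the offset arithmetic, assuming that standard layout:

    // Frame-layout arithmetic behind the renamed constant; illustrative only.
    #include <cstdio>

    const int kPointerSize = 8;
    const int kLastParameterOffset = +2 * kPointerSize;  // above rbp + retaddr

    int ParameterOffset(int index, int param_count) {
      // Parameter i (0-based, pushed left-to-right) relative to fp.
      return kLastParameterOffset + (param_count - 1 - index) * kPointerSize;
    }

    int main() {
      // For a 3-parameter frame: p2 at fp+16, p1 at fp+24, p0 at fp+32.
      for (int i = 0; i < 3; i++)
        std::printf("p%d: fp+%d\n", i, ParameterOffset(i, 3));
    }
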
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 90afd85..d5fb7da 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "code-stubs.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
@@ -232,7 +232,7 @@
     }
 
     { Comment cmnt(masm_, "[ Stack check");
-      PrepareForBailout(info->function(), NO_REGISTERS);
+      PrepareForBailoutForId(AstNode::kFunctionEntryId, NO_REGISTERS);
       NearLabel ok;
       __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
       __ j(above_equal, &ok);
@@ -781,7 +781,7 @@
   // Compile all the tests with branches to their bodies.
   for (int i = 0; i < clauses->length(); i++) {
     CaseClause* clause = clauses->at(i);
-    clause->body_target()->entry_label()->Unuse();
+    clause->body_target()->Unuse();
 
     // The default is not a test, but remember it as final fall through.
     if (clause->is_default()) {
@@ -809,7 +809,7 @@
       __ cmpq(rdx, rax);
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
-      __ jmp(clause->body_target()->entry_label());
+      __ jmp(clause->body_target());
       __ bind(&slow_case);
     }
 
@@ -821,7 +821,7 @@
     __ testq(rax, rax);
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
-    __ jmp(clause->body_target()->entry_label());
+    __ jmp(clause->body_target());
   }
 
   // Discard the test value and jump to the default if present, otherwise to
@@ -831,14 +831,14 @@
   if (default_clause == NULL) {
     __ jmp(nested_statement.break_target());
   } else {
-    __ jmp(default_clause->body_target()->entry_label());
+    __ jmp(default_clause->body_target());
   }
 
   // Compile all the case bodies.
   for (int i = 0; i < clauses->length(); i++) {
     Comment cmnt(masm_, "[ Case body");
     CaseClause* clause = clauses->at(i);
-    __ bind(clause->body_target()->entry_label());
+    __ bind(clause->body_target());
     PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
     VisitStatements(clause->statements());
   }
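
With the virtual-frame jump targets gone, each clause's body_target() is now a plain Label, but the lowering keeps its two-pass shape: a first pass emits one compare-and-branch per clause aimed at that clause's body label, and a second pass binds the labels and emits the bodies. A toy emitter with the same shape (printed pseudo-assembly, not V8's macro assembler):

    // Two-pass switch lowering sketch: tests first, then bodies.
    #include <cstdio>
    #include <vector>

    void EmitSwitch(const std::vector<int>& case_values) {
      // Pass 1: one compare-and-branch per clause.
      for (size_t i = 0; i < case_values.size(); i++)
        std::printf("  cmp rax, %d ; je body_%zu\n", case_values[i], i);
      std::printf("  jmp done\n");
      // Pass 2: bind each body label and emit the clause's statements.
      for (size_t i = 0; i < case_values.size(); i++)
        std::printf("body_%zu:\n  ; ...clause %zu statements...\n", i, i);
      std::printf("done:\n");
    }

    int main() { EmitSwitch({1, 2, 3}); }
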
@@ -1576,27 +1576,26 @@
     }
   }
 
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
   if (expr->is_compound()) {
     { AccumulatorValueContext context(this);
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          PrepareForBailout(expr->target(), TOS_REG);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
       }
     }
 
-    // For property compound assignments we need another deoptimization
-    // point after the property load.
-    if (property != NULL) {
-      PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
-    }
-
     Token::Value op = expr->binary_op();
     __ push(rax);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
@@ -2248,15 +2247,6 @@
       }
     }
   } else {
-    // Call to some other expression.  If the expression is an anonymous
-    // function literal not called in a loop, mark it as one that should
-    // also use the full code generator.
-    FunctionLiteral* lit = fun->AsFunctionLiteral();
-    if (lit != NULL &&
-        lit->name()->Equals(isolate()->heap()->empty_string()) &&
-        loop_depth() == 0) {
-      lit->set_try_full_codegen(true);
-    }
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(fun);
     }
@@ -2435,11 +2425,71 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // Just indicate false, as %_IsStringWrapperSafeForDefaultValueOf() is only
-  // used in a few functions in runtime.js which should not normally be hit by
-  // this compiler.
+  if (FLAG_debug_code) __ AbortIfSmi(rax);
+
+  // Check whether this map has already been checked to be safe for default
+  // valueOf.
+  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ testb(FieldOperand(rbx, Map::kBitField2Offset),
+           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ j(not_zero, if_true);
+
+  // Check for fast case object. Generate false result for slow case object.
+  __ movq(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
+  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
+  __ j(equal, if_false);
+
+  // Look for valueOf symbol in the descriptor array, and indicate false if
+  // found. The type is not checked, so if it is a transition it is a false
+  // negative.
+  __ movq(rbx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
+  __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+  // rbx: descriptor array
+  // rcx: length of descriptor array
+  // Calculate the end of the descriptor array.
+  SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
+  __ lea(rcx,
+         Operand(
+             rbx, index.reg, index.scale, FixedArray::kHeaderSize));
+  // Calculate location of the first key name.
+  __ addq(rbx,
+          Immediate(FixedArray::kHeaderSize +
+                    DescriptorArray::kFirstIndex * kPointerSize));
+  // Loop through all the keys in the descriptor array. If one of these is the
+  // symbol valueOf the result is false.
+  Label entry, loop;
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ movq(rdx, FieldOperand(rbx, 0));
+  __ Cmp(rdx, FACTORY->value_of_symbol());
+  __ j(equal, if_false);
+  __ addq(rbx, Immediate(kPointerSize));
+  __ bind(&entry);
+  __ cmpq(rbx, rcx);
+  __ j(not_equal, &loop);
+
+  // Reload map as register rbx was used as temporary above.
+  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+
+  // If a valueOf property is not found on the object, check that its
+  // prototype is the unmodified String prototype. If not, the result is false.
+  __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+  __ testq(rcx, Immediate(kSmiTagMask));
+  __ j(zero, if_false);
+  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
+  __ cmpq(rcx,
+          ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+  __ j(not_equal, if_false);
+  // Set the bit in the map to indicate that it has been checked safe for
+  // default valueOf and set true result.
+  __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+         Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ jmp(if_true);
+
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  __ jmp(if_false);
   context()->Plug(if_true, if_false);
 }
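
The fast path above is dense; in outline it (1) trusts a cached bit on the map, (2) rejects dictionary-mode objects, (3) scans the map's descriptor array for a shadowing "valueOf" key, (4) requires the prototype to be the unmodified String prototype, and (5) sets the cache bit on success. A high-level restatement with hypothetical stand-in types (not V8's object model):

    // Sketch of the check's logic; all types here are toy stand-ins.
    #include <string>
    #include <vector>

    struct Map {
      bool checked_safe_for_default_value_of;
      std::vector<std::string> descriptor_keys;
      const Map* prototype_map;
    };

    bool IsStringWrapperSafe(Map* map, const Map* string_proto_map) {
      if (map->checked_safe_for_default_value_of) return true;  // cached bit
      for (const std::string& key : map->descriptor_keys)
        if (key == "valueOf") return false;  // shadowing valueOf found
      if (map->prototype_map != string_proto_map) return false;
      map->checked_safe_for_default_value_of = true;  // set the cache bit
      return true;
    }

    int main() {
      Map string_proto{false, {}, nullptr};
      Map wrapper{false, {"length"}, &string_proto};
      return IsStringWrapperSafe(&wrapper, &string_proto) ? 0 : 1;
    }
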
 
@@ -2693,8 +2743,13 @@
 
   // Return a random uint32 number in rax.
   // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
-  __ PrepareCallCFunction(0);
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 0);
+  __ PrepareCallCFunction(1);
+#ifdef _WIN64
+  __ LoadAddress(rcx, ExternalReference::isolate_address());
+#else
+  __ LoadAddress(rdi, ExternalReference::isolate_address());
+#endif
+  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
   // Convert 32 random bits in rax to 0.(32 random bits) in a double
   // by computing:
@@ -2703,7 +2758,7 @@
   __ movd(xmm1, rcx);
   __ movd(xmm0, rax);
   __ cvtss2sd(xmm1, xmm1);
-  __ xorpd(xmm0, xmm1);
+  __ xorps(xmm0, xmm1);
   __ subsd(xmm0, xmm1);
   __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
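
The surrounding code finishes the classic bit trick: OR 32 random bits into the low mantissa bits of 2^20 (the cvtss2sd of 0x49800000, i.e. 1.0f x 2^20) and subtract 2^20, leaving exactly r * 2^-32 in [0, 1). The same arithmetic in scalar C++, as a sketch:

    // Plant 32 random bits in the low mantissa of 2^20, then subtract 2^20.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    double RandomBitsToDouble(uint32_t r) {
      const double kTwoPow20 = 1048576.0;  // 1.0 x 2^20
      uint64_t bits;
      std::memcpy(&bits, &kTwoPow20, sizeof bits);
      bits |= r;  // low 32 mantissa bits <- r (the xorps in the real code)
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d - kTwoPow20;  // == r * 2^-32, i.e. 0.(32 random bits)
    }

    int main() {
      std::printf("%.10f\n", RandomBitsToDouble(0x80000000u));  // 0.5
      std::printf("%.10f\n", RandomBitsToDouble(0));            // 0.0
    }
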
 
@@ -2988,15 +3043,14 @@
 void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
-  int arg_count = args->length() - 2;  // For receiver and function.
-  VisitForStackValue(args->at(0));  // Receiver.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i + 1));
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; i++) {
+    VisitForStackValue(args->at(i));
   }
-  VisitForAccumulatorValue(args->at(arg_count + 1));  // Function.
+  VisitForAccumulatorValue(args->last());  // Function.
 
-  // InvokeFunction requires function in rdi. Move it in there.
-  if (!result_register().is(rdi)) __ movq(rdi, result_register());
+  // InvokeFunction requires the function in rdi. Move it in there.
+  __ movq(rdi, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(rdi, count, CALL_FUNCTION);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -3753,7 +3807,11 @@
 
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
-  PrepareForBailout(expr->increment(), TOS_REG);
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailoutForId(expr->CountId(), TOS_REG);
+  }
 
   // Call ToNumber only if operand is not a smi.
   NearLabel no_conversion;
@@ -4173,30 +4231,7 @@
     default:
       break;
   }
-
   __ call(ic, mode);
-
-  // Crankshaft doesn't need patching of inlined loads and stores.
-  // When compiling the snapshot we need to produce code that works
-  // with and without Crankshaft.
-  if (V8::UseCrankshaft() && !Serializer::enabled()) {
-    return;
-  }
-
-  // If we're calling a (keyed) load or store stub, we have to mark
-  // the call as containing no inlined code so we will not attempt to
-  // patch it.
-  switch (ic->kind()) {
-    case Code::LOAD_IC:
-    case Code::KEYED_LOAD_IC:
-    case Code::STORE_IC:
-    case Code::KEYED_STORE_IC:
-      __ nop();  // Signals no inlined code.
-      break;
-    default:
-      // Do nothing.
-      break;
-  }
 }
 
 
@@ -4217,7 +4252,6 @@
     default:
       break;
   }
-
   __ call(ic, RelocInfo::CODE_TARGET);
   if (patch_site != NULL && patch_site->is_bound()) {
     patch_site->EmitPatchInfo();
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 9180465..5ed89b5 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -29,7 +29,7 @@
 
 #if defined(V8_TARGET_ARCH_X64)
 
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "ic-inl.h"
 #include "runtime.h"
 #include "stub-cache.h"
@@ -381,11 +381,6 @@
 }
 
 
-// The offset from the inlined patch site to the start of the inlined
-// load instruction.
-const int LoadIC::kOffsetToLoadInstruction = 20;
-
-
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
@@ -1010,7 +1005,7 @@
 
   // Call the entry.
   CEntryStub stub(1);
-  __ movq(rax, Immediate(2));
+  __ Set(rax, 2);
   __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
   __ CallStub(&stub);
 
@@ -1297,130 +1292,6 @@
 }
 
 
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // If the instruction following the call is not a test rax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  Address delta_address = test_instruction_address + 1;
-  // The delta to the start of the map check instruction.
-  int delta = *reinterpret_cast<int*>(delta_address);
-
-  // The map address is the last 8 bytes of the 10-byte
-  // immediate move instruction, so we add 2 to get the
-  // offset to the last 8 bytes.
-  Address map_address = test_instruction_address + delta + 2;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // The offset is in the 32-bit displacement of a seven byte
-  // memory-to-register move instruction (REX.W 0x88 ModR/M disp32),
-  // so we add 3 to get the offset of the displacement.
-  Address offset_address =
-      test_instruction_address + delta + kOffsetToLoadInstruction + 3;
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-  return true;
-}
-
-
-bool LoadIC::PatchInlinedContextualLoad(Address address,
-                                        Object* map,
-                                        Object* cell,
-                                        bool is_dont_delete) {
-  // TODO(<bug#>): implement this.
-  return false;
-}
-
-
-bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  if (V8::UseCrankshaft()) return false;
-
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not a test rax, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Extract the encoded deltas from the test rax instruction.
-  Address encoded_offsets_address = test_instruction_address + 1;
-  int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
-  int delta_to_map_check = -(encoded_offsets & 0xFFFF);
-  int delta_to_record_write = encoded_offsets >> 16;
-
-  // Patch the map to check. The map address is the last 8 bytes of
-  // the 10-byte immediate move instruction.
-  Address map_check_address = test_instruction_address + delta_to_map_check;
-  Address map_address = map_check_address + 2;
-  *(reinterpret_cast<Object**>(map_address)) = map;
-
-  // Patch the offset in the store instruction. The offset is in the
-  // last 4 bytes of a 7 byte register-to-memory move instruction.
-  Address offset_address =
-      map_check_address + StoreIC::kOffsetToStoreInstruction + 3;
-  // The offset should have initial value (kMaxInt - 1), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  // Patch the offset in the write-barrier code. The offset is the
-  // last 4 bytes of a 7 byte lea instruction.
-  offset_address = map_check_address + delta_to_record_write + 3;
-  // The offset should have initial value (kMaxInt), cleared value
-  // (-1) or we should be clearing the inlined version.
-  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
-         *reinterpret_cast<int*>(offset_address) == -1 ||
-         (offset == 0 && map == HEAP->null_value()));
-  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
-
-  return true;
-}
-
-
-static bool PatchInlinedMapCheck(Address address, Object* map) {
-  if (V8::UseCrankshaft()) return false;
-
-  // Arguments are address of start of call sequence that called
-  // the IC,
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-  // The keyed load has a fast inlined case if the IC call instruction
-  // is immediately followed by a test instruction.
-  if (*test_instruction_address != Assembler::kTestEaxByte) return false;
-
-  // Fetch the offset from the test instruction to the map compare
-  // instructions (starting with the 64-bit immediate mov of the map
-  // address). This offset is stored in the last 4 bytes of the 5
-  // byte test instruction.
-  Address delta_address = test_instruction_address + 1;
-  int delta = *reinterpret_cast<int*>(delta_address);
-  // Compute the map address.  The map address is in the last 8 bytes
-  // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
-  // to the offset to get the map address.
-  Address map_address = test_instruction_address + delta + 2;
-  // Patch the map check.
-  *(reinterpret_cast<Object**>(map_address)) = map;
-  return true;
-}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  return PatchInlinedMapCheck(address, map);
-}
-
-
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : key
@@ -1503,11 +1374,6 @@
 }
 
 
-// The offset from the inlined patch site to the start of the inlined
-// store instruction.
-const int StoreIC::kOffsetToStoreInstruction = 20;
-
-
 void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : value
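
The deleted PatchInlinedLoad/PatchInlinedStore machinery rewrote machine code in place: a one-byte "test rax" marker after the IC call flags that something was inlined, a 4-byte delta locates the map-check site, and from there the 8-byte immediate of the 10-byte movq (and, for stores, a disp32) gets overwritten. The same arithmetic on a toy byte buffer; the marker value and layout constants are assumptions, not live patching code:

    // Toy version of the removed patch-site arithmetic.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    const uint8_t kTestEaxByte = 0xA9;  // "test eax, imm32" (value assumed)

    bool PatchInlinedMap(uint8_t* site, uint64_t new_map) {
      if (site[0] != kTestEaxByte) return false;  // nothing was inlined
      int32_t delta;
      std::memcpy(&delta, site + 1, sizeof delta);  // delta to the map check
      // Skip the 2 opcode bytes of the 10-byte "movq reg, imm64" and
      // rewrite the 8-byte map immediate.
      std::memcpy(site + delta + 2, &new_map, sizeof new_map);
      return true;
    }

    int main() {
      uint8_t buf[64] = {kTestEaxByte};
      int32_t delta = 8;
      std::memcpy(buf + 1, &delta, sizeof delta);
      std::printf("patched: %d\n", PatchInlinedMap(buf, 0x1234) ? 1 : 0);
    }
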
diff --git a/src/x64/jump-target-x64.cc b/src/x64/jump-target-x64.cc
deleted file mode 100644
index e715604..0000000
--- a/src/x64/jump-target-x64.cc
+++ /dev/null
@@ -1,437 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "jump-target-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// JumpTarget implementation.
-
-#define __ ACCESS_MASM(cgen()->masm())
-
-void JumpTarget::DoJump() {
-  ASSERT(cgen()->has_valid_frame());
-  // Live non-frame registers are not allowed at unconditional jumps
-  // because we have no way of invalidating the corresponding results
-  // which are still live in the C++ code.
-  ASSERT(cgen()->HasValidEntryRegisters());
-
-  if (is_bound()) {
-    // Backward jump.  There is an expected frame to merge to.
-    ASSERT(direction_ == BIDIRECTIONAL);
-    cgen()->frame()->PrepareMergeTo(entry_frame_);
-    cgen()->frame()->MergeTo(entry_frame_);
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-  } else if (entry_frame_ != NULL) {
-    // Forward jump with a preconfigured entry frame.  Assert the
-    // current frame matches the expected one and jump to the block.
-    ASSERT(cgen()->frame()->Equals(entry_frame_));
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-  } else {
-    // Forward jump.  Remember the current frame and emit a jump to
-    // its merge code.
-    AddReachingFrame(cgen()->frame());
-    RegisterFile empty;
-    cgen()->SetFrame(NULL, &empty);
-    __ jmp(&merge_labels_.last());
-  }
-}
-
-
-void JumpTarget::DoBranch(Condition cc, Hint b) {
-  ASSERT(cgen() != NULL);
-  ASSERT(cgen()->has_valid_frame());
-
-  if (is_bound()) {
-    ASSERT(direction_ == BIDIRECTIONAL);
-    // Backward branch.  We have an expected frame to merge to on the
-    // backward edge.
-
-    // Swap the current frame for a copy (we do the swapping to get
-    // the off-frame registers off the fall through) to use for the
-    // branch.
-    VirtualFrame* fall_through_frame = cgen()->frame();
-    VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
-    RegisterFile non_frame_registers;
-    cgen()->SetFrame(branch_frame, &non_frame_registers);
-
-    // Check if we can avoid merge code.
-    cgen()->frame()->PrepareMergeTo(entry_frame_);
-    if (cgen()->frame()->Equals(entry_frame_)) {
-      // Branch right in to the block.
-      cgen()->DeleteFrame();
-      __ j(cc, &entry_label_);
-      cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-      return;
-    }
-
-    // Check if we can reuse existing merge code.
-    for (int i = 0; i < reaching_frames_.length(); i++) {
-      if (reaching_frames_[i] != NULL &&
-          cgen()->frame()->Equals(reaching_frames_[i])) {
-        // Branch to the merge code.
-        cgen()->DeleteFrame();
-        __ j(cc, &merge_labels_[i]);
-        cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-        return;
-      }
-    }
-
-    // To emit the merge code here, we negate the condition and branch
-    // around the merge code on the fall through path.
-    Label original_fall_through;
-    __ j(NegateCondition(cc), &original_fall_through);
-    cgen()->frame()->MergeTo(entry_frame_);
-    cgen()->DeleteFrame();
-    __ jmp(&entry_label_);
-    cgen()->SetFrame(fall_through_frame, &non_frame_registers);
-    __ bind(&original_fall_through);
-
-  } else if (entry_frame_ != NULL) {
-    // Forward branch with a preconfigured entry frame.  Assert the
-    // current frame matches the expected one and branch to the block.
-    ASSERT(cgen()->frame()->Equals(entry_frame_));
-    // Explicitly use the macro assembler instead of __ as forward
-    // branches are expected to be a fixed size (no inserted
-    // coverage-checking instructions please).  This is used in
-    // Reference::GetValue.
-    cgen()->masm()->j(cc, &entry_label_);
-
-  } else {
-    // Forward branch.  A copy of the current frame is remembered and
-    // a branch to the merge code is emitted.  Explicitly use the
-    // macro assembler instead of __ as forward branches are expected
-    // to be a fixed size (no inserted coverage-checking instructions
-    // please).  This is used in Reference::GetValue.
-    AddReachingFrame(new VirtualFrame(cgen()->frame()));
-    cgen()->masm()->j(cc, &merge_labels_.last());
-  }
-}
-
-
-void JumpTarget::Call() {
-  // Call is used to push the address of the catch block on the stack as
-  // a return address when compiling try/catch and try/finally.  We
-  // fully spill the frame before making the call.  The expected frame
-  // at the label (which should be the only one) is the spilled current
-  // frame plus an in-memory return address.  The "fall-through" frame
-  // at the return site is the spilled current frame.
-  ASSERT(cgen() != NULL);
-  ASSERT(cgen()->has_valid_frame());
-  // There are no non-frame references across the call.
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ASSERT(!is_linked());
-
-  cgen()->frame()->SpillAll();
-  VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
-  target_frame->Adjust(1);
-  // We do not expect a call with a preconfigured entry frame.
-  ASSERT(entry_frame_ == NULL);
-  AddReachingFrame(target_frame);
-  __ call(&merge_labels_.last());
-}
-
-
-void JumpTarget::DoBind() {
-  ASSERT(cgen() != NULL);
-  ASSERT(!is_bound());
-
-  // Live non-frame registers are not allowed at the start of a basic
-  // block.
-  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
-  // Fast case: the jump target was manually configured with an entry
-  // frame to use.
-  if (entry_frame_ != NULL) {
-    // Assert no reaching frames to deal with.
-    ASSERT(reaching_frames_.is_empty());
-    ASSERT(!cgen()->has_valid_frame());
-
-    RegisterFile empty;
-    if (direction_ == BIDIRECTIONAL) {
-      // Copy the entry frame so the original can be used for a
-      // possible backward jump.
-      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
-    } else {
-      // Take ownership of the entry frame.
-      cgen()->SetFrame(entry_frame_, &empty);
-      entry_frame_ = NULL;
-    }
-    __ bind(&entry_label_);
-    return;
-  }
-
-  if (!is_linked()) {
-    ASSERT(cgen()->has_valid_frame());
-    if (direction_ == FORWARD_ONLY) {
-      // Fast case: no forward jumps and no possible backward jumps.
-      // The stack pointer can be floating above the top of the
-      // virtual frame before the bind.  Afterward, it should not.
-      VirtualFrame* frame = cgen()->frame();
-      int difference = frame->stack_pointer_ - (frame->element_count() - 1);
-      if (difference > 0) {
-        frame->stack_pointer_ -= difference;
-        __ addq(rsp, Immediate(difference * kPointerSize));
-      }
-    } else {
-      ASSERT(direction_ == BIDIRECTIONAL);
-      // Fast case: no forward jumps, possible backward ones.  Remove
-      // constants and copies above the watermark on the fall-through
-      // frame and use it as the entry frame.
-      cgen()->frame()->MakeMergable();
-      entry_frame_ = new VirtualFrame(cgen()->frame());
-    }
-    __ bind(&entry_label_);
-    return;
-  }
-
-  if (direction_ == FORWARD_ONLY &&
-      !cgen()->has_valid_frame() &&
-      reaching_frames_.length() == 1) {
-    // Fast case: no fall-through, a single forward jump, and no
-    // possible backward jumps.  Pick up the only reaching frame, take
-    // ownership of it, and use it for the block about to be emitted.
-    VirtualFrame* frame = reaching_frames_[0];
-    RegisterFile empty;
-    cgen()->SetFrame(frame, &empty);
-    reaching_frames_[0] = NULL;
-    __ bind(&merge_labels_[0]);
-
-    // The stack pointer can be floating above the top of the
-    // virtual frame before the bind.  Afterward, it should not.
-    int difference = frame->stack_pointer_ - (frame->element_count() - 1);
-    if (difference > 0) {
-      frame->stack_pointer_ -= difference;
-      __ addq(rsp, Immediate(difference * kPointerSize));
-    }
-
-    __ bind(&entry_label_);
-    return;
-  }
-
-  // If there is a current frame, record it as the fall-through.  It
-  // is owned by the reaching frames for now.
-  bool had_fall_through = false;
-  if (cgen()->has_valid_frame()) {
-    had_fall_through = true;
-    AddReachingFrame(cgen()->frame());  // Return value ignored.
-    RegisterFile empty;
-    cgen()->SetFrame(NULL, &empty);
-  }
-
-  // Compute the frame to use for entry to the block.
-  ComputeEntryFrame();
-
-  // Some moves required to merge to an expected frame require purely
-  // frame state changes, and do not require any code generation.
-  // Perform those first to increase the possibility of finding equal
-  // frames below.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    if (reaching_frames_[i] != NULL) {
-      reaching_frames_[i]->PrepareMergeTo(entry_frame_);
-    }
-  }
-
-  if (is_linked()) {
-    // There were forward jumps.  Handle merging the reaching frames
-    // to the entry frame.
-
-    // Loop over the (non-null) reaching frames and process any that
-    // need merge code.  Iterate backwards through the list to handle
-    // the fall-through frame first.  Set frames that will be
-    // processed after 'i' to NULL if we want to avoid processing
-    // them.
-    for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
-      VirtualFrame* frame = reaching_frames_[i];
-
-      if (frame != NULL) {
-        // Does the frame (probably) need merge code?
-        if (!frame->Equals(entry_frame_)) {
-          // We could have a valid frame as the fall through to the
-          // binding site or as the fall through from a previous merge
-          // code block.  Jump around the code we are about to
-          // generate.
-          if (cgen()->has_valid_frame()) {
-            cgen()->DeleteFrame();
-            __ jmp(&entry_label_);
-          }
-          // Pick up the frame for this block.  Assume ownership if
-          // there cannot be backward jumps.
-          RegisterFile empty;
-          if (direction_ == BIDIRECTIONAL) {
-            cgen()->SetFrame(new VirtualFrame(frame), &empty);
-          } else {
-            cgen()->SetFrame(frame, &empty);
-            reaching_frames_[i] = NULL;
-          }
-          __ bind(&merge_labels_[i]);
-
-          // Loop over the remaining (non-null) reaching frames,
-          // looking for any that can share merge code with this one.
-          for (int j = 0; j < i; j++) {
-            VirtualFrame* other = reaching_frames_[j];
-            if (other != NULL && other->Equals(cgen()->frame())) {
-              // Set the reaching frame element to null to avoid
-              // processing it later, and then bind its entry label.
-              reaching_frames_[j] = NULL;
-              __ bind(&merge_labels_[j]);
-            }
-          }
-
-          // Emit the merge code.
-          cgen()->frame()->MergeTo(entry_frame_);
-        } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
-          // If this is the fall through frame, and it didn't need
-          // merge code, we need to pick up the frame so we can jump
-          // around subsequent merge blocks if necessary.
-          RegisterFile empty;
-          cgen()->SetFrame(frame, &empty);
-          reaching_frames_[i] = NULL;
-        }
-      }
-    }
-
-    // The code generator may not have a current frame if there was no
-    // fall through and none of the reaching frames needed merging.
-    // In that case, clone the entry frame as the current frame.
-    if (!cgen()->has_valid_frame()) {
-      RegisterFile empty;
-      cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
-    }
-
-    // There may be unprocessed reaching frames that did not need
-    // merge code.  They will have unbound merge labels.  Bind their
-    // merge labels to be the same as the entry label and deallocate
-    // them.
-    for (int i = 0; i < reaching_frames_.length(); i++) {
-      if (!merge_labels_[i].is_bound()) {
-        reaching_frames_[i] = NULL;
-        __ bind(&merge_labels_[i]);
-      }
-    }
-
-    // There are non-NULL reaching frames with bound labels for each
-    // merge block, but only on backward targets.
-  } else {
-    // There were no forward jumps.  There must be a current frame and
-    // this must be a bidirectional target.
-    ASSERT(reaching_frames_.length() == 1);
-    ASSERT(reaching_frames_[0] != NULL);
-    ASSERT(direction_ == BIDIRECTIONAL);
-
-    // Use a copy of the reaching frame so the original can be saved
-    // for possible reuse as a backward merge block.
-    RegisterFile empty;
-    cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
-    __ bind(&merge_labels_[0]);
-    cgen()->frame()->MergeTo(entry_frame_);
-  }
-
-  __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
-  // Drop leftover statement state from the frame before merging, without
-  // emitting code.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->ForgetElements(count);
-  DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
-  // Drop leftover statement state from the frame before merging, without
-  // emitting code.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->ForgetElements(count);
-  cgen()->frame()->Push(arg);
-  DoJump();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even on
-  // the fall through.  This is so we can bind the return target with state
-  // on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    cgen()->frame()->ForgetElements(count);
-  }
-  DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_ + 1);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even on
-  // the fall through.  This is so we can bind the return target with state
-  // on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    cgen()->frame()->ForgetElements(count);
-    cgen()->frame()->Push(arg);
-  }
-  DoBind();
-  *arg = cgen()->frame()->Pop();
-}
-
-
-#undef __
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_X64
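
For readers tracking what the deleted file did: every forward jump recorded a copy of the current virtual frame, and binding the target reconciled all recorded frames (plus any fall-through) against a single entry frame, emitting merge code where they differed. A toy model of that bookkeeping, with deliberately simplified hypothetical types:

    // Gist of the deleted jump-target machinery; toy model only.
    #include <vector>

    struct VirtualFrame {
      int height;
      bool Equals(const VirtualFrame& o) const { return height == o.height; }
    };

    struct JumpTarget {
      std::vector<VirtualFrame> reaching_frames;
      void Jump(const VirtualFrame& current) {
        reaching_frames.push_back(current);  // remember the frame at the jump
      }
      VirtualFrame Bind() {
        VirtualFrame entry = reaching_frames.front();  // expected entry frame
        for (const VirtualFrame& f : reaching_frames) {
          if (!f.Equals(entry)) { /* emit merge code here */ }
        }
        return entry;  // code after the bind proceeds with this frame
      }
    };

    int main() { JumpTarget t; t.Jump({3}); t.Jump({3}); t.Bind(); }
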
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 86a7e83..c242874 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -91,7 +91,7 @@
 
 void LCodeGen::FinishCode(Handle<Code> code) {
   ASSERT(is_done());
-  code->set_stack_slots(StackSlotCount());
+  code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
   Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
@@ -146,10 +146,10 @@
   __ push(rdi);  // Callee's JS function.
 
   // Reserve space for the stack slots needed by the code.
-  int slots = StackSlotCount();
+  int slots = GetStackSlotCount();
   if (slots > 0) {
     if (FLAG_debug_code) {
-      __ movl(rax, Immediate(slots));
+      __ Set(rax, slots);
       __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
       Label loop;
       __ bind(&loop);
@@ -290,7 +290,7 @@
   while (byte_count-- > 0) {
     __ int3();
   }
-  safepoints_.Emit(masm(), StackSlotCount());
+  safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
 
@@ -418,7 +418,7 @@
     translation->StoreDoubleStackSlot(op->index());
   } else if (op->IsArgument()) {
     ASSERT(is_tagged);
-    int src_index = StackSlotCount() + op->index();
+    int src_index = GetStackSlotCount() + op->index();
     translation->StoreStackSlot(src_index);
   } else if (op->IsRegister()) {
     Register reg = ToRegister(op);
@@ -440,14 +440,16 @@
 }
 
 
-void LCodeGen::CallCode(Handle<Code> code,
-                        RelocInfo::Mode mode,
-                        LInstruction* instr) {
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode,
+                               int argc) {
   ASSERT(instr != NULL);
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   __ call(code, mode);
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, safepoint_mode, argc);
 
   // Signal that we don't inline smi code before these stubs in the
   // optimizing code generator.
@@ -458,6 +460,13 @@
 }
 
 
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
+}
+
+
 void LCodeGen::CallRuntime(const Runtime::Function* function,
                            int num_arguments,
                            LInstruction* instr) {
@@ -467,11 +476,23 @@
   RecordPosition(pointers->position());
 
   __ CallRuntime(function, num_arguments);
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);
 }
 
 
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr) {
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+}
+
+
+void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
+                                          SafepointMode safepoint_mode,
+                                          int argc) {
   // Create the environment to bailout to. If the call has side effects
   // execution has to continue after the call otherwise execution can continue
   // from a previous bailout point repeating the call.
@@ -483,8 +504,17 @@
   }
 
   RegisterEnvironmentForDeoptimization(deoptimization_environment);
-  RecordSafepoint(instr->pointer_map(),
-                  deoptimization_environment->deoptimization_index());
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    ASSERT(argc == 0);
+    RecordSafepoint(instr->pointer_map(),
+                    deoptimization_environment->deoptimization_index());
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(),
+        argc,
+        deoptimization_environment->deoptimization_index());
+  }
 }
 
 
@@ -534,7 +564,7 @@
     // jump entry if this is the case.
     if (jump_table_.is_empty() ||
         jump_table_.last().address != entry) {
-      jump_table_.Add(entry);
+      jump_table_.Add(JumpTableEntry(entry));
     }
     __ j(cc, &jump_table_.last().label);
   }
@@ -605,6 +635,8 @@
     Safepoint::Kind kind,
     int arguments,
     int deoptimization_index) {
+  ASSERT(kind == expected_safepoint_kind_);
+
   const ZoneList<LOperand*>* operands = pointers->operands();
 
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
@@ -1067,7 +1099,7 @@
 
 void LCodeGen::DoConstantI(LConstantI* instr) {
   ASSERT(instr->result()->IsRegister());
-  __ movl(ToRegister(instr->result()), Immediate(instr->value()));
+  __ Set(ToRegister(instr->result()), instr->value());
 }
 
 
@@ -1079,7 +1111,7 @@
   // Use xor to produce +0.0 in a fast and compact way, but avoid to
   // do so if the constant is -0.0.
   if (int_val == 0) {
-    __ xorpd(res, res);
+    __ xorps(res, res);
   } else {
     Register tmp = ToRegister(instr->TempAt(0));
     __ Set(tmp, int_val);
@@ -1191,12 +1223,12 @@
       break;
     case Token::MOD:
       __ PrepareCallCFunction(2);
-      __ movsd(xmm0, left);
+      __ movaps(xmm0, left);
       ASSERT(right.is(xmm1));
       __ CallCFunction(
           ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
       __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-      __ movsd(result, xmm0);
+      __ movaps(result, xmm0);
       break;
     default:
       UNREACHABLE();
@@ -1255,7 +1287,7 @@
     EmitBranch(true_block, false_block, not_zero);
   } else if (r.IsDouble()) {
     XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
-    __ xorpd(xmm0, xmm0);
+    __ xorps(xmm0, xmm0);
     __ ucomisd(reg, xmm0);
     EmitBranch(true_block, false_block, not_equal);
   } else {
@@ -1290,7 +1322,7 @@
 
       // HeapNumber => false iff +0, -0, or NaN. These three cases set the
       // zero flag when compared to zero using ucomisd.
-      __ xorpd(xmm0, xmm0);
+      __ xorps(xmm0, xmm0);
       __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
       __ j(zero, false_label);
       __ jmp(true_label);
@@ -1328,11 +1360,8 @@
 
 
 void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
-  __ Pushad();
-  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  __ Popad();
+  PushSafepointRegistersScope scope(this);
+  CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
 }
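
PushSafepointRegistersScope converts the paired Pushad/Popad calls into an RAII guard, so the restore is emitted on every exit from the block rather than relying on manual pairing. The shape of such a scope, as a minimal sketch with hypothetical names:

    // Minimal RAII sketch of the scope adopted above.
    #include <cstdio>

    struct CodeGen {
      void PushSafepointRegisters() { std::puts("push all"); }
      void PopSafepointRegisters()  { std::puts("pop all"); }
    };

    class PushSafepointRegistersScope {
     public:
      explicit PushSafepointRegistersScope(CodeGen* cg) : cg_(cg) {
        cg_->PushSafepointRegisters();
      }
      ~PushSafepointRegistersScope() { cg_->PopSafepointRegisters(); }
     private:
      CodeGen* cg_;
    };

    int main() {
      CodeGen cg;
      PushSafepointRegistersScope scope(&cg);  // pops automatically at }
    }
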
 
 
@@ -1485,10 +1514,11 @@
 
   __ CompareRoot(reg, Heap::kNullValueRootIndex);
   if (instr->is_strict()) {
+    ASSERT(Heap::kTrueValueRootIndex >= 0);
     __ movl(result, Immediate(Heap::kTrueValueRootIndex));
     NearLabel load;
     __ j(equal, &load);
-    __ movl(result, Immediate(Heap::kFalseValueRootIndex));
+    __ Set(result, Heap::kFalseValueRootIndex);
     __ bind(&load);
     __ LoadRootIndexed(result, result, 0);
   } else {
@@ -1937,23 +1967,36 @@
 
 void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                                 Label* map_check) {
-  __ PushSafepointRegisters();
-  InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
-      InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
-  InstanceofStub stub(flags);
+  {
+    PushSafepointRegistersScope scope(this);
+    InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
+        InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
+    InstanceofStub stub(flags);
 
-  __ push(ToRegister(instr->InputAt(0)));
-  __ Push(instr->function());
-  Register temp = ToRegister(instr->TempAt(0));
-  ASSERT(temp.is(rdi));
-  static const int kAdditionalDelta = 16;
-  int delta =
-      masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
-  __ movq(temp, Immediate(delta));
-  __ push(temp);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  __ movq(kScratchRegister, rax);
-  __ PopSafepointRegisters();
+    __ push(ToRegister(instr->InputAt(0)));
+    __ Push(instr->function());
+
+    Register temp = ToRegister(instr->TempAt(0));
+    static const int kAdditionalDelta = 10;
+    int delta =
+        masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
+    ASSERT(delta >= 0);
+    __ push_imm32(delta);
+
+    // We are pushing three values on the stack but recording a
+    // safepoint with two arguments because stub is going to
+    // remove the third argument from the stack before jumping
+    // to instanceof builtin on the slow path.
+    CallCodeGeneric(stub.GetCode(),
+                    RelocInfo::CODE_TARGET,
+                    instr,
+                    RECORD_SAFEPOINT_WITH_REGISTERS,
+                    2);
+    ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
+    // Move result to a register that survives the end of the
+    // PushSafepointRegisterScope.
+    __ movq(kScratchRegister, rax);
+  }
   __ testq(kScratchRegister, kScratchRegister);
   Label load_false;
   Label done;
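
kAdditionalDelta drops from 16 to 10 because the delta is now pushed with a single push_imm32: assuming the standard x64 encodings, "push imm32" is 0x68 plus a 4-byte immediate (5 bytes) and "call rel32" is 0xE8 plus a 4-byte displacement (5 bytes), which is exactly what the ASSERT after the call re-checks. The arithmetic:

    // Why 10: the only code emitted between computing the delta and the
    // call target is push_imm32 + call rel32 (standard encodings assumed).
    #include <cstdio>

    int main() {
      const int kPushImm32Size = 1 + 4;  // 0x68 id
      const int kCallRel32Size = 1 + 4;  // 0xE8 cd
      std::printf("kAdditionalDelta = %d\n", kPushImm32Size + kCallRel32Size);
    }
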
@@ -2015,11 +2058,11 @@
   }
   __ movq(rsp, rbp);
   __ pop(rbp);
-  __ Ret((ParameterCount() + 1) * kPointerSize, rcx);
+  __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
 }
 
 
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   if (result.is(rax)) {
     __ load_rax(instr->hydrogen()->cell().location(),
@@ -2035,7 +2078,19 @@
 }
 
 
-void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(rax));
+  ASSERT(ToRegister(instr->result()).is(rax));
+
+  __ Move(rcx, instr->name());
+  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
+                                               RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
+  CallCode(ic, mode, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
   Register value = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
   ASSERT(!value.is(temp));
@@ -2058,6 +2113,18 @@
 }
 
 
+void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(rdx));
+  ASSERT(ToRegister(instr->value()).is(rax));
+
+  __ Move(rcx, instr->name());
+  Handle<Code> ic = instr->strict_mode()
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
+  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
+}
+
+
 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2362,14 +2429,14 @@
   } else {
     __ cmpq(rbp, ToOperand(instr->InputAt(0)));
   }
-  __ movq(result, Immediate(scope()->num_parameters()));
+  __ movl(result, Immediate(scope()->num_parameters()));
   __ j(equal, &done);
 
   // Arguments adaptor frame present. Get argument length from there.
   __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movq(result, Operand(result,
-                          ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiToInteger32(result, result);
+  __ SmiToInteger32(result,
+                    Operand(result,
+                            ArgumentsAdaptorFrameConstants::kLengthOffset));
 
   // Argument length is in result register.
   __ bind(&done);
@@ -2440,25 +2507,19 @@
                                          env->deoptimization_index());
   v8::internal::ParameterCount actual(rax);
   __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   LOperand* argument = instr->InputAt(0);
-  if (argument->IsConstantOperand()) {
-    EmitPushConstantOperand(argument);
-  } else if (argument->IsRegister()) {
-    __ push(ToRegister(argument));
-  } else {
-    ASSERT(!argument->IsDoubleRegister());
-    __ push(ToOperand(argument));
-  }
+  EmitPushTaggedOperand(argument);
 }
 
 
 void LCodeGen::DoContext(LContext* instr) {
   Register result = ToRegister(instr->result());
-  __ movq(result, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ movq(result, rsi);
 }
 
 
@@ -2513,7 +2574,7 @@
   }
 
   // Setup deoptimization.
-  RegisterLazyDeoptimization(instr);
+  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT, 0);
 
   // Restore context.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2538,7 +2599,7 @@
   Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
 
   // Preserve the value of all registers.
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
 
   Label negative;
   __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
@@ -2559,9 +2620,7 @@
   // Slow case: Call the runtime system to do the number allocation.
   __ bind(&slow);
 
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
   // Set the pointer to the new heap number in tmp.
   if (!tmp.is(rax)) {
     __ movq(tmp, rax);
@@ -2578,7 +2637,6 @@
   __ StoreToSafepointRegisterSlot(input_reg, tmp);
 
   __ bind(&done);
-  __ PopSafepointRegisters();
 }
 
 
@@ -2613,7 +2671,7 @@
   if (r.IsDouble()) {
     XMMRegister scratch = xmm0;
     XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-    __ xorpd(scratch, scratch);
+    __ xorps(scratch, scratch);
     __ subsd(scratch, input_reg);
     __ andpd(input_reg, scratch);
   } else if (r.IsInteger32()) {
@@ -2624,7 +2682,9 @@
     Register input_reg = ToRegister(instr->InputAt(0));
     // Smi check.
     __ JumpIfNotSmi(input_reg, deferred->entry());
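+    // Untag so the integer abs below operates on the raw int32 value;
+    // the (now non-negative) result is retagged afterwards.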
+    __ SmiToInteger32(input_reg, input_reg);
     EmitIntegerMathAbs(instr);
+    __ Integer32ToSmi(input_reg, input_reg);
     __ bind(deferred->exit());
   }
 }
@@ -2634,21 +2694,36 @@
   XMMRegister xmm_scratch = xmm0;
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-  __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
-  __ ucomisd(input_reg, xmm_scratch);
 
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(below_equal, instr->environment());
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    CpuFeatures::Scope scope(SSE4_1);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Deoptimize if minus zero.
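+      // -0.0 is the only double whose bit pattern equals kMinInt64
+      // (0x8000000000000000), so subtracting 1 from the raw bits sets
+      // the overflow flag for exactly that input.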
+      __ movq(output_reg, input_reg);
+      __ subq(output_reg, Immediate(1));
+      DeoptimizeIf(overflow, instr->environment());
+    }
+    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
+    __ cvttsd2si(output_reg, xmm_scratch);
+    __ cmpl(output_reg, Immediate(0x80000000));
+    DeoptimizeIf(equal, instr->environment());
   } else {
-    DeoptimizeIf(below, instr->environment());
+    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
+    __ ucomisd(input_reg, xmm_scratch);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(below_equal, instr->environment());
+    } else {
+      DeoptimizeIf(below, instr->environment());
+    }
+
+    // Use truncating instruction (OK because input is positive).
+    __ cvttsd2si(output_reg, input_reg);
+
+    // Overflow is signalled with minint.
+    __ cmpl(output_reg, Immediate(0x80000000));
+    DeoptimizeIf(equal, instr->environment());
   }
-
-  // Use truncating instruction (OK because input is positive).
-  __ cvttsd2si(output_reg, input_reg);
-
-  // Overflow is signalled with minint.
-  __ cmpl(output_reg, Immediate(0x80000000));
-  DeoptimizeIf(equal, instr->environment());
 }
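
NOTE: the two branches are not equivalent: the SSE4.1 path floors any input
(deopting only on -0 and on 32-bit overflow), while the fallback still deopts
for all negative inputs before truncating. A minimal C++ sketch of the SSE4.1
contract (hypothetical helper; sentinel handling taken from the generated
code):

    #include <cmath>
    #include <cstdint>
    // Returns false where the generated code would deoptimize.
    bool MathFloorToInt32(double input, int32_t* out) {
      if (std::isnan(input)) return false;   // cvttsd2si yields the sentinel
      double floored = std::floor(input);    // roundsd kRoundDown
      // INT32_MIN itself collides with the 0x80000000 overflow sentinel.
      if (floored <= static_cast<double>(INT32_MIN) ||
          floored > static_cast<double>(INT32_MAX)) {
        return false;
      }
      *out = static_cast<int32_t>(floored);  // cvttsd2si
      return true;  // -0 is caught by the minus-zero deopt above
    }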
 
 
@@ -2657,33 +2732,44 @@
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
 
+  Label done;
   // xmm_scratch = 0.5
   __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
   __ movq(xmm_scratch, kScratchRegister);
-
+  NearLabel below_half;
+  __ ucomisd(xmm_scratch, input_reg);
+  __ j(above, &below_half);  // If input_reg is NaN, this doesn't jump.
   // input = input + 0.5
+  // This addition might give a result that isn't the correct one for
+  // rounding, due to loss of precision, but only for a number that's
+  // so big that the conversion below will overflow anyway.
   __ addsd(input_reg, xmm_scratch);
-
-  // We need to return -0 for the input range [-0.5, 0[, otherwise
-  // compute Math.floor(value + 0.5).
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below_equal, instr->environment());
-  } else {
-    // If we don't need to bailout on -0, we check only bailout
-    // on negative inputs.
-    __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
-    __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(below, instr->environment());
-  }
-
-  // Compute Math.floor(value + 0.5).
+  // Compute Math.floor(input).
   // Use truncating instruction (OK because input is positive).
   __ cvttsd2si(output_reg, input_reg);
-
   // Overflow is signalled with minint.
   __ cmpl(output_reg, Immediate(0x80000000));
   DeoptimizeIf(equal, instr->environment());
+  __ jmp(&done);
+
+  __ bind(&below_half);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Bail out if negative (including -0).
+    __ movq(output_reg, input_reg);
+    __ testq(output_reg, output_reg);
+    DeoptimizeIf(negative, instr->environment());
+  } else {
+    // Bail out if below -0.5; otherwise round to (positive) zero, even
+    // if negative.
+    // xmm_scratch = -0.5
+    __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE);
+    __ movq(xmm_scratch, kScratchRegister);
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(below, instr->environment());
+  }
+  __ xorl(output_reg, output_reg);
+
+  __ bind(&done);
 }
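
NOTE: a matching sketch of the rewritten rounding contract (hypothetical
helper; the sentinel and the -0 rules are taken from the generated code):

    #include <cmath>
    #include <cstdint>
    bool MathRoundToInt32(double input, bool deopt_on_minus_zero,
                          int32_t* out) {
      if (std::isnan(input)) return false;      // add path hits the sentinel
      if (input >= 0.5) {
        double sum = input + 0.5;               // addsd (precision loss only
        if (sum >= 2147483648.0) return false;  // matters when it overflows)
        *out = static_cast<int32_t>(sum);       // cvttsd2si truncates
        return true;
      }
      // below_half path.
      if (deopt_on_minus_zero && std::signbit(input)) return false;
      if (input < -0.5) return false;
      *out = 0;                                 // [-0.5, 0.5) rounds to +0
      return true;
    }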
 
 
@@ -2698,7 +2784,7 @@
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
-  __ xorpd(xmm_scratch, xmm_scratch);
+  __ xorps(xmm_scratch, xmm_scratch);
   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   __ sqrtsd(input_reg, input_reg);
 }
@@ -2714,7 +2800,7 @@
   if (exponent_type.IsDouble()) {
     __ PrepareCallCFunction(2);
     // Move arguments to correct registers
-    __ movsd(xmm0, left_reg);
+    __ movaps(xmm0, left_reg);
     ASSERT(ToDoubleRegister(right).is(xmm1));
     __ CallCFunction(
         ExternalReference::power_double_double_function(isolate()), 2);
@@ -2722,7 +2808,7 @@
     __ PrepareCallCFunction(2);
     // Move arguments to correct registers: xmm0 and edi (not rdi).
     // On Windows, the registers are xmm0 and edx.
-    __ movsd(xmm0, left_reg);
+    __ movaps(xmm0, left_reg);
 #ifdef _WIN64
     ASSERT(ToRegister(right).is(rdx));
 #else
@@ -2732,7 +2818,6 @@
         ExternalReference::power_double_int_function(isolate()), 2);
   } else {
     ASSERT(exponent_type.IsTagged());
-    CpuFeatures::Scope scope(SSE2);
     Register right_reg = ToRegister(right);
 
     Label non_smi, call;
@@ -2749,13 +2834,13 @@
     __ bind(&call);
     __ PrepareCallCFunction(2);
     // Move arguments to correct registers xmm0 and xmm1.
-    __ movsd(xmm0, left_reg);
+    __ movaps(xmm0, left_reg);
     // Right argument is already in xmm1.
     __ CallCFunction(
         ExternalReference::power_double_double_function(isolate()), 2);
   }
   // Return value is in xmm0.
-  __ movsd(result_reg, xmm0);
+  __ movaps(result_reg, xmm0);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
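
NOTE: the movsd->movaps and xorpd->xorps switches throughout this patch are
encoding optimizations: for register-to-register moves and for zeroing, the
packed single-precision forms produce the same low 64 bits but omit the
0x66/0xF2 prefix byte, and movaps additionally avoids movsd's merge
dependency on the old contents of the destination register.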
@@ -2818,6 +2903,21 @@
 }
 
 
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->function()).is(rdi));
+  ASSERT(instr->HasPointerMap());
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LPointerMap* pointers = instr->pointer_map();
+  LEnvironment* env = instr->deoptimization_environment();
+  RecordPosition(pointers->position());
+  RegisterEnvironmentForDeoptimization(env);
+  SafepointGenerator generator(this, pointers, env->deoptimization_index());
+  ParameterCount count(instr->arity());
+  __ InvokeFunction(rdi, count, CALL_FUNCTION, &generator);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+}
+
+
 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   ASSERT(ToRegister(instr->key()).is(rcx));
   ASSERT(ToRegister(instr->result()).is(rax));
@@ -2921,7 +3021,7 @@
   ASSERT(ToRegister(instr->value()).is(rax));
 
   __ Move(rcx, instr->hydrogen()->name());
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3017,13 +3117,21 @@
   ASSERT(ToRegister(instr->key()).is(rcx));
   ASSERT(ToRegister(instr->value()).is(rax));
 
-  Handle<Code> ic = info_->is_strict()
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  EmitPushTaggedOperand(instr->left());
+  EmitPushTaggedOperand(instr->right());
+  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   class DeferredStringCharCodeAt: public LDeferredCode {
    public:
@@ -3138,7 +3246,7 @@
   // contained in the register pointer map.
   __ Set(result, 0);
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
   __ push(string);
   // Push the index as a smi. This is safe because of the checks in
   // DoStringCharCodeAt above.
@@ -3151,16 +3259,12 @@
     __ Integer32ToSmi(index, index);
     __ push(index);
   }
-  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
   if (FLAG_debug_code) {
     __ AbortIfNotSmi(rax);
   }
   __ SmiToInteger32(rax, rax);
   __ StoreToSafepointRegisterSlot(result, rax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3203,14 +3307,11 @@
   // contained in the register pointer map.
   __ Set(result, 0);
 
-  __ PushSafepointRegisters();
+  PushSafepointRegistersScope scope(this);
   __ Integer32ToSmi(char_code, char_code);
   __ push(char_code);
-  __ CallRuntimeSaveDoubles(Runtime::kCharFromCode);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 1, Safepoint::kNoDeoptimizationIndex);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
   __ StoreToSafepointRegisterSlot(result, rax);
-  __ PopSafepointRegisters();
 }
 
 
@@ -3275,13 +3376,12 @@
   Register reg = ToRegister(instr->result());
   __ Move(reg, Smi::FromInt(0));
 
-  __ PushSafepointRegisters();
-  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
-  // Ensure that value in rax survives popping registers.
-  __ movq(kScratchRegister, rax);
-  __ PopSafepointRegisters();
+  {
+    PushSafepointRegistersScope scope(this);
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+    // Ensure that value in rax survives popping registers.
+    __ movq(kScratchRegister, rax);
+  }
   __ movq(reg, kScratchRegister);
 }
 
@@ -3322,7 +3422,7 @@
   DeoptimizeIf(not_equal, env);
 
   // Convert undefined to NaN. Compute NaN as 0/0.
-  __ xorpd(result_reg, result_reg);
+  __ xorps(result_reg, result_reg);
   __ divsd(result_reg, result_reg);
   __ jmp(&done);
 
@@ -3363,7 +3463,7 @@
     // conversions.
     __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
     DeoptimizeIf(not_equal, instr->environment());
-    __ movl(input_reg, Immediate(0));
+    __ Set(input_reg, 0);
     __ jmp(&done);
 
     __ bind(&heap_number);
@@ -3371,7 +3471,7 @@
     __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
     __ cvttsd2siq(input_reg, xmm0);
     __ Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
-    __ cmpl(input_reg, kScratchRegister);
+    __ cmpq(input_reg, kScratchRegister);
     DeoptimizeIf(equal, instr->environment());
   } else {
     // Deoptimize if we don't have a heap number.
@@ -3436,7 +3536,7 @@
     // the JS bitwise operations.
     __ cvttsd2siq(result_reg, input_reg);
     __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
-    __ cmpl(result_reg, kScratchRegister);
+    __ cmpq(result_reg, kScratchRegister);
       DeoptimizeIf(equal, instr->environment());
   } else {
     __ cvttsd2si(result_reg, input_reg);
@@ -3691,14 +3791,7 @@
 
 void LCodeGen::DoTypeof(LTypeof* instr) {
   LOperand* input = instr->InputAt(0);
-  if (input->IsConstantOperand()) {
-    __ Push(ToHandle(LConstantOperand::cast(input)));
-  } else if (input->IsRegister()) {
-    __ push(ToRegister(input));
-  } else {
-    ASSERT(input->IsStackSlot());
-    __ push(ToOperand(input));
-  }
+  EmitPushTaggedOperand(input);
   CallRuntime(Runtime::kTypeof, 1, instr);
 }
 
@@ -3726,19 +3819,14 @@
 }
 
 
-void LCodeGen::EmitPushConstantOperand(LOperand* operand) {
-  ASSERT(operand->IsConstantOperand());
-  LConstantOperand* const_op = LConstantOperand::cast(operand);
-  Handle<Object> literal = chunk_->LookupLiteral(const_op);
-  Representation r = chunk_->LookupLiteralRepresentation(const_op);
-  if (r.IsInteger32()) {
-    ASSERT(literal->IsNumber());
-    __ push(Immediate(static_cast<int32_t>(literal->Number())));
-  } else if (r.IsDouble()) {
-    Abort("unsupported double immediate");
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+  ASSERT(!operand->IsDoubleRegister());
+  if (operand->IsConstantOperand()) {
+    __ Push(ToHandle(LConstantOperand::cast(operand)));
+  } else if (operand->IsRegister()) {
+    __ push(ToRegister(operand));
   } else {
-    ASSERT(r.IsTagged());
-    __ Push(literal);
+    __ push(ToOperand(operand));
   }
 }
 
@@ -3884,20 +3972,8 @@
 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
   LOperand* obj = instr->object();
   LOperand* key = instr->key();
-  // Push object.
-  if (obj->IsRegister()) {
-    __ push(ToRegister(obj));
-  } else {
-    __ push(ToOperand(obj));
-  }
-  // Push key.
-  if (key->IsConstantOperand()) {
-    EmitPushConstantOperand(key);
-  } else if (key->IsRegister()) {
-    __ push(ToRegister(key));
-  } else {
-    __ push(ToOperand(key));
-  }
+  EmitPushTaggedOperand(obj);
+  EmitPushTaggedOperand(key);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
   LEnvironment* env = instr->deoptimization_environment();
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index f44fdb9..96e0a0f 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -60,7 +60,8 @@
         status_(UNUSED),
         deferred_(8),
         osr_pc_offset_(-1),
-        resolver_(this) {
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
   }
 
@@ -124,7 +125,7 @@
   bool is_aborted() const { return status_ == ABORTED; }
 
   int strict_mode_flag() const {
-    return info()->is_strict() ? kStrictMode : kNonStrictMode;
+    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -140,8 +141,8 @@
                        Register input,
                        Register temporary);
 
-  int StackSlotCount() const { return chunk()->spill_slot_count(); }
-  int ParameterCount() const { return scope()->num_parameters(); }
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+  int GetParameterCount() const { return scope()->num_parameters(); }
 
   void Abort(const char* format, ...);
   void Comment(const char* format, ...);
@@ -156,12 +157,26 @@
   bool GenerateJumpTable();
   bool GenerateSafepointTable();
 
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS
+  };
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode,
+                       int argc);
+
   void CallCode(Handle<Code> code,
                 RelocInfo::Mode mode,
                 LInstruction* instr);
+
   void CallRuntime(const Runtime::Function* function,
                    int num_arguments,
                    LInstruction* instr);
+
   void CallRuntime(Runtime::FunctionId id,
                    int num_arguments,
                    LInstruction* instr) {
@@ -169,6 +184,11 @@
     CallRuntime(function, num_arguments, instr);
   }
 
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr);
+
   // Generate a direct call to a known function.  Expects the function
   // to be in rdi.
   void CallKnownFunction(Handle<JSFunction> function,
@@ -177,7 +197,9 @@
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
-  void RegisterLazyDeoptimization(LInstruction* instr);
+  void RegisterLazyDeoptimization(LInstruction* instr,
+                                  SafepointMode safepoint_mode,
+                                  int argc);
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
 
@@ -246,11 +268,12 @@
                      Handle<Map> type,
                      Handle<String> name);
 
-  // Emits code for pushing a constant operand.
-  void EmitPushConstantOperand(LOperand* operand);
+  // Emits code for pushing either a tagged constant, a (non-double)
+  // register, or a stack slot operand.
+  void EmitPushTaggedOperand(LOperand* operand);
 
   struct JumpTableEntry {
-    inline JumpTableEntry(Address entry)
+    explicit inline JumpTableEntry(Address entry)
         : label(),
           address(entry) { }
     Label label;
@@ -281,6 +304,27 @@
   // Compiler from a set of parallel moves to a sequential list of moves.
   LGapResolver resolver_;
 
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->masm_->PushSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+    }
+
+    ~PushSafepointRegistersScope() {
+      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      codegen_->masm_->PopSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
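
NOTE: this scope replaces the manual PushSafepointRegisters() /
PopSafepointRegisters() pairs deleted from the deferred-code paths in
lithium-codegen-x64.cc, so no exit path can leak the pushed register frame.
Call-site sketch (hypothetical names):

    void LCodeGen::DoDeferredFoo(LFoo* instr) {
      PushSafepointRegistersScope scope(this);  // push; expect kWithRegisters
      CallRuntimeFromDeferred(Runtime::kFoo, 0, instr);
      __ StoreToSafepointRegisterSlot(result, rax);
    }  // destructor pops the registers and restores Safepoint::kSimple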
+
   friend class LDeferredCode;
   friend class LEnvironment;
   friend class SafepointGenerator;
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
index cedd025..c3c617c 100644
--- a/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -214,7 +214,7 @@
   } else if (source->IsDoubleRegister()) {
     XMMRegister src = cgen_->ToDoubleRegister(source);
     if (destination->IsDoubleRegister()) {
-      __ movsd(cgen_->ToDoubleRegister(destination), src);
+      __ movaps(cgen_->ToDoubleRegister(destination), src);
     } else {
       ASSERT(destination->IsDoubleStackSlot());
       __ movsd(cgen_->ToOperand(destination), src);
@@ -273,9 +273,9 @@
     // Swap two double registers.
     XMMRegister source_reg = cgen_->ToDoubleRegister(source);
     XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
-    __ movsd(xmm0, source_reg);
-    __ movsd(source_reg, destination_reg);
-    __ movsd(destination_reg, xmm0);
+    __ movaps(xmm0, source_reg);
+    __ movaps(source_reg, destination_reg);
+    __ movaps(destination_reg, xmm0);
 
   } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
     // Swap a double register and a double stack slot.
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index c47cd72..620bbc9 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -71,22 +71,21 @@
 
 #ifdef DEBUG
 void LInstruction::VerifyCall() {
-  // Call instructions can use only fixed registers as
-  // temporaries and outputs because all registers
-  // are blocked by the calling convention.
-  // Inputs must use a fixed register.
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
   ASSERT(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
   }
   for (TempIterator it(this); it.HasNext(); it.Advance()) {
-    LOperand* operand = it.Next();
-    ASSERT(LUnallocated::cast(operand)->HasFixedPolicy() ||
-           !LUnallocated::cast(operand)->HasRegisterPolicy());
+    LUnallocated* operand = LUnallocated::cast(it.Next());
+    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -303,6 +302,13 @@
 }
 
 
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
 void LCallKeyed::PrintDataTo(StringStream* stream) {
   stream->Add("[rcx] #%d / ", arity());
 }
@@ -1114,9 +1120,9 @@
       return new LIsConstructCallAndBranch(TempRegister());
     } else {
       if (v->IsConstant()) {
-        if (HConstant::cast(v)->handle()->IsTrue()) {
+        if (HConstant::cast(v)->ToBoolean()) {
           return new LGoto(instr->FirstSuccessor()->block_id());
-        } else if (HConstant::cast(v)->handle()->IsFalse()) {
+        } else {
           return new LGoto(instr->SecondSuccessor()->block_id());
         }
       }
@@ -1211,6 +1217,14 @@
 }
 
 
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), rdi);
+  argument_count_ -= instr->argument_count();
+  LInvokeFunction* result = new LInvokeFunction(function);
+  return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
   if (op == kMathLog || op == kMathSin || op == kMathCos) {
@@ -1613,11 +1627,8 @@
       LOperand* value = UseRegister(instr->value());
       bool needs_check = !instr->value()->type().IsSmi();
       if (needs_check) {
-        LOperand* xmm_temp =
-            (instr->CanTruncateToInt32() &&
-             Isolate::Current()->cpu_features()->IsSupported(SSE3))
-            ? NULL
-            : FixedTemp(xmm1);
+        LOperand* xmm_temp = instr->CanTruncateToInt32() ? NULL
+                                                         : FixedTemp(xmm1);
         LTaggedToI* res = new LTaggedToI(value, xmm_temp);
         return AssignEnvironment(DefineSameAsFirst(res));
       } else {
@@ -1718,21 +1729,36 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  LLoadGlobal* result = new LLoadGlobal;
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new LLoadGlobalCell;
   return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
 
 
-LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
-  LStoreGlobal* result = new LStoreGlobal(UseRegister(instr->value()),
-                                          TempRegister());
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), rax);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+  LStoreGlobalCell* result =
+      new LStoreGlobalCell(UseRegister(instr->value()), TempRegister());
   return instr->check_hole_value() ? AssignEnvironment(result) : result;
 }
 
 
+LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), rdx);
+  LOperand* value = UseFixed(instr->value(), rax);
+  LStoreGlobalGeneric* result = new LStoreGlobalGeneric(global_object, value);
+  return MarkAsCall(result, instr);
+}
+
+
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
   return DefineAsRegister(new LLoadContextSlot(context));
@@ -1877,7 +1903,7 @@
       array_type == kExternalFloatArray;
   LOperand* val = val_is_temp_register
       ? UseTempRegister(instr->value())
-      : UseRegister(instr->key());
+      : UseRegister(instr->value());
   LOperand* key = UseRegister(instr->key());
 
   return new LStoreKeyedSpecializedArrayElement(external_pointer,
@@ -1929,6 +1955,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* left = UseOrConstantAtStart(instr->left());
+  LOperand* right = UseOrConstantAtStart(instr->right());
+  return MarkAsCall(DefineFixed(new LStringAdd(left, right), rax), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* string = UseRegister(instr->string());
   LOperand* index = UseRegisterOrConstant(instr->index());
@@ -1972,7 +2005,8 @@
 
 LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
   LDeleteProperty* result =
-      new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
+      new LDeleteProperty(UseAtStart(instr->object()),
+                          UseOrConstantAtStart(instr->key()));
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -2058,7 +2092,6 @@
       env->Push(value);
     }
   }
-  ASSERT(env->length() == instr->environment_length());
 
   // If there is an instruction pending deoptimization environment create a
   // lazy bailout instruction to capture the environment.
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index e94debf..74f4820 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -98,14 +98,15 @@
   V(GlobalObject)                               \
   V(GlobalReceiver)                             \
   V(Goto)                                       \
-  V(HasInstanceType)                            \
-  V(HasInstanceTypeAndBranch)                   \
   V(HasCachedArrayIndex)                        \
   V(HasCachedArrayIndexAndBranch)               \
+  V(HasInstanceType)                            \
+  V(HasInstanceTypeAndBranch)                   \
   V(InstanceOf)                                 \
   V(InstanceOfAndBranch)                        \
   V(InstanceOfKnownGlobal)                      \
   V(Integer32ToDouble)                          \
+  V(InvokeFunction)                             \
   V(IsNull)                                     \
   V(IsNullAndBranch)                            \
   V(IsObject)                                   \
@@ -118,7 +119,8 @@
   V(LoadContextSlot)                            \
   V(LoadElements)                               \
   V(LoadExternalArrayPointer)                   \
-  V(LoadGlobal)                                 \
+  V(LoadGlobalCell)                             \
+  V(LoadGlobalGeneric)                          \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadKeyedSpecializedArrayElement)           \
@@ -144,12 +146,14 @@
   V(SmiUntag)                                   \
   V(StackCheck)                                 \
   V(StoreContextSlot)                           \
-  V(StoreGlobal)                                \
+  V(StoreGlobalCell)                            \
+  V(StoreGlobalGeneric)                         \
   V(StoreKeyedFastElement)                      \
   V(StoreKeyedGeneric)                          \
   V(StoreKeyedSpecializedArrayElement)          \
   V(StoreNamedField)                            \
   V(StoreNamedGeneric)                          \
+  V(StringAdd)                                  \
   V(StringCharCodeAt)                           \
   V(StringCharFromCode)                         \
   V(StringLength)                               \
@@ -1245,22 +1249,55 @@
 };
 
 
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
 };
 
 
-class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LStoreGlobal(LOperand* value, LOperand* temp) {
+  explicit LLoadGlobalGeneric(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* global_object() { return inputs_[0]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+ public:
+  explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
-  DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
+ public:
+  explicit LStoreGlobalGeneric(LOperand* global_object,
+                               LOperand* value) {
+    inputs_[0] = global_object;
+    inputs_[1] = value;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
+
+  LOperand* global_object() { return InputAt(0); }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LOperand* value() { return InputAt(1); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1358,6 +1395,23 @@
 };
 
 
+class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInvokeFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  LOperand* function() { return inputs_[0]; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
 class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallKeyed(LOperand* key) {
@@ -1582,6 +1636,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* value() { return inputs_[1]; }
   Handle<Object> name() const { return hydrogen()->name(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1637,12 +1692,29 @@
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
   virtual void PrintDataTo(StringStream* stream);
 
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStringAdd: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringAdd(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
 };
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 654814c..3394206 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "bootstrapper.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "assembler-x64.h"
 #include "macro-assembler-x64.h"
 #include "serialize.h"
@@ -40,12 +40,15 @@
 namespace v8 {
 namespace internal {
 
-MacroAssembler::MacroAssembler(void* buffer, int size)
-    : Assembler(buffer, size),
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+    : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
       allow_stub_calls_(true),
-      root_array_available_(true),
-      code_object_(isolate()->heap()->undefined_value()) {
+      root_array_available_(true) {
+  if (isolate() != NULL) {
+    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+                                  isolate());
+  }
 }
 
 
@@ -647,6 +650,7 @@
   Label leave_exit_frame;
   Label write_back;
 
+  Factory* factory = isolate()->factory();
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   const int kNextOffset = 0;
@@ -694,7 +698,7 @@
 
   // Check if the function scheduled an exception.
   movq(rsi, scheduled_exception_address);
-  Cmp(Operand(rsi, 0), FACTORY->the_hole_value());
+  Cmp(Operand(rsi, 0), factory->the_hole_value());
   j(not_equal, &promote_scheduled_exception);
 
   LeaveApiExitFrame();
@@ -709,7 +713,7 @@
 
   bind(&empty_result);
   // It was zero; the result is undefined.
-  Move(rax, FACTORY->undefined_value());
+  Move(rax, factory->undefined_value());
   jmp(&prologue);
 
   // HandleScope limit has changed. Delete allocated extensions.
@@ -785,10 +789,10 @@
 void MacroAssembler::Set(Register dst, int64_t x) {
   if (x == 0) {
     xorl(dst, dst);
-  } else if (is_int32(x)) {
-    movq(dst, Immediate(static_cast<int32_t>(x)));
   } else if (is_uint32(x)) {
     movl(dst, Immediate(static_cast<uint32_t>(x)));
+  } else if (is_int32(x)) {
+    movq(dst, Immediate(static_cast<int32_t>(x)));
   } else {
     movq(dst, x, RelocInfo::NONE);
   }
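
NOTE: testing is_uint32 before is_int32 makes every non-negative value that
fits in 32 bits load via movl, which zero-extends into the full 64-bit
register and needs no REX.W prefix, where the old order used the longer
sign-extending movq form; negative 32-bit values still take the movq path.
For example:

    Set(rax, 0x7FFFFFFF);  // now movl: shorter encoding, zero-extends
    Set(rax, -1);          // still movq with a sign-extended imm32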
@@ -798,7 +802,7 @@
   if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
   } else {
-    movq(kScratchRegister, x, RelocInfo::NONE);
+    Set(kScratchRegister, x);
     movq(dst, kScratchRegister);
   }
 }
@@ -1244,12 +1248,17 @@
                             Register src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible.
-  ASSERT(!dst.is(src2));
   if (!dst.is(src1)) {
-    movq(dst, src1);
+    if (emit_debug_code()) {
+      movq(kScratchRegister, src1);
+      addq(kScratchRegister, src2);
+      Check(no_overflow, "Smi addition overflow");
+    }
+    lea(dst, Operand(src1, src2, times_1, 0));
+  } else {
+    addq(dst, src2);
+    Assert(no_overflow, "Smi addition overflow");
   }
-  addq(dst, src2);
-  Assert(no_overflow, "Smi addition overflow");
 }
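
NOTE: with a distinct dst the sum is now formed by
lea(dst, Operand(src1, src2, times_1, 0)), a single instruction that
clobbers neither source; and since lea does not set flags, the debug-mode
overflow Check has to redo the addition in kScratchRegister first.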
 
 
@@ -1317,6 +1326,7 @@
 
 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
+    ASSERT(!src1.is(src2));
     movq(dst, src1);
   }
   or_(dst, src2);
@@ -1337,6 +1347,7 @@
 
 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
+    ASSERT(!src1.is(src2));
     movq(dst, src1);
   }
   xor_(dst, src2);
@@ -1809,7 +1820,7 @@
     // Set external caught exception to false.
     ExternalReference external_caught(
         Isolate::k_external_caught_exception_address, isolate());
-    movq(rax, Immediate(false));
+    Set(rax, static_cast<int64_t>(false));
     Store(external_caught, rax);
 
     // Set pending exception and rax to out of memory exception.
@@ -1890,7 +1901,7 @@
   Condition is_smi = CheckSmi(object);
   j(is_smi, &ok);
   Cmp(FieldOperand(object, HeapObject::kMapOffset),
-      FACTORY->heap_number_map());
+      isolate()->factory()->heap_number_map());
   Assert(equal, "Operand not a number");
   bind(&ok);
 }
@@ -1997,7 +2008,7 @@
 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
   if (FLAG_native_code_counters && counter->Enabled()) {
     Operand counter_operand = ExternalOperand(ExternalReference(counter));
-    movq(counter_operand, Immediate(value));
+    movl(counter_operand, Immediate(value));
   }
 }
 
@@ -2147,7 +2158,7 @@
   push(kScratchRegister);
   if (emit_debug_code()) {
     movq(kScratchRegister,
-         FACTORY->undefined_value(),
+         isolate()->factory()->undefined_value(),
          RelocInfo::EMBEDDED_OBJECT);
     cmpq(Operand(rsp, 0), kScratchRegister);
     Check(not_equal, "code object not properly patched");
@@ -2199,7 +2210,6 @@
 #endif
   // Optionally save all XMM registers.
   if (save_doubles) {
-    CpuFeatures::Scope scope(SSE2);
     int space = XMMRegister::kNumRegisters * kDoubleSize +
         arg_stack_space * kPointerSize;
     subq(rsp, Immediate(space));
@@ -2216,8 +2226,8 @@
   const int kFrameAlignment = OS::ActivationFrameAlignment();
   if (kFrameAlignment > 0) {
     ASSERT(IsPowerOf2(kFrameAlignment));
-    movq(kScratchRegister, Immediate(-kFrameAlignment));
-    and_(rsp, kScratchRegister);
+    ASSERT(is_int8(kFrameAlignment));
+    and_(rsp, Immediate(-kFrameAlignment));
   }
 
   // Patch the saved entry sp.
@@ -2316,7 +2326,7 @@
   // Check the context is a global context.
   if (emit_debug_code()) {
     Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
-        FACTORY->global_context_map());
+        isolate()->factory()->global_context_map());
     Check(equal, "JSGlobalObject::global_context should be a global context.");
   }
 
@@ -2818,7 +2828,7 @@
   movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   if (emit_debug_code()) {
     Label ok, fail;
-    CheckMap(map, FACTORY->meta_map(), &fail, false);
+    CheckMap(map, isolate()->factory()->meta_map(), &fail, false);
     jmp(&ok);
     bind(&fail);
     Abort("Global functions must have initial map");
@@ -2851,9 +2861,6 @@
   ASSERT(frame_alignment != 0);
   ASSERT(num_arguments >= 0);
 
-  // Reserve space for Isolate address which is always passed as last parameter
-  num_arguments += 1;
-
   // Make stack end at alignment and allocate space for arguments and old rsp.
   movq(kScratchRegister, rsp);
   ASSERT(IsPowerOf2(frame_alignment));
@@ -2873,26 +2880,6 @@
 
 
 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
-  // Pass current isolate address as additional parameter.
-  if (num_arguments < kRegisterPassedArguments) {
-#ifdef _WIN64
-    // First four arguments are passed in registers on Windows.
-    Register arg_to_reg[] = {rcx, rdx, r8, r9};
-#else
-    // First six arguments are passed in registers on other platforms.
-    Register arg_to_reg[] = {rdi, rsi, rdx, rcx, r8, r9};
-#endif
-    Register reg = arg_to_reg[num_arguments];
-    LoadAddress(reg, ExternalReference::isolate_address());
-  } else {
-    // Push Isolate pointer after all parameters.
-    int argument_slots_on_stack =
-        ArgumentStackSlotsForCFunctionCall(num_arguments);
-    LoadAddress(kScratchRegister, ExternalReference::isolate_address());
-    movq(Operand(rsp, argument_slots_on_stack * kPointerSize),
-         kScratchRegister);
-  }
-
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
@@ -2901,7 +2888,6 @@
   call(function);
   ASSERT(OS::ActivationFrameAlignment() != 0);
   ASSERT(num_arguments >= 0);
-  num_arguments += 1;
   int argument_slots_on_stack =
       ArgumentStackSlotsForCFunctionCall(num_arguments);
   movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
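
NOTE: CallCFunction no longer appends the Isolate* as a hidden trailing
argument; callers that need it now declare it explicitly, as the
regexp-macro-assembler changes below do by bumping num_arguments and
loading ExternalReference::isolate_address() into the corresponding
parameter register.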
@@ -2909,7 +2895,9 @@
 
 
 CodePatcher::CodePatcher(byte* address, int size)
-    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
+    : address_(address),
+      size_(size),
+      masm_(Isolate::Current(), address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate
   // bytes of instructions without failing with buffer size constraints.
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 1ee0fe0..4c17720 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -74,7 +74,11 @@
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
-  MacroAssembler(void* buffer, int size);
+  // The isolate parameter can be NULL if the macro assembler should
+  // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller never to invoke such a function on the
+  // macro assembler.
+  MacroAssembler(Isolate* isolate, void* buffer, int size);
 
   // Prevent the use of the RootArray during the lifetime of this
   // scope object.
@@ -319,6 +323,16 @@
                                            Register src,
                                            int power);
 
+  // Perform the logical or of two smi values and return a smi value.
+  // If either argument is not a smi, jump to on_not_smis and retain
+  // the original values of source registers. The destination register
+  // may be changed if it's not one of the source registers.
+  template <typename LabelType>
+  void SmiOrIfSmis(Register dst,
+                   Register src1,
+                   Register src2,
+                   LabelType* on_not_smis);
+
 
   // Simple comparison of smis.  Both sides must be known smis to use these,
   // otherwise use Cmp.
@@ -1029,7 +1043,10 @@
   // may be bigger than 2^16 - 1.  Requires a scratch register.
   void Ret(int bytes_dropped, Register scratch);
 
-  Handle<Object> CodeObject() { return code_object_; }
+  Handle<Object> CodeObject() {
+    ASSERT(!code_object_.is_null());
+    return code_object_;
+  }
 
   // Copy length bytes from source to destination.
   // Uses scratch register internally (if you have a low-eight register
@@ -1076,6 +1093,10 @@
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
 
+  static int SafepointRegisterStackIndex(Register reg) {
+    return SafepointRegisterStackIndex(reg.code());
+  }
+
  private:
   // Order general registers are pushed by Pushad.
   // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
@@ -1779,6 +1800,24 @@
 
 
 template <typename LabelType>
+void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
+                                 LabelType* on_not_smis) {
+  if (dst.is(src1) || dst.is(src2)) {
+    ASSERT(!src1.is(kScratchRegister));
+    ASSERT(!src2.is(kScratchRegister));
+    movq(kScratchRegister, src1);
+    or_(kScratchRegister, src2);
+    JumpIfNotSmi(kScratchRegister, on_not_smis);
+    movq(dst, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    or_(dst, src2);
+    JumpIfNotSmi(dst, on_not_smis);
+  }
+}
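
NOTE: the bitwise or of two smis is itself a smi (both tag bits are zero),
so SmiOrIfSmis doubles as a combined two-operand smi check. Hypothetical
call-site sketch:

    NearLabel not_smis;
    __ SmiOrIfSmis(rcx, rdx, rax, &not_smis);  // rcx = rdx | rax
    // ... fast path: both rdx and rax are known smis here ...
    __ bind(&not_smis);  // sources are unchanged on this path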
+
+
+template <typename LabelType>
 void MacroAssembler::JumpIfNotString(Register object,
                                      Register object_map,
                                      LabelType* not_string) {
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 269e7af..d4ccb0e 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -114,7 +114,7 @@
 RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
     Mode mode,
     int registers_to_save)
-    : masm_(NULL, kRegExpCodeSize),
+    : masm_(Isolate::Current(), NULL, kRegExpCodeSize),
       no_root_array_scope_(&masm_),
       code_relative_fixup_positions_(4),
       mode_(mode),
@@ -402,13 +402,14 @@
 #endif
     __ push(backtrack_stackpointer());
 
-    static const int num_arguments = 3;
+    static const int num_arguments = 4;
     __ PrepareCallCFunction(num_arguments);
 
     // Put arguments into parameter registers. Parameters are
     //   Address byte_offset1 - Address of captured substring's start.
     //   Address byte_offset2 - Address of current character position.
     //   size_t byte_length - length of capture in bytes(!)
+    //   Isolate* isolate
 #ifdef _WIN64
     // Compute and set byte_offset1 (start of capture).
     __ lea(rcx, Operand(rsi, rdx, times_1, 0));
@@ -416,6 +417,8 @@
     __ lea(rdx, Operand(rsi, rdi, times_1, 0));
     // Set byte_length.
     __ movq(r8, rbx);
+    // Isolate.
+    __ LoadAddress(r9, ExternalReference::isolate_address());
 #else  // AMD64 calling convention
     // Compute byte_offset2 (current position = rsi+rdi).
     __ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -425,6 +428,8 @@
     __ movq(rsi, rax);
     // Set byte_length.
     __ movq(rdx, rbx);
+    // Isolate.
+    __ LoadAddress(rcx, ExternalReference::isolate_address());
 #endif
     ExternalReference compare =
         ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
@@ -757,7 +762,7 @@
   __ j(above_equal, &stack_ok);
   // Exit with OutOfMemory exception. There is not enough space on the stack
   // for our working registers.
-  __ movq(rax, Immediate(EXCEPTION));
+  __ Set(rax, EXCEPTION);
   __ jmp(&exit_label_);
 
   __ bind(&stack_limit_hit);
@@ -794,7 +799,7 @@
     // Fill saved registers with initial value = start offset - 1
     // Fill in stack push order, to avoid accessing across an unwritten
     // page (a problem on Windows).
-    __ movq(rcx, Immediate(kRegisterZero));
+    __ Set(rcx, kRegisterZero);
     Label init_loop;
     __ bind(&init_loop);
     __ movq(Operand(rbp, rcx, times_1, 0), rax);
@@ -824,7 +829,7 @@
   LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
   __ jmp(&start_label_);
   __ bind(&at_start);
-  __ movq(current_character(), Immediate('\n'));
+  __ Set(current_character(), '\n');
   __ jmp(&start_label_);
 
 
@@ -852,7 +857,7 @@
         __ movl(Operand(rbx, i * kIntSize), rax);
       }
     }
-    __ movq(rax, Immediate(SUCCESS));
+    __ Set(rax, SUCCESS);
   }
 
   // Exit and return rax
@@ -919,16 +924,18 @@
 #endif
 
     // Call GrowStack(backtrack_stackpointer())
-    static const int num_arguments = 2;
+    static const int num_arguments = 3;
     __ PrepareCallCFunction(num_arguments);
 #ifdef _WIN64
-    // Microsoft passes parameters in rcx, rdx.
+    // Microsoft passes parameters in rcx, rdx, r8.
     // First argument, backtrack stackpointer, is already in rcx.
     __ lea(rdx, Operand(rbp, kStackHighEnd));  // Second argument
+    __ LoadAddress(r8, ExternalReference::isolate_address());
 #else
-    // AMD64 ABI passes parameters in rdi, rsi.
+    // AMD64 ABI passes parameters in rdi, rsi, rdx.
     __ movq(rdi, backtrack_stackpointer());   // First argument.
     __ lea(rsi, Operand(rbp, kStackHighEnd));  // Second argument.
+    __ LoadAddress(rdx, ExternalReference::isolate_address());
 #endif
     ExternalReference grow_stack =
         ExternalReference::re_grow_stack(masm_.isolate());
@@ -952,7 +959,7 @@
     // If any of the code above needed to exit with an exception.
     __ bind(&exit_with_exception);
     // Exit with Result EXCEPTION(-1) to signal thrown exception.
-    __ movq(rax, Immediate(EXCEPTION));
+    __ Set(rax, EXCEPTION);
     __ jmp(&exit_label_);
   }
 
diff --git a/src/x64/register-allocator-x64-inl.h b/src/x64/register-allocator-x64-inl.h
deleted file mode 100644
index 5df3d54..0000000
--- a/src/x64/register-allocator-x64-inl.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
-#define V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
-
-#include "v8.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-bool RegisterAllocator::IsReserved(Register reg) {
-  return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
-      reg.is(kScratchRegister) || reg.is(kRootRegister) ||
-      reg.is(kSmiConstantRegister);
-}
-
-
-// The register allocator uses small integers to represent the
-// non-reserved assembler registers.
-int RegisterAllocator::ToNumber(Register reg) {
-  ASSERT(reg.is_valid() && !IsReserved(reg));
-  const int kNumbers[] = {
-    0,   // rax
-    2,   // rcx
-    3,   // rdx
-    1,   // rbx
-    -1,  // rsp  Stack pointer.
-    -1,  // rbp  Frame pointer.
-    -1,  // rsi  Context.
-    4,   // rdi
-    5,   // r8
-    6,   // r9
-    -1,  // r10  Scratch register.
-    8,   // r11
-    -1,  // r12  Smi constant.
-    -1,  // r13  Roots array.  This is callee saved.
-    7,   // r14
-    9    // r15
-  };
-  return kNumbers[reg.code()];
-}
-
-
-Register RegisterAllocator::ToRegister(int num) {
-  ASSERT(num >= 0 && num < kNumRegisters);
-  const Register kRegisters[] =
-      { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r11, r15 };
-  return kRegisters[num];
-}
-
-
-void RegisterAllocator::Initialize() {
-  Reset();
-  // The non-reserved rdi register is live on JS function entry.
-  Use(rdi);  // JS function.
-}
-} }  // namespace v8::internal
-
-#endif  // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
diff --git a/src/x64/register-allocator-x64.cc b/src/x64/register-allocator-x64.cc
deleted file mode 100644
index 65189f5..0000000
--- a/src/x64/register-allocator-x64.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Result implementation.
-
-void Result::ToRegister() {
-  ASSERT(is_valid());
-  if (is_constant()) {
-    CodeGenerator* code_generator =
-        CodeGeneratorScope::Current(Isolate::Current());
-    Result fresh = code_generator->allocator()->Allocate();
-    ASSERT(fresh.is_valid());
-    code_generator->masm()->Move(fresh.reg(), handle());
-    // This result becomes a copy of the fresh one.
-    fresh.set_type_info(type_info());
-    *this = fresh;
-  }
-  ASSERT(is_register());
-}
-
-
-void Result::ToRegister(Register target) {
-  ASSERT(is_valid());
-  CodeGenerator* code_generator =
-      CodeGeneratorScope::Current(Isolate::Current());
-  if (!is_register() || !reg().is(target)) {
-    Result fresh = code_generator->allocator()->Allocate(target);
-    ASSERT(fresh.is_valid());
-    if (is_register()) {
-      code_generator->masm()->movq(fresh.reg(), reg());
-    } else {
-      ASSERT(is_constant());
-      code_generator->masm()->Move(fresh.reg(), handle());
-    }
-    fresh.set_type_info(type_info());
-    *this = fresh;
-  } else if (is_register() && reg().is(target)) {
-    ASSERT(code_generator->has_valid_frame());
-    code_generator->frame()->Spill(target);
-    ASSERT(code_generator->allocator()->count(target) == 1);
-  }
-  ASSERT(is_register());
-  ASSERT(reg().is(target));
-}
-
-
-// -------------------------------------------------------------------------
-// RegisterAllocator implementation.
-
-Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
-  // This function is not used in 64-bit code.
-  UNREACHABLE();
-  return Result();
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/register-allocator-x64.h b/src/x64/register-allocator-x64.h
deleted file mode 100644
index a2884d9..0000000
--- a/src/x64/register-allocator-x64.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_REGISTER_ALLOCATOR_X64_H_
-#define V8_X64_REGISTER_ALLOCATOR_X64_H_
-
-namespace v8 {
-namespace internal {
-
-class RegisterAllocatorConstants : public AllStatic {
- public:
-  static const int kNumRegisters = 10;
-  static const int kInvalidRegister = -1;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_X64_REGISTER_ALLOCATOR_X64_H_
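
The companion header deleted here pinned two platform constants:
kNumRegisters, the number of general-purpose registers the old allocator
tracked on x64 (10 rather than 16, since registers such as rsp, rbp, and
the scratch register were never allocatable), and the kInvalidRegister
sentinel.  Their job was to size per-register bookkeeping tables, roughly
as in this sketch (hypothetical consumer code, not from the tree):

    // Hypothetical consumer of the constants above: a fixed-size
    // register-to-frame-slot table, initialized to the sentinel.
    struct RegisterAllocatorConstants {
      static const int kNumRegisters = 10;
      static const int kInvalidRegister = -1;
    };

    struct Locations {
      int at[RegisterAllocatorConstants::kNumRegisters];
      Locations() {
        for (int i = 0; i < RegisterAllocatorConstants::kNumRegisters; i++)
          at[i] = RegisterAllocatorConstants::kInvalidRegister;  // unused
      }
      bool is_used(int num) {
        return at[num] != RegisterAllocatorConstants::kInvalidRegister;
      }
    };

    int main() {
      Locations locs;
      return locs.is_used(0) ? 1 : 0;  // all registers start unused
    }
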
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 7494fe0..c19d29d 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -30,7 +30,7 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "ic-inl.h"
-#include "codegen-inl.h"
+#include "codegen.h"
 #include "stub-cache.h"
 
 namespace v8 {
@@ -399,7 +399,7 @@
   ExternalReference ref =
       ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
                         masm->isolate());
-  __ movq(rax, Immediate(5));
+  __ Set(rax, 5);
   __ LoadAddress(rbx, ref);
 
   CEntryStub stub(1);
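
The functional change in this hunk swaps movq(rax, Immediate(5)) for
Set(rax, 5).  The likely motivation is code size: the Immediate form emits
the 7-byte sign-extended mov r64, imm32 encoding, while Set() can pick the
shortest encoding for the value at hand, because a 32-bit move already
zero-extends into the full 64-bit register on x86-64, and zero can be an
xor.  The byte arithmetic follows from the standard x86-64 encodings, as
in this standalone sketch (illustrative, not V8's implementation):

    #include <cstdint>
    #include <cstdio>

    // Rough instruction lengths for materializing an immediate in a
    // 64-bit GPR (legacy-register case, no REX.B needed):
    int BytesToMaterialize(int64_t x) {
      if (x == 0) return 2;                     // xor r32, r32
      if (x >= 0 && x <= UINT32_MAX) return 5;  // mov r32, imm32 (zero-extends)
      if (x >= INT32_MIN && x < 0) return 7;    // REX.W mov r/m64, imm32 (sign-extends)
      return 10;                                // REX.W mov r64, imm64
    }

    int main() {
      printf("0 -> %d bytes, 5 -> %d bytes, -1 -> %d bytes\n",
             BytesToMaterialize(0),   // 2
             BytesToMaterialize(5),   // 5, vs. 7 for the old movq form
             BytesToMaterialize(-1)); // 7
    }
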
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
deleted file mode 100644
index 10c327a..0000000
--- a/src/x64/virtual-frame-x64.cc
+++ /dev/null
@@ -1,1296 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#if defined(V8_TARGET_ARCH_X64)
-
-#include "codegen-inl.h"
-#include "register-allocator-inl.h"
-#include "scopes.h"
-#include "stub-cache.h"
-#include "virtual-frame-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm())
-
-void VirtualFrame::Enter() {
-  // Registers live on entry to a JS frame:
-  //   rsp: stack pointer, points to return address from this function.
-  //   rbp: base pointer, points to previous JS, ArgumentsAdaptor, or
-  //        Trampoline frame.
-  //   rsi: context of this function call.
-  //   rdi: pointer to this function object.
-  Comment cmnt(masm(), "[ Enter JS frame");
-
-#ifdef DEBUG
-  if (FLAG_debug_code) {
-    // Verify that rdi contains a JS function.  The following code
-    // relies on rax being available for use.
-    Condition not_smi = NegateCondition(masm()->CheckSmi(rdi));
-    __ Check(not_smi,
-             "VirtualFrame::Enter - rdi is not a function (smi check).");
-    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
-    __ Check(equal,
-             "VirtualFrame::Enter - rdi is not a function (map check).");
-  }
-#endif
-
-  EmitPush(rbp);
-
-  __ movq(rbp, rsp);
-
-  // Store the context in the frame.  The context is kept in rsi and a
-  // copy is stored in the frame.  The external reference to rsi
-  // remains.
-  EmitPush(rsi);
-
-  // Store the function in the frame.  The frame owns the register
-  // reference now (ie, it can keep it in rdi or spill it later).
-  Push(rdi);
-  SyncElementAt(element_count() - 1);
-  cgen()->allocator()->Unuse(rdi);
-}
-
-
-void VirtualFrame::Exit() {
-  Comment cmnt(masm(), "[ Exit JS frame");
-  // Record the location of the JS exit code for patching when setting
-  // break point.
-  __ RecordJSReturn();
-
-  // Avoid using the leave instruction here, because it is too
-  // short. We need the return sequence to be at least the size of a
-  // call instruction to support patching the exit code in the
-  // debugger. See GenerateReturnSequence for the full return sequence.
-  // TODO(X64): A patched call will be very long now.  Make sure we
-  // have enough room.
-  __ movq(rsp, rbp);
-  stack_pointer_ = frame_pointer();
-  for (int i = element_count() - 1; i > stack_pointer_; i--) {
-    FrameElement last = elements_.RemoveLast();
-    if (last.is_register()) {
-      Unuse(last.reg());
-    }
-  }
-
-  EmitPop(rbp);
-}
-
-
-void VirtualFrame::AllocateStackSlots() {
-  int count = local_count();
-  if (count > 0) {
-    Comment cmnt(masm(), "[ Allocate space for locals");
-    // The locals are initialized to a constant (the undefined value), but
-    // we sync them with the actual frame to allocate space for spilling
-    // them later.  First sync everything above the stack pointer so we can
-    // use pushes to allocate and initialize the locals.
-    SyncRange(stack_pointer_ + 1, element_count() - 1);
-    Handle<Object> undefined = FACTORY->undefined_value();
-    FrameElement initial_value =
-        FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
-    if (count < kLocalVarBound) {
-      // For fewer locals the unrolled loop is more compact.
-
-      // Hope for one of the first eight registers, where the push operation
-      // takes only one byte (kScratchRegister needs a REX prefix).
-      Result tmp = cgen()->allocator()->Allocate();
-      ASSERT(tmp.is_valid());
-      __ movq(tmp.reg(), undefined, RelocInfo::EMBEDDED_OBJECT);
-      for (int i = 0; i < count; i++) {
-        __ push(tmp.reg());
-      }
-    } else {
-      // For more locals a loop in generated code is more compact.
-      Label alloc_locals_loop;
-      Result cnt = cgen()->allocator()->Allocate();
-      ASSERT(cnt.is_valid());
-      __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
-#ifdef DEBUG
-      Label loop_size;
-      __ bind(&loop_size);
-#endif
-      if (is_uint8(count)) {
-        // Loading imm8 is shorter than loading imm32.
-        // Loading only partial byte register, and using decb below.
-        __ movb(cnt.reg(), Immediate(count));
-      } else {
-        __ movl(cnt.reg(), Immediate(count));
-      }
-      __ bind(&alloc_locals_loop);
-      __ push(kScratchRegister);
-      if (is_uint8(count)) {
-        __ decb(cnt.reg());
-      } else {
-        __ decl(cnt.reg());
-      }
-      __ j(not_zero, &alloc_locals_loop);
-#ifdef DEBUG
-      CHECK(masm()->SizeOfCodeGeneratedSince(&loop_size) < kLocalVarBound);
-#endif
-    }
-    for (int i = 0; i < count; i++) {
-      elements_.Add(initial_value);
-      stack_pointer_++;
-    }
-  }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
-  ASSERT(elements_[context_index()].is_memory());
-  __ movq(Operand(rbp, fp_relative(context_index())), rsi);
-}
-
-
-void VirtualFrame::RestoreContextRegister() {
-  ASSERT(elements_[context_index()].is_memory());
-  __ movq(rsi, Operand(rbp, fp_relative(context_index())));
-}
-
-
-void VirtualFrame::PushReceiverSlotAddress() {
-  Result temp = cgen()->allocator()->Allocate();
-  ASSERT(temp.is_valid());
-  __ lea(temp.reg(), ParameterAt(-1));
-  Push(&temp);
-}
-
-
-void VirtualFrame::EmitPop(Register reg) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  stack_pointer_--;
-  elements_.RemoveLast();
-  __ pop(reg);
-}
-
-
-void VirtualFrame::EmitPop(const Operand& operand) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  stack_pointer_--;
-  elements_.RemoveLast();
-  __ pop(operand);
-}
-
-
-void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(reg);
-}
-
-
-void VirtualFrame::EmitPush(const Operand& operand, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(operand);
-}
-
-
-void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ push(immediate);
-}
-
-
-void VirtualFrame::EmitPush(Smi* smi_value) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(TypeInfo::Smi()));
-  stack_pointer_++;
-  __ Push(smi_value);
-}
-
-
-void VirtualFrame::EmitPush(Handle<Object> value) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  TypeInfo info = TypeInfo::TypeFromValue(value);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ Push(value);
-}
-
-
-void VirtualFrame::EmitPush(Heap::RootListIndex index, TypeInfo info) {
-  ASSERT(stack_pointer_ == element_count() - 1);
-  elements_.Add(FrameElement::MemoryElement(info));
-  stack_pointer_++;
-  __ PushRoot(index);
-}
-
-
-void VirtualFrame::Push(Expression* expr) {
-  ASSERT(expr->IsTrivial());
-
-  Literal* lit = expr->AsLiteral();
-  if (lit != NULL) {
-    Push(lit->handle());
-    return;
-  }
-
-  VariableProxy* proxy = expr->AsVariableProxy();
-  if (proxy != NULL) {
-    Slot* slot = proxy->var()->AsSlot();
-    if (slot->type() == Slot::LOCAL) {
-      PushLocalAt(slot->index());
-      return;
-    }
-    if (slot->type() == Slot::PARAMETER) {
-      PushParameterAt(slot->index());
-      return;
-    }
-  }
-  UNREACHABLE();
-}
-
-
-void VirtualFrame::Push(Handle<Object> value) {
-  if (ConstantPoolOverflowed()) {
-    Result temp = cgen()->allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    if (value->IsSmi()) {
-      __ Move(temp.reg(), Smi::cast(*value));
-    } else {
-      __ movq(temp.reg(), value, RelocInfo::EMBEDDED_OBJECT);
-    }
-    Push(&temp);
-  } else {
-    FrameElement element =
-        FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
-    elements_.Add(element);
-  }
-}
-
-
-void VirtualFrame::Drop(int count) {
-  ASSERT(count >= 0);
-  ASSERT(height() >= count);
-  int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
-  // Emit code to lower the stack pointer if necessary.
-  if (num_virtual_elements < count) {
-    int num_dropped = count - num_virtual_elements;
-    stack_pointer_ -= num_dropped;
-    __ addq(rsp, Immediate(num_dropped * kPointerSize));
-  }
-
-  // Discard elements from the virtual frame and free any registers.
-  for (int i = 0; i < count; i++) {
-    FrameElement dropped = elements_.RemoveLast();
-    if (dropped.is_register()) {
-      Unuse(dropped.reg());
-    }
-  }
-}
-
-
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
-  FrameElement original = elements_[index];
-
-  // Is this element the backing store of any copies?
-  int new_backing_index = kIllegalIndex;
-  if (original.is_copied()) {
-    // Verify it is copied, and find first copy.
-    for (int i = index + 1; i < element_count(); i++) {
-      if (elements_[i].is_copy() && elements_[i].index() == index) {
-        new_backing_index = i;
-        break;
-      }
-    }
-  }
-
-  if (new_backing_index == kIllegalIndex) {
-    // No copies found, return kIllegalIndex.
-    if (original.is_register()) {
-      Unuse(original.reg());
-    }
-    elements_[index] = FrameElement::InvalidElement();
-    return kIllegalIndex;
-  }
-
-  // This is the backing store of copies.
-  Register backing_reg;
-  if (original.is_memory()) {
-    Result fresh = cgen()->allocator()->Allocate();
-    ASSERT(fresh.is_valid());
-    Use(fresh.reg(), new_backing_index);
-    backing_reg = fresh.reg();
-    __ movq(backing_reg, Operand(rbp, fp_relative(index)));
-  } else {
-    // The original was in a register.
-    backing_reg = original.reg();
-    set_register_location(backing_reg, new_backing_index);
-  }
-  // Invalidate the element at index.
-  elements_[index] = FrameElement::InvalidElement();
-  // Set the new backing element.
-  if (elements_[new_backing_index].is_synced()) {
-    elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg,
-                                      FrameElement::SYNCED,
-                                      original.type_info());
-  } else {
-    elements_[new_backing_index] =
-        FrameElement::RegisterElement(backing_reg,
-                                      FrameElement::NOT_SYNCED,
-                                      original.type_info());
-  }
-  // Update the other copies.
-  for (int i = new_backing_index + 1; i < element_count(); i++) {
-    if (elements_[i].is_copy() && elements_[i].index() == index) {
-      elements_[i].set_index(new_backing_index);
-      elements_[new_backing_index].set_copied();
-    }
-  }
-  return new_backing_index;
-}
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
-  ASSERT(index >= 0);
-  ASSERT(index <= element_count());
-  FrameElement original = elements_[index];
-  int new_backing_store_index = InvalidateFrameSlotAt(index);
-  if (new_backing_store_index != kIllegalIndex) {
-    elements_.Add(CopyElementAt(new_backing_store_index));
-    return;
-  }
-
-  switch (original.type()) {
-    case FrameElement::MEMORY: {
-      // Emit code to load the original element's data into a register.
-      // Push that register as a FrameElement on top of the frame.
-      Result fresh = cgen()->allocator()->Allocate();
-      ASSERT(fresh.is_valid());
-      FrameElement new_element =
-          FrameElement::RegisterElement(fresh.reg(),
-                                        FrameElement::NOT_SYNCED,
-                                        original.type_info());
-      Use(fresh.reg(), element_count());
-      elements_.Add(new_element);
-      __ movq(fresh.reg(), Operand(rbp, fp_relative(index)));
-      break;
-    }
-    case FrameElement::REGISTER:
-      Use(original.reg(), element_count());
-      // Fall through.
-    case FrameElement::CONSTANT:
-    case FrameElement::COPY:
-      original.clear_sync();
-      elements_.Add(original);
-      break;
-    case FrameElement::INVALID:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
-  // Store the value on top of the frame to the virtual frame slot at
-  // a given index.  The value on top of the frame is left in place.
-  // This is a duplicating operation, so it can create copies.
-  ASSERT(index >= 0);
-  ASSERT(index < element_count());
-
-  int top_index = element_count() - 1;
-  FrameElement top = elements_[top_index];
-  FrameElement original = elements_[index];
-  if (top.is_copy() && top.index() == index) return;
-  ASSERT(top.is_valid());
-
-  InvalidateFrameSlotAt(index);
-
-  // InvalidateFrameSlotAt can potentially change any frame element, due
-  // to spilling registers to allocate temporaries in order to preserve
-  // the copy-on-write semantics of aliased elements.  Reload top from
-  // the frame.
-  top = elements_[top_index];
-
-  if (top.is_copy()) {
-    // There are two cases based on the relative positions of the
-    // stored-to slot and the backing slot of the top element.
-    int backing_index = top.index();
-    ASSERT(backing_index != index);
-    if (backing_index < index) {
-      // 1. The top element is a copy of a slot below the stored-to
-      // slot.  The stored-to slot becomes an unsynced copy of that
-      // same backing slot.
-      elements_[index] = CopyElementAt(backing_index);
-    } else {
-      // 2. The top element is a copy of a slot above the stored-to
-      // slot.  The stored-to slot becomes the new (unsynced) backing
-      // slot and both the top element and the element at the former
-      // backing slot become copies of it.  The sync state of the top
-      // and former backing elements is preserved.
-      FrameElement backing_element = elements_[backing_index];
-      ASSERT(backing_element.is_memory() || backing_element.is_register());
-      if (backing_element.is_memory()) {
-        // Because sets of copies are canonicalized to be backed by
-        // their lowest frame element, and because memory frame
-        // elements are backed by the corresponding stack address, we
-        // have to move the actual value down in the stack.
-        //
-        // TODO(209): consider allocating the stored-to slot to the
-        // temp register.  Alternatively, allow copies to appear in
-        // any order in the frame and lazily move the value down to
-        // the slot.
-        __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
-        __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
-      } else {
-        set_register_location(backing_element.reg(), index);
-        if (backing_element.is_synced()) {
-          // If the element is a register, we will not actually move
-          // anything on the stack but only update the virtual frame
-          // element.
-          backing_element.clear_sync();
-        }
-      }
-      elements_[index] = backing_element;
-
-      // The old backing element becomes a copy of the new backing
-      // element.
-      FrameElement new_element = CopyElementAt(index);
-      elements_[backing_index] = new_element;
-      if (backing_element.is_synced()) {
-        elements_[backing_index].set_sync();
-      }
-
-      // All the copies of the old backing element (including the top
-      // element) become copies of the new backing element.
-      for (int i = backing_index + 1; i < element_count(); i++) {
-        if (elements_[i].is_copy() && elements_[i].index() == backing_index) {
-          elements_[i].set_index(index);
-        }
-      }
-    }
-    return;
-  }
-
-  // Move the top element to the stored-to slot and replace it (the
-  // top element) with a copy.
-  elements_[index] = top;
-  if (top.is_memory()) {
-    // TODO(209): consider allocating the stored-to slot to the temp
-    // register.  Alternatively, allow copies to appear in any order
-    // in the frame and lazily move the value down to the slot.
-    FrameElement new_top = CopyElementAt(index);
-    new_top.set_sync();
-    elements_[top_index] = new_top;
-
-    // The sync state of the former top element is correct (synced).
-    // Emit code to move the value down in the frame.
-    __ movq(kScratchRegister, Operand(rsp, 0));
-    __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
-  } else if (top.is_register()) {
-    set_register_location(top.reg(), index);
-    // The stored-to slot has the (unsynced) register reference and
-    // the top element becomes a copy.  The sync state of the top is
-    // preserved.
-    FrameElement new_top = CopyElementAt(index);
-    if (top.is_synced()) {
-      new_top.set_sync();
-      elements_[index].clear_sync();
-    }
-    elements_[top_index] = new_top;
-  } else {
-    // The stored-to slot holds the same value as the top but
-    // unsynced.  (We do not have copies of constants yet.)
-    ASSERT(top.is_constant());
-    elements_[index].clear_sync();
-  }
-}
-
-
-void VirtualFrame::MakeMergable() {
-  for (int i = 0; i < element_count(); i++) {
-    FrameElement element = elements_[i];
-
-    // In all cases we have to reset the number type information
-    // to unknown for a mergable frame because of incoming back edges.
-    if (element.is_constant() || element.is_copy()) {
-      if (element.is_synced()) {
-        // Just spill.
-        elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown());
-      } else {
-        // Allocate to a register.
-        FrameElement backing_element;  // Invalid if not a copy.
-        if (element.is_copy()) {
-          backing_element = elements_[element.index()];
-        }
-        Result fresh = cgen()->allocator()->Allocate();
-        ASSERT(fresh.is_valid());  // A register was spilled if all were in use.
-        elements_[i] =
-            FrameElement::RegisterElement(fresh.reg(),
-                                          FrameElement::NOT_SYNCED,
-                                          TypeInfo::Unknown());
-        Use(fresh.reg(), i);
-
-        // Emit a move.
-        if (element.is_constant()) {
-          __ Move(fresh.reg(), element.handle());
-        } else {
-          ASSERT(element.is_copy());
-          // Copies are only backed by register or memory locations.
-          if (backing_element.is_register()) {
-            // The backing store may have been spilled by allocating,
-            // but that's OK.  If it was, the value is right where we
-            // want it.
-            if (!fresh.reg().is(backing_element.reg())) {
-              __ movq(fresh.reg(), backing_element.reg());
-            }
-          } else {
-            ASSERT(backing_element.is_memory());
-            __ movq(fresh.reg(), Operand(rbp, fp_relative(element.index())));
-          }
-        }
-      }
-      // No need to set the copied flag --- there are no copies.
-    } else {
-      // Clear the copy flag of non-constant, non-copy elements.
-      // They cannot be copied because copies are not allowed.
-      // The copy flag is not relied on before the end of this loop,
-      // including when registers are spilled.
-      elements_[i].clear_copied();
-      elements_[i].set_type_info(TypeInfo::Unknown());
-    }
-  }
-}
-
-
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
-  Comment cmnt(masm(), "[ Merge frame");
-  // We should always be merging the code generator's current frame to an
-  // expected frame.
-  ASSERT(cgen()->frame() == this);
-
-  // Adjust the stack pointer upward (toward the top of the virtual
-  // frame) if necessary.
-  if (stack_pointer_ < expected->stack_pointer_) {
-    int difference = expected->stack_pointer_ - stack_pointer_;
-    stack_pointer_ = expected->stack_pointer_;
-    __ subq(rsp, Immediate(difference * kPointerSize));
-  }
-
-  MergeMoveRegistersToMemory(expected);
-  MergeMoveRegistersToRegisters(expected);
-  MergeMoveMemoryToRegisters(expected);
-
-  // Adjust the stack pointer downward if necessary.
-  if (stack_pointer_ > expected->stack_pointer_) {
-    int difference = stack_pointer_ - expected->stack_pointer_;
-    stack_pointer_ = expected->stack_pointer_;
-    __ addq(rsp, Immediate(difference * kPointerSize));
-  }
-
-  // At this point, the frames should be identical.
-  ASSERT(Equals(expected));
-}
-
-
-void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
-  ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
-  // Move registers, constants, and copies to memory.  Perform moves
-  // from the top downward in the frame in order to leave the backing
-  // stores of copies in registers.
-  for (int i = element_count() - 1; i >= 0; i--) {
-    FrameElement target = expected->elements_[i];
-    if (target.is_register()) continue;  // Handle registers later.
-    if (target.is_memory()) {
-      FrameElement source = elements_[i];
-      switch (source.type()) {
-        case FrameElement::INVALID:
-          // Not a legal merge move.
-          UNREACHABLE();
-          break;
-
-        case FrameElement::MEMORY:
-          // Already in place.
-          break;
-
-        case FrameElement::REGISTER:
-          Unuse(source.reg());
-          if (!source.is_synced()) {
-            __ movq(Operand(rbp, fp_relative(i)), source.reg());
-          }
-          break;
-
-        case FrameElement::CONSTANT:
-          if (!source.is_synced()) {
-            __ Move(Operand(rbp, fp_relative(i)), source.handle());
-          }
-          break;
-
-        case FrameElement::COPY:
-          if (!source.is_synced()) {
-            int backing_index = source.index();
-            FrameElement backing_element = elements_[backing_index];
-            if (backing_element.is_memory()) {
-              __ movq(kScratchRegister,
-                       Operand(rbp, fp_relative(backing_index)));
-              __ movq(Operand(rbp, fp_relative(i)), kScratchRegister);
-            } else {
-              ASSERT(backing_element.is_register());
-              __ movq(Operand(rbp, fp_relative(i)), backing_element.reg());
-            }
-          }
-          break;
-      }
-    }
-    elements_[i] = target;
-  }
-}
-
-
-void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
-  // We have already done X-to-memory moves.
-  ASSERT(stack_pointer_ >= expected->stack_pointer_);
-
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    // Move the right value into register i if it is currently in a register.
-    int index = expected->register_location(i);
-    int use_index = register_location(i);
-    // Skip if register i is unused in the target or else if source is
-    // not a register (this is not a register-to-register move).
-    if (index == kIllegalIndex || !elements_[index].is_register()) continue;
-
-    Register target = RegisterAllocator::ToRegister(i);
-    Register source = elements_[index].reg();
-    if (index != use_index) {
-      if (use_index == kIllegalIndex) {  // Target is currently unused.
-        // Copy contents of source from source to target.
-        // Set frame element register to target.
-        Use(target, index);
-        Unuse(source);
-        __ movq(target, source);
-      } else {
-        // Exchange contents of registers source and target.
-        // Nothing except the register backing use_index has changed.
-        elements_[use_index].set_reg(source);
-        set_register_location(target, index);
-        set_register_location(source, use_index);
-        __ xchg(source, target);
-      }
-    }
-
-    if (!elements_[index].is_synced() &&
-        expected->elements_[index].is_synced()) {
-      __ movq(Operand(rbp, fp_relative(index)), target);
-    }
-    elements_[index] = expected->elements_[index];
-  }
-}
-
-
-void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
-  // Move memory, constants, and copies to registers.  This is the
-  // final step and since it is not done from the bottom up, but in
-  // register code order, we have special code to ensure that the backing
-  // elements of copies are in their correct locations when we
-  // encounter the copies.
-  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-    int index = expected->register_location(i);
-    if (index != kIllegalIndex) {
-      FrameElement source = elements_[index];
-      FrameElement target = expected->elements_[index];
-      Register target_reg = RegisterAllocator::ToRegister(i);
-      ASSERT(target.reg().is(target_reg));
-      switch (source.type()) {
-        case FrameElement::INVALID:  // Fall through.
-          UNREACHABLE();
-          break;
-        case FrameElement::REGISTER:
-          ASSERT(source.Equals(target));
-          // Go to next iteration.  Skips Use(target_reg) and syncing
-          // below.  It is safe to skip syncing because a target
-          // register frame element would only be synced if all source
-          // elements were.
-          continue;
-          break;
-        case FrameElement::MEMORY:
-          ASSERT(index <= stack_pointer_);
-          __ movq(target_reg, Operand(rbp, fp_relative(index)));
-          break;
-
-        case FrameElement::CONSTANT:
-          __ Move(target_reg, source.handle());
-          break;
-
-        case FrameElement::COPY: {
-          int backing_index = source.index();
-          FrameElement backing = elements_[backing_index];
-          ASSERT(backing.is_memory() || backing.is_register());
-          if (backing.is_memory()) {
-            ASSERT(backing_index <= stack_pointer_);
-            // Optimization: if the backing store should also move
-            // to a register, move it to its register first.
-            if (expected->elements_[backing_index].is_register()) {
-              FrameElement new_backing = expected->elements_[backing_index];
-              Register new_backing_reg = new_backing.reg();
-              ASSERT(!is_used(new_backing_reg));
-              elements_[backing_index] = new_backing;
-              Use(new_backing_reg, backing_index);
-              __ movq(new_backing_reg,
-                      Operand(rbp, fp_relative(backing_index)));
-              __ movq(target_reg, new_backing_reg);
-            } else {
-              __ movq(target_reg, Operand(rbp, fp_relative(backing_index)));
-            }
-          } else {
-            __ movq(target_reg, backing.reg());
-          }
-        }
-      }
-      // Ensure the proper sync state.
-      if (target.is_synced() && !source.is_synced()) {
-        __ movq(Operand(rbp, fp_relative(index)), target_reg);
-      }
-      Use(target_reg, index);
-      elements_[index] = target;
-    }
-  }
-}
-
-
-Result VirtualFrame::Pop() {
-  FrameElement element = elements_.RemoveLast();
-  int index = element_count();
-  ASSERT(element.is_valid());
-
-  // Get number type information of the result.
-  TypeInfo info;
-  if (!element.is_copy()) {
-    info = element.type_info();
-  } else {
-    info = elements_[element.index()].type_info();
-  }
-
-  bool pop_needed = (stack_pointer_ == index);
-  if (pop_needed) {
-    stack_pointer_--;
-    if (element.is_memory()) {
-      Result temp = cgen()->allocator()->Allocate();
-      ASSERT(temp.is_valid());
-      __ pop(temp.reg());
-      temp.set_type_info(info);
-      return temp;
-    }
-
-    __ addq(rsp, Immediate(kPointerSize));
-  }
-  ASSERT(!element.is_memory());
-
-  // The top element is a register, constant, or a copy.  Unuse
-  // registers and follow copies to their backing store.
-  if (element.is_register()) {
-    Unuse(element.reg());
-  } else if (element.is_copy()) {
-    ASSERT(element.index() < index);
-    index = element.index();
-    element = elements_[index];
-  }
-  ASSERT(!element.is_copy());
-
-  // The element is memory, a register, or a constant.
-  if (element.is_memory()) {
-    // Memory elements could only be the backing store of a copy.
-    // Allocate the original to a register.
-    ASSERT(index <= stack_pointer_);
-    Result temp = cgen()->allocator()->Allocate();
-    ASSERT(temp.is_valid());
-    Use(temp.reg(), index);
-    FrameElement new_element =
-        FrameElement::RegisterElement(temp.reg(),
-                                      FrameElement::SYNCED,
-                                      element.type_info());
-    // Preserve the copy flag on the element.
-    if (element.is_copied()) new_element.set_copied();
-    elements_[index] = new_element;
-    __ movq(temp.reg(), Operand(rbp, fp_relative(index)));
-    return Result(temp.reg(), info);
-  } else if (element.is_register()) {
-    return Result(element.reg(), info);
-  } else {
-    ASSERT(element.is_constant());
-    return Result(element.handle());
-  }
-}
-
-
-Result VirtualFrame::RawCallStub(CodeStub* stub) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallStub(stub);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
-  PrepareForCall(0, 0);
-  arg->ToRegister(rax);
-  arg->Unuse();
-  return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
-  PrepareForCall(0, 0);
-
-  if (arg0->is_register() && arg0->reg().is(rax)) {
-    if (arg1->is_register() && arg1->reg().is(rdx)) {
-      // Wrong registers.
-      __ xchg(rax, rdx);
-    } else {
-      // Register rdx is free for arg0, which frees rax for arg1.
-      arg0->ToRegister(rdx);
-      arg1->ToRegister(rax);
-    }
-  } else {
-    // Register rax is free for arg1, which guarantees rdx is free for
-    // arg0.
-    arg1->ToRegister(rax);
-    arg0->ToRegister(rdx);
-  }
-
-  arg0->Unuse();
-  arg1->Unuse();
-  return RawCallStub(stub);
-}
-
-
-Result VirtualFrame::CallJSFunction(int arg_count) {
-  Result function = Pop();
-
-  // InvokeFunction requires the function in rdi.  Move it there.
-  function.ToRegister(rdi);
-  function.Unuse();
-
-  // +1 for receiver.
-  PrepareForCall(arg_count + 1, arg_count + 1);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  ParameterCount count(arg_count);
-  __ InvokeFunction(rdi, count, CALL_FUNCTION);
-  RestoreContextRegister();
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
-  // Emit code to write elements below the stack pointer to their
-  // (already allocated) stack address.
-  ASSERT(index <= stack_pointer_);
-  FrameElement element = elements_[index];
-  ASSERT(!element.is_synced());
-  switch (element.type()) {
-    case FrameElement::INVALID:
-      break;
-
-    case FrameElement::MEMORY:
-      // This function should not be called with synced elements.
-      // (memory elements are always synced).
-      UNREACHABLE();
-      break;
-
-    case FrameElement::REGISTER:
-      __ movq(Operand(rbp, fp_relative(index)), element.reg());
-      break;
-
-    case FrameElement::CONSTANT:
-      __ Move(Operand(rbp, fp_relative(index)), element.handle());
-      break;
-
-    case FrameElement::COPY: {
-      int backing_index = element.index();
-      FrameElement backing_element = elements_[backing_index];
-      if (backing_element.is_memory()) {
-        __ movq(kScratchRegister, Operand(rbp, fp_relative(backing_index)));
-        __ movq(Operand(rbp, fp_relative(index)), kScratchRegister);
-      } else {
-        ASSERT(backing_element.is_register());
-        __ movq(Operand(rbp, fp_relative(index)), backing_element.reg());
-      }
-      break;
-    }
-  }
-  elements_[index].set_sync();
-}
-
-
-void VirtualFrame::SyncElementByPushing(int index) {
-  // Sync an element of the frame that is just above the stack pointer
-  // by pushing it.
-  ASSERT(index == stack_pointer_ + 1);
-  stack_pointer_++;
-  FrameElement element = elements_[index];
-
-  switch (element.type()) {
-    case FrameElement::INVALID:
-      __ Push(Smi::FromInt(0));
-      break;
-
-    case FrameElement::MEMORY:
-      // No memory elements exist above the stack pointer.
-      UNREACHABLE();
-      break;
-
-    case FrameElement::REGISTER:
-      __ push(element.reg());
-      break;
-
-    case FrameElement::CONSTANT:
-      __ Move(kScratchRegister, element.handle());
-      __ push(kScratchRegister);
-      break;
-
-    case FrameElement::COPY: {
-      int backing_index = element.index();
-      FrameElement backing = elements_[backing_index];
-      ASSERT(backing.is_memory() || backing.is_register());
-      if (backing.is_memory()) {
-        __ push(Operand(rbp, fp_relative(backing_index)));
-      } else {
-        __ push(backing.reg());
-      }
-      break;
-    }
-  }
-  elements_[index].set_sync();
-}
-
-
-// Clear the dirty bits for the range of elements in
-// [min(stack_pointer_ + 1, begin), end].
-void VirtualFrame::SyncRange(int begin, int end) {
-  ASSERT(begin >= 0);
-  ASSERT(end < element_count());
-  // Sync elements below the range if they have not been materialized
-  // on the stack.
-  int start = Min(begin, stack_pointer_ + 1);
-  int end_or_stack_pointer = Min(stack_pointer_, end);
-  // Emit normal push instructions for elements above stack pointer
-  // and use mov instructions if we are below stack pointer.
-  int i = start;
-
-  while (i <= end_or_stack_pointer) {
-    if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
-    i++;
-  }
-  while (i <= end) {
-    SyncElementByPushing(i);
-    i++;
-  }
-}
-
-
-// -------------------------------------------------------------------------
-// Virtual frame stub and IC calling functions.
-
-Result VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(f, arg_count);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ CallRuntime(id, arg_count);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-void VirtualFrame::DebugBreak() {
-  PrepareForCall(0, 0);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ DebugBreak();
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-}
-#endif
-
-
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeFlag flag,
-                                   int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ InvokeBuiltin(id, flag);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
-                                       RelocInfo::Mode rmode) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ Call(code, rmode);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
-// This function assumes that the only results that could be in a_reg or b_reg
-// are a and b.  Other results can be live, but must not be in a_reg or b_reg.
-void VirtualFrame::MoveResultsToRegisters(Result* a,
-                                          Result* b,
-                                          Register a_reg,
-                                          Register b_reg) {
-  ASSERT(!a_reg.is(b_reg));
-  // Assert that cgen()->allocator()->count(a_reg) is accounted for by a and b.
-  ASSERT(cgen()->allocator()->count(a_reg) <= 2);
-  ASSERT(cgen()->allocator()->count(a_reg) != 2 || a->reg().is(a_reg));
-  ASSERT(cgen()->allocator()->count(a_reg) != 2 || b->reg().is(a_reg));
-  ASSERT(cgen()->allocator()->count(a_reg) != 1 ||
-         (a->is_register() && a->reg().is(a_reg)) ||
-         (b->is_register() && b->reg().is(a_reg)));
-  // Assert that cgen()->allocator()->count(b_reg) is accounted for by a and b.
-  ASSERT(cgen()->allocator()->count(b_reg) <= 2);
-  ASSERT(cgen()->allocator()->count(b_reg) != 2 || a->reg().is(b_reg));
-  ASSERT(cgen()->allocator()->count(b_reg) != 2 || b->reg().is(b_reg));
-  ASSERT(cgen()->allocator()->count(b_reg) != 1 ||
-         (a->is_register() && a->reg().is(b_reg)) ||
-         (b->is_register() && b->reg().is(b_reg)));
-
-  if (a->is_register() && a->reg().is(a_reg)) {
-    b->ToRegister(b_reg);
-  } else if (!cgen()->allocator()->is_used(a_reg)) {
-    a->ToRegister(a_reg);
-    b->ToRegister(b_reg);
-  } else if (cgen()->allocator()->is_used(b_reg)) {
-    // a must be in b_reg, b in a_reg.
-    __ xchg(a_reg, b_reg);
-    // Results a and b will be invalidated, so it is ok if they are switched.
-  } else {
-    b->ToRegister(b_reg);
-    a->ToRegister(a_reg);
-  }
-  a->Unuse();
-  b->Unuse();
-}
-
-
-Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
-  // Name and receiver are on the top of the frame.  Both are dropped.
-  // The IC expects name in rcx and receiver in rax.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kLoadIC_Initialize));
-  Result name = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-  MoveResultsToRegisters(&name, &receiver, rcx, rax);
-
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
-  // Key and receiver are on top of the frame. Put them in rax and rdx.
-  Result key = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-  MoveResultsToRegisters(&key, &receiver, rax, rdx);
-
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kKeyedLoadIC_Initialize));
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallStoreIC(Handle<String> name,
-                                 bool is_contextual,
-                                 StrictModeFlag strict_mode) {
-  // Value and (if not contextual) receiver are on top of the frame.
-  // The IC expects name in rcx, value in rax, and receiver in rdx.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kStoreIC_Initialize_Strict
-                                   : Builtins::kStoreIC_Initialize));
-  Result value = Pop();
-  RelocInfo::Mode mode;
-  if (is_contextual) {
-    PrepareForCall(0, 0);
-    value.ToRegister(rax);
-    __ movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    value.Unuse();
-    mode = RelocInfo::CODE_TARGET_CONTEXT;
-  } else {
-    Result receiver = Pop();
-    PrepareForCall(0, 0);
-    MoveResultsToRegisters(&value, &receiver, rax, rdx);
-    mode = RelocInfo::CODE_TARGET;
-  }
-  __ Move(rcx, name);
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
-  // Value, key, and receiver are on the top of the frame.  The IC
-  // expects value in rax, key in rcx, and receiver in rdx.
-  Result value = Pop();
-  Result key = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-  if (!cgen()->allocator()->is_used(rax) ||
-      (value.is_register() && value.reg().is(rax))) {
-    if (!cgen()->allocator()->is_used(rax)) {
-      value.ToRegister(rax);
-    }
-    MoveResultsToRegisters(&key, &receiver, rcx, rdx);
-    value.Unuse();
-  } else if (!cgen()->allocator()->is_used(rcx) ||
-             (key.is_register() && key.reg().is(rcx))) {
-    if (!cgen()->allocator()->is_used(rcx)) {
-      key.ToRegister(rcx);
-    }
-    MoveResultsToRegisters(&value, &receiver, rax, rdx);
-    key.Unuse();
-  } else if (!cgen()->allocator()->is_used(rdx) ||
-             (receiver.is_register() && receiver.reg().is(rdx))) {
-    if (!cgen()->allocator()->is_used(rdx)) {
-      receiver.ToRegister(rdx);
-    }
-    MoveResultsToRegisters(&key, &value, rcx, rax);
-    receiver.Unuse();
-  } else {
-    // All three registers are used, and no value is in the correct place.
-    // We have one of the two circular permutations of rax, rcx, rdx.
-    ASSERT(value.is_register());
-    if (value.reg().is(rcx)) {
-      __ xchg(rax, rdx);
-      __ xchg(rax, rcx);
-    } else {
-      __ xchg(rax, rcx);
-      __ xchg(rax, rdx);
-    }
-    value.Unuse();
-    key.Unuse();
-    receiver.Unuse();
-  }
-
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      (strict_mode == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict
-                                   : Builtins::kKeyedStoreIC_Initialize));
-  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
-
-
-Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
-                                int arg_count,
-                                int loop_nesting) {
-  // Function name, arguments, and receiver are found on top of the frame
-  // and dropped by the call.  The IC expects the name in rcx and the rest
-  // on the stack, and drops them all.
-  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
-  Handle<Code> ic =
-      ISOLATE->stub_cache()->ComputeCallInitialize(arg_count, in_loop);
-  Result name = Pop();
-  // Spill args, receiver, and function.  The call will drop args and
-  // receiver.
-  PrepareForCall(arg_count + 1, arg_count + 1);
-  name.ToRegister(rcx);
-  name.Unuse();
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
-                                     int arg_count,
-                                     int loop_nesting) {
-  // Function name, arguments, and receiver are found on top of the frame
-  // and dropped by the call.  The IC expects the name in rcx and the rest
-  // on the stack, and drops them all.
-  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
-  Handle<Code> ic =
-      ISOLATE->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
-  Result name = Pop();
-  // Spill args, receiver, and function.  The call will drop args and
-  // receiver.
-  PrepareForCall(arg_count + 1, arg_count + 1);
-  name.ToRegister(rcx);
-  name.Unuse();
-  return RawCallCodeObject(ic, mode);
-}
-
-
-Result VirtualFrame::CallConstructor(int arg_count) {
-  // Arguments, receiver, and function are on top of the frame.  The
-  // IC expects arg count in rax, function in rdi, and the arguments
-  // and receiver on the stack.
-  Handle<Code> ic(Isolate::Current()->builtins()->builtin(
-      Builtins::kJSConstructCall));
-  // Duplicate the function before preparing the frame.
-  PushElementAt(arg_count);
-  Result function = Pop();
-  PrepareForCall(arg_count + 1, arg_count + 1);  // Spill function and args.
-  function.ToRegister(rdi);
-
-  // Constructors are called with the number of arguments in register
-  // rax for now. Another option would be to have separate construct
-  // call trampolines for the different argument counts encountered.
-  Result num_args = cgen()->allocator()->Allocate(rax);
-  ASSERT(num_args.is_valid());
-  __ Set(num_args.reg(), arg_count);
-
-  function.Unuse();
-  num_args.Unuse();
-  return RawCallCodeObject(ic, RelocInfo::CONSTRUCT_CALL);
-}
-
-
-void VirtualFrame::PushTryHandler(HandlerType type) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  // Grow the expression stack by handler size less one (the return
-  // address is already pushed by a call instruction).
-  Adjust(kHandlerSize - 1);
-  __ PushTryHandler(IN_JAVASCRIPT, type);
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_X64
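
The core of the file deleted above is MergeTo(): bringing the current
virtual frame into exactly the layout an expected frame describes, in
three passes whose order matters.  Values headed for memory are stored
first, walking from the top of the frame downward so that registers
backing copies stay live until the copies are written; register-to-register
shuffles run second, with xchg resolving two-element cycles without a
scratch register; loads from memory into registers come last, once their
target registers are free.  A heavily simplified standalone model of
passes one and three (element kinds reduced to register/memory, no
constants or copies; illustrative only, not V8's types):

    #include <cstdio>
    #include <vector>

    enum Kind { MEMORY, REGISTER };
    struct Element { Kind kind; int reg; };  // reg meaningful iff REGISTER

    void MergeTo(std::vector<Element>& cur, const std::vector<Element>& want) {
      // Pass 1: store register-backed slots that must end in memory,
      // top down, so source registers are freed before they are reused.
      for (int i = (int)cur.size() - 1; i >= 0; i--) {
        if (want[i].kind == MEMORY && cur[i].kind == REGISTER) {
          printf("store r%d -> slot %d\n", cur[i].reg, i);
          cur[i] = want[i];
        }
      }
      // Pass 2 (register-to-register shuffles, xchg for two-cycles) is
      // omitted in this sketch.
      // Pass 3: load remaining memory-backed slots into their registers.
      for (size_t i = 0; i < cur.size(); i++) {
        if (want[i].kind == REGISTER && cur[i].kind == MEMORY) {
          printf("load slot %zu -> r%d\n", i, want[i].reg);
          cur[i] = want[i];
        }
      }
    }

    int main() {
      std::vector<Element> cur  = {{REGISTER, 3}, {MEMORY, 0}};
      std::vector<Element> want = {{MEMORY, 0}, {REGISTER, 3}};
      MergeTo(cur, want);  // store r3 first, so slot 1 can load into r3
    }
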
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
deleted file mode 100644
index aac9864..0000000
--- a/src/x64/virtual-frame-x64.h
+++ /dev/null
@@ -1,597 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_X64_VIRTUAL_FRAME_X64_H_
-#define V8_X64_VIRTUAL_FRAME_X64_H_
-
-#include "type-info.h"
-#include "register-allocator.h"
-#include "scopes.h"
-#include "codegen.h"
-
-namespace v8 {
-namespace internal {
-
-// -------------------------------------------------------------------------
-// Virtual frames
-//
-// The virtual frame is an abstraction of the physical stack frame.  It
-// encapsulates the parameters, frame-allocated locals, and the expression
-// stack.  It supports push/pop operations on the expression stack, as well
-// as random access to the expression stack elements, locals, and
-// parameters.
-
-class VirtualFrame : public ZoneObject {
- public:
-  // A utility class to introduce a scope where the virtual frame is
-  // expected to remain spilled.  The constructor spills the code
-  // generator's current frame, but no attempt is made to require it
-  // to stay spilled.  It is intended as documentation while the code
-  // generator is being transformed.
-  class SpilledScope BASE_EMBEDDED {
-   public:
-    SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
-      ASSERT(cgen()->has_valid_frame());
-      cgen()->frame()->SpillAll();
-      cgen()->set_in_spilled_code(true);
-    }
-
-    ~SpilledScope() {
-      cgen()->set_in_spilled_code(previous_state_);
-    }
-
-   private:
-    bool previous_state_;
-
-    CodeGenerator* cgen() {
-      return CodeGeneratorScope::Current(Isolate::Current());
-    }
-  };
-
-  // An illegal index into the virtual frame.
-  static const int kIllegalIndex = -1;
-
-  // Construct an initial virtual frame on entry to a JS function.
-  inline VirtualFrame();
-
-  // Construct a virtual frame as a clone of an existing one.
-  explicit inline VirtualFrame(VirtualFrame* original);
-
-  CodeGenerator* cgen() {
-    return CodeGeneratorScope::Current(Isolate::Current());
-  }
-
-  MacroAssembler* masm() { return cgen()->masm(); }
-
-  // Create a duplicate of an existing valid frame element.
-  FrameElement CopyElementAt(int index,
-    TypeInfo info = TypeInfo::Uninitialized());
-
-  // The number of elements on the virtual frame.
-  int element_count() { return elements_.length(); }
-
-  // The height of the virtual expression stack.
-  int height() {
-    return element_count() - expression_base_index();
-  }
-
-  int register_location(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num];
-  }
-
-  inline int register_location(Register reg);
-
-  inline void set_register_location(Register reg, int index);
-
-  bool is_used(int num) {
-    ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
-    return register_locations_[num] != kIllegalIndex;
-  }
-
-  inline bool is_used(Register reg);
-
-  // Add extra in-memory elements to the top of the frame to match an actual
-  // frame (eg, the frame after an exception handler is pushed).  No code is
-  // emitted.
-  void Adjust(int count);
-
-  // Forget count elements from the top of the frame all in-memory
-  // (including synced) and adjust the stack pointer downward, to
-  // match an external frame effect (examples include a call removing
-  // its arguments, and exiting a try/catch removing an exception
-  // handler).  No code will be emitted.
-  void Forget(int count) {
-    ASSERT(count >= 0);
-    ASSERT(stack_pointer_ == element_count() - 1);
-    stack_pointer_ -= count;
-    ForgetElements(count);
-  }
-
-  // Forget count elements from the top of the frame without adjusting
-  // the stack pointer downward.  This is used, for example, before
-  // merging frames at break, continue, and return targets.
-  void ForgetElements(int count);
-
-  // Spill all values from the frame to memory.
-  inline void SpillAll();
-
-  // Spill all occurrences of a specific register from the frame.
-  void Spill(Register reg) {
-    if (is_used(reg)) SpillElementAt(register_location(reg));
-  }
-
-  // Spill all occurrences of an arbitrary register if possible.  Return the
-  // register spilled or no_reg if it was not possible to free any register
-  // (ie, they all have frame-external references).
-  Register SpillAnyRegister();
-
-  // Spill the top element of the frame to memory.
-  void SpillTop() { SpillElementAt(element_count() - 1); }
-
-  // Sync the range of elements in [begin, end] with memory.
-  void SyncRange(int begin, int end);
-
-  // Make this frame so that an arbitrary frame of the same height can
-  // be merged to it.  Copies and constants are removed from the frame.
-  void MakeMergable();
-
-  // Prepare this virtual frame for merging to an expected frame by
-  // performing some state changes that do not require generating
-  // code.  It is guaranteed that no code will be generated.
-  void PrepareMergeTo(VirtualFrame* expected);
-
-  // Make this virtual frame have a state identical to an expected virtual
-  // frame.  As a side effect, code may be emitted to make this frame match
-  // the expected one.
-  void MergeTo(VirtualFrame* expected);
-
-  // Detach a frame from its code generator, perhaps temporarily.  This
-  // tells the register allocator that it is free to use frame-internal
-  // registers.  Used when the code generator's frame is switched from this
-  // one to NULL by an unconditional jump.
-  void DetachFromCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Unuse(i);
-    }
-  }
-
-  // (Re)attach a frame to its code generator.  This informs the register
-  // allocator that the frame-internal register references are active again.
-  // Used when a code generator's frame is switched from NULL to this one by
-  // binding a label.
-  void AttachToCodeGenerator() {
-    RegisterAllocator* cgen_allocator = cgen()->allocator();
-    for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
-      if (is_used(i)) cgen_allocator->Use(i);
-    }
-  }
-
-  // Emit code for the physical JS entry and exit frame sequences.  After
-  // calling Enter, the virtual frame is ready for use; and after calling
-  // Exit it should not be used.  Note that Enter does not allocate space in
-  // the physical frame for storing frame-allocated locals.
-  void Enter();
-  void Exit();
-
-  // Prepare for returning from the frame by spilling locals.  This
-  // avoids generating unnecessary merge code when jumping to the
-  // shared return site.  Emits code for spills.
-  inline void PrepareForReturn();
-
-  // Number of local variables at or above which we allocate with a loop.
-  static const int kLocalVarBound = 14;
-
-  // Allocate and initialize the frame-allocated locals.
-  void AllocateStackSlots();
-
-  // An element of the expression stack as an assembly operand.
-  Operand ElementAt(int index) const {
-    return Operand(rsp, index * kPointerSize);
-  }
-
-  // Random-access store to a frame-top relative frame element.  The result
-  // becomes owned by the frame and is invalidated.
-  void SetElementAt(int index, Result* value);
-
-  // Set a frame element to a constant.  The index is frame-top relative.
-  inline void SetElementAt(int index, Handle<Object> value);
-
-  void PushElementAt(int index) {
-    PushFrameSlotAt(element_count() - index - 1);
-  }
-
-  void StoreToElementAt(int index) {
-    StoreToFrameSlotAt(element_count() - index - 1);
-  }
-
-  // A frame-allocated local as an assembly operand.
-  Operand LocalAt(int index) {
-    ASSERT(0 <= index);
-    ASSERT(index < local_count());
-    return Operand(rbp, kLocal0Offset - index * kPointerSize);
-  }
-
-  // Push a copy of the value of a local frame slot on top of the frame.
-  void PushLocalAt(int index) {
-    PushFrameSlotAt(local0_index() + index);
-  }
-
-  // Push the value of a local frame slot on top of the frame and invalidate
-  // the local slot.  The slot should be written to before trying to read
-  // from it again.
-  void TakeLocalAt(int index) {
-    TakeFrameSlotAt(local0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a local frame slot.  The
-  // value is left in place on top of the frame.
-  void StoreToLocalAt(int index) {
-    StoreToFrameSlotAt(local0_index() + index);
-  }
-
-  // Push the address of the receiver slot on the frame.
-  void PushReceiverSlotAddress();
-
-  // Push the function on top of the frame.
-  void PushFunction() { PushFrameSlotAt(function_index()); }
-
-  // Save the value of the rsi register to the context frame slot.
-  void SaveContextRegister();
-
-  // Restore the rsi register from the value of the context frame
-  // slot.
-  void RestoreContextRegister();
-
-  // A parameter as an assembly operand.
-  Operand ParameterAt(int index) {
-    ASSERT(-1 <= index);  // -1 is the receiver.
-    ASSERT(index < parameter_count());
-    return Operand(rbp, (1 + parameter_count() - index) * kPointerSize);
-  }
-
-  // Push a copy of the value of a parameter frame slot on top of the frame.
-  void PushParameterAt(int index) {
-    PushFrameSlotAt(param0_index() + index);
-  }
-
-  // Push the value of a parameter frame slot on top of the frame and
-  // invalidate the parameter slot.  The slot should be written to before
-  // trying to read from it again.
-  void TakeParameterAt(int index) {
-    TakeFrameSlotAt(param0_index() + index);
-  }
-
-  // Store the top value on the virtual frame into a parameter frame slot.
-  // The value is left in place on top of the frame.
-  void StoreToParameterAt(int index) {
-    StoreToFrameSlotAt(param0_index() + index);
-  }
-
-  // The receiver frame slot.
-  Operand Receiver() { return ParameterAt(-1); }
-
-  // Push a try-catch or try-finally handler on top of the virtual frame.
-  void PushTryHandler(HandlerType type);
-
-  // Call stub given the number of arguments it expects on (and
-  // removes from) the stack.
-  inline Result CallStub(CodeStub* stub, int arg_count);
-
-  // Call stub that takes a single argument passed in rax.  The
-  // argument is given as a result which does not have to be rax or
-  // even a register.  The argument is consumed by the call.
-  Result CallStub(CodeStub* stub, Result* arg);
-
-  // Call stub that takes a pair of arguments passed in rdx (arg0) and
-  // rax (arg1).  The arguments are given as results which do not have
-  // to be in the proper registers or even in registers.  The
-  // arguments are consumed by the call.
-  Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
-
-  // Call JS function from top of the stack with arguments
-  // taken from the stack.
-  Result CallJSFunction(int arg_count);
-
-  // Call runtime given the number of arguments expected on (and
-  // removed from) the stack.
-  Result CallRuntime(const Runtime::Function* f, int arg_count);
-  Result CallRuntime(Runtime::FunctionId id, int arg_count);
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  void DebugBreak();
-#endif
-
-  // Invoke builtin given the number of arguments it expects on (and
-  // removes from) the stack.
-  Result InvokeBuiltin(Builtins::JavaScript id,
-                       InvokeFlag flag,
-                       int arg_count);
-
-  // Call load IC.  Name and receiver are found on top of the frame.
-  // Both are dropped.
-  Result CallLoadIC(RelocInfo::Mode mode);
-
-  // Call keyed load IC.  Key and receiver are found on top of the
-  // frame.  Both are dropped.
-  Result CallKeyedLoadIC(RelocInfo::Mode mode);
-
-  // Call store IC.  If the load is contextual, value is found on top of the
-  // frame.  If not, value and receiver are on the frame.  Both are dropped.
-  Result CallStoreIC(Handle<String> name, bool is_contextual,
-                     StrictModeFlag strict_mode);
-
-  // Call keyed store IC.  Value, key, and receiver are found on top
-  // of the frame and dropped by the call.
-  Result CallKeyedStoreIC(StrictModeFlag strict_mode);
-
-  // Call call IC.  Function name, arguments, and receiver are found on top
-  // of the frame and dropped by the call.
-  // The argument count does not include the receiver.
-  Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
-  // Call keyed call IC.  Same calling convention as CallCallIC.
-  Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
-
-  // Allocate and call JS function as constructor.  Arguments,
-  // receiver (global object), and function are found on top of the
-  // frame.  Function is not dropped.  The argument count does not
-  // include the receiver.
-  Result CallConstructor(int arg_count);
-
-  // Drop a number of elements from the top of the expression stack.  May
-  // emit code to affect the physical frame.  Does not clobber any registers
-  // excepting possibly the stack pointer.
-  void Drop(int count);
-
-  // Drop one element.
-  void Drop() { Drop(1); }
-
-  // Duplicate the top element of the frame.
-  void Dup() { PushFrameSlotAt(element_count() - 1); }
-
-  // Duplicate the n'th element from the top of the frame.
-  // Dup(1) is equivalent to Dup().
-  void Dup(int n) {
-    ASSERT(n > 0);
-    PushFrameSlotAt(element_count() - n);
-  }
-
-  // Pop an element from the top of the expression stack.  Returns a
-  // Result, which may be a constant or a register.
-  Result Pop();
-
-  // Pop and save an element from the top of the expression stack and
-  // emit a corresponding pop instruction.
-  void EmitPop(Register reg);
-  void EmitPop(const Operand& operand);
-
-  // Push an element on top of the expression stack and emit a
-  // corresponding push instruction.
-  void EmitPush(Register reg,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(const Operand& operand,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(Heap::RootListIndex index,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(Immediate immediate,
-                TypeInfo info = TypeInfo::Unknown());
-  void EmitPush(Smi* value);
-  // Uses kScratchRegister, emits appropriate relocation info.
-  void EmitPush(Handle<Object> value);
-
-  inline bool ConstantPoolOverflowed();
-
-  // Push an element on the virtual frame.
-  void Push(Handle<Object> value);
-  inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
-  inline void Push(Smi* value);
-
-  // Pushing a result invalidates it (its contents become owned by the
-  // frame).
-  void Push(Result* result) {
-    if (result->is_register()) {
-      Push(result->reg(), result->type_info());
-    } else {
-      ASSERT(result->is_constant());
-      Push(result->handle());
-    }
-    result->Unuse();
-  }
-
-  // Pushing an expression expects that the expression is trivial (according
-  // to Expression::IsTrivial).
-  void Push(Expression* expr);
-
-  // Nip removes zero or more elements from immediately below the top
-  // of the frame, leaving the previous top-of-frame value on top of
-  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
-  inline void Nip(int num_dropped);
-
-  inline void SetTypeForLocalAt(int index, TypeInfo info);
-  inline void SetTypeForParamAt(int index, TypeInfo info);
-
- private:
-  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
-  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
-  static const int kContextOffset = StandardFrameConstants::kContextOffset;
-
-  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
-  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
-
-  ZoneList<FrameElement> elements_;
-
-  // The index of the element that is at the processor's stack pointer
-  // (the rsp register).
-  int stack_pointer_;
-
-  // The index of the register frame element using each register, or
-  // kIllegalIndex if a register is not on the frame.
-  int register_locations_[RegisterAllocator::kNumRegisters];
-
-  // The number of frame-allocated locals and parameters respectively.
-  inline int parameter_count();
-  inline int local_count();
-
-  // The index of the element that is at the processor's frame pointer
-  // (the rbp register).  The parameters, receiver, and return address
-  // are below the frame pointer.
-  int frame_pointer() { return parameter_count() + 2; }
-
-  // The index of the first parameter.  The receiver lies below the first
-  // parameter.
-  int param0_index() { return 1; }
-
-  // The index of the context slot in the frame.  It is immediately
-  // above the frame pointer.
-  int context_index() { return frame_pointer() + 1; }
-
-  // The index of the function slot in the frame.  It is above the frame
-  // pointer and the context slot.
-  int function_index() { return frame_pointer() + 2; }
-
-  // The index of the first local.  Between the frame pointer and the
-  // locals lie the context and the function.
-  int local0_index() { return frame_pointer() + 3; }
-
-  // The index of the base of the expression stack.
-  int expression_base_index() { return local0_index() + local_count(); }
-
-  // Convert a frame index into a frame pointer relative offset into the
-  // actual stack.
-  int fp_relative(int index) {
-    ASSERT(index < element_count());
-    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
-    return (frame_pointer() - index) * kPointerSize;
-  }
-
-  // Record an occurrence of a register in the virtual frame.  This has the
-  // effect of incrementing the register's external reference count and
-  // of updating the index of the register's location in the frame.
-  void Use(Register reg, int index) {
-    ASSERT(!is_used(reg));
-    set_register_location(reg, index);
-    cgen()->allocator()->Use(reg);
-  }
-
-  // Record that a register reference has been dropped from the frame.  This
-  // decrements the register's external reference count and invalidates the
-  // index of the register's location in the frame.
-  void Unuse(Register reg) {
-    ASSERT(is_used(reg));
-    set_register_location(reg, kIllegalIndex);
-    cgen()->allocator()->Unuse(reg);
-  }
-
-  // Spill the element at a particular index---write it to memory if
-  // necessary, free any associated register, and forget its value if
-  // constant.
-  void SpillElementAt(int index);
-
-  // Sync the element at a particular index.  If it is a register or
-  // constant that disagrees with the value on the stack, write it to memory.
-  // Keep the element type as register or constant, and clear the dirty bit.
-  void SyncElementAt(int index);
-
-  // Sync a single unsynced element that lies beneath or at the stack pointer.
-  void SyncElementBelowStackPointer(int index);
-
-  // Sync a single unsynced element that lies just above the stack pointer.
-  void SyncElementByPushing(int index);
-
-  // Push a copy of a frame slot (typically a local or parameter) on top of
-  // the frame.
-  inline void PushFrameSlotAt(int index);
-
-  // Push the value of a frame slot (typically a local or parameter) on
-  // top of the frame and invalidate the slot.
-  void TakeFrameSlotAt(int index);
-
-  // Store the value on top of the frame to a frame slot (typically a local
-  // or parameter).
-  void StoreToFrameSlotAt(int index);
-
-  // Spill all elements in registers. Spill the top spilled_args elements
-  // on the frame.  Sync all other frame elements.
-  // Then drop dropped_args elements from the virtual frame, to match
-  // the effect of an upcoming call that will drop them from the stack.
-  void PrepareForCall(int spilled_args, int dropped_args);
-
-  // Move frame elements currently in registers or constants, that
-  // should be in memory in the expected frame, to memory.
-  void MergeMoveRegistersToMemory(VirtualFrame* expected);
-
-  // Make the register-to-register moves necessary to
-  // merge this frame with the expected frame.
-  // Register to memory moves must already have been made,
-  // and memory to register moves must follow this call.
-  // This is because some new memory-to-register moves are
-  // created in order to break cycles of register moves.
-  // Used in the implementation of MergeTo().
-  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
-
-  // Make the memory-to-register and constant-to-register moves
-  // needed to make this frame equal the expected frame.
-  // Called after all register-to-memory and register-to-register
-  // moves have been made.  After this function returns, the frames
-  // should be equal.
-  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
-
-  // Invalidates a frame slot (puts an invalid frame element in it).
-  // Copies on the frame are correctly handled, and if this slot was
-  // the backing store of copies, the index of the new backing store
-  // is returned.  Otherwise, returns kIllegalIndex.
-  // Register counts are correctly updated.
-  int InvalidateFrameSlotAt(int index);
-
-  // This function assumes that a and b are the only results that could be in
-  // the registers a_reg or b_reg.  Other results can be live, but must not
-  // be in the registers a_reg or b_reg.  The results a and b are invalidated.
-  void MoveResultsToRegisters(Result* a,
-                              Result* b,
-                              Register a_reg,
-                              Register b_reg);
-
-  // Call a code stub that has already been prepared for calling (via
-  // PrepareForCall).
-  Result RawCallStub(CodeStub* stub);
-
-  // Calls a code object which has already been prepared for calling
-  // (via PrepareForCall).
-  Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
-  inline bool Equals(VirtualFrame* other);
-
-  // Classes that need raw access to the elements_ array.
-  friend class FrameRegisterState;
-  friend class JumpTarget;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_X64_VIRTUAL_FRAME_X64_H_
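
The deleted header's index scheme is easiest to check with concrete numbers: element 0 is the receiver, the parameters sit below the frame pointer slot (positive rbp offsets), and the context, function, and locals sit above it (negative offsets). The following standalone arithmetic sketch is not V8 code; the fixed 2-parameter/2-local shape and the helper names are assumptions, mirroring frame_pointer(), fp_relative(), and the index accessors above:

#include <cstdio>

static const int kPointerSize = 8;     // x64
static const int kParameterCount = 2;  // assumed frame shape
static const int kLocalCount = 2;

// Index arithmetic copied from the deleted header: element 0 is the
// receiver, params occupy 1..kParameterCount, then return address, then
// the saved frame pointer slot, then context, function, and locals.
static int frame_pointer() { return kParameterCount + 2; }
static int param0_index() { return 1; }
static int context_index() { return frame_pointer() + 1; }
static int function_index() { return frame_pointer() + 2; }
static int local0_index() { return frame_pointer() + 3; }

// Offset from rbp for a given element index, as in fp_relative().
static int fp_relative(int index) {
  return (frame_pointer() - index) * kPointerSize;
}

int main() {
  std::printf("receiver     rbp%+d\n", fp_relative(param0_index() - 1));
  for (int i = 0; i < kParameterCount; i++) {
    std::printf("parameter %d  rbp%+d\n", i, fp_relative(param0_index() + i));
  }
  std::printf("context      rbp%+d\n", fp_relative(context_index()));
  std::printf("function     rbp%+d\n", fp_relative(function_index()));
  for (int i = 0; i < kLocalCount; i++) {
    std::printf("local %d      rbp%+d\n", i, fp_relative(local0_index() + i));
  }
  return 0;
}

For this shape the receiver lands at rbp+32, the parameters at rbp+24 and rbp+16, and the locals at rbp-24 and rbp-32, agreeing with the ParameterAt() and LocalAt() operands in the header.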
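
MergeTo's helpers impose a strict phase order: register-to-memory moves first (MergeMoveRegistersToMemory), register-to-register moves second with any cycle broken by spilling one participant (MergeMoveRegistersToRegisters), and memory- or constant-to-register moves last (MergeMoveMemoryToRegisters). A minimal self-contained sketch of that ordering, with a hypothetical Element type standing in for V8's FrameElement and printf standing in for emitted code:

#include <cstdio>
#include <vector>

// One frame slot: where its value lives now, and which register holds it
// when in_register is true.
struct Element {
  bool in_register;
  int reg;
};

// Index of the slot currently occupying 'reg', or -1 if the register is free.
static int RegisterUser(const std::vector<Element>& frame, int reg) {
  for (size_t i = 0; i < frame.size(); i++) {
    if (frame[i].in_register && frame[i].reg == reg) return static_cast<int>(i);
  }
  return -1;
}

static void MergeTo(std::vector<Element>& frame,
                    const std::vector<Element>& expected) {
  // Phase 1: register -> memory.  Frees registers before any register moves.
  for (size_t i = 0; i < frame.size(); i++) {
    if (frame[i].in_register && !expected[i].in_register) {
      std::printf("store r%d -> slot %zu\n", frame[i].reg, i);
      frame[i] = Element{false, -1};
    }
  }
  // Phase 2: register -> register.  Only move into a free target register;
  // if no move can proceed, the remaining moves form a cycle, broken by
  // spilling one participant (reloaded in phase 3).
  for (;;) {
    bool pending = false, progress = false;
    for (size_t i = 0; i < frame.size(); i++) {
      if (!frame[i].in_register || !expected[i].in_register ||
          frame[i].reg == expected[i].reg) {
        continue;
      }
      pending = true;
      if (RegisterUser(frame, expected[i].reg) == -1) {
        std::printf("move r%d -> r%d\n", frame[i].reg, expected[i].reg);
        frame[i].reg = expected[i].reg;
        progress = true;
      }
    }
    if (!pending) break;
    if (!progress) {
      for (size_t i = 0; i < frame.size(); i++) {
        if (frame[i].in_register && expected[i].in_register &&
            frame[i].reg != expected[i].reg) {
          std::printf("spill r%d -> slot %zu (breaks cycle)\n",
                      frame[i].reg, i);
          frame[i] = Element{false, -1};
          break;
        }
      }
    }
  }
  // Phase 3: memory -> register, last, when every target register is free.
  for (size_t i = 0; i < frame.size(); i++) {
    if (!frame[i].in_register && expected[i].in_register) {
      std::printf("load slot %zu -> r%d\n", i, expected[i].reg);
      frame[i] = expected[i];
    }
  }
}

int main() {
  // Slots 0 and 1 swap registers (a move cycle); slot 2 moves to memory.
  std::vector<Element> frame    = {{true, 0}, {true, 1}, {true, 2}};
  std::vector<Element> expected = {{true, 1}, {true, 0}, {false, -1}};
  MergeTo(frame, expected);
  return 0;
}

Run on this input, it prints the store first, then the cycle-breaking spill, the remaining register move, and finally the reload, which is exactly why register-to-memory moves must precede register moves and memory-to-register moves must follow them.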
diff --git a/src/zone-inl.h b/src/zone-inl.h
index 516fc4a..17e83dc 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -97,6 +97,10 @@
   return ZONE->New(static_cast<int>(size));
 }
 
+void* ZoneObject::operator new(size_t size, Zone* zone) {
+  return zone->New(static_cast<int>(size));
+}
+
 
 inline void* ZoneListAllocationPolicy::New(int size) {
   return ZONE->New(size);
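
The placement operator added above lets an allocation target an explicit Zone instead of the isolate-global ZONE used by the one-argument overload. A minimal sketch of the intended call pattern, with simplified Zone/ZoneObject stand-ins, malloc in place of V8's segment allocator, and a hypothetical AstNode subclass:

#include <cstddef>
#include <cstdlib>

// Simplified stand-in: the real Zone hands out memory from larger segments
// and frees everything at once; malloc is enough to show the call pattern.
class Zone {
 public:
  void* New(int size) { return std::malloc(static_cast<std::size_t>(size)); }
};

class ZoneObject {
 public:
  // Mirrors the two-argument operator added in this hunk.
  void* operator new(std::size_t size, Zone* zone) {
    return zone->New(static_cast<int>(size));
  }
  // Matching placement delete, invoked only if a constructor throws.
  void operator delete(void* /* p */, Zone* /* zone */) {}
};

// Hypothetical node type; any ZoneObject subclass works the same way.
struct AstNode : public ZoneObject {
  explicit AstNode(int id) : id_(id) {}
  int id_;
};

int main() {
  Zone zone;
  // The two-argument operator new routes the allocation into 'zone'.
  AstNode* node = new (&zone) AstNode(42);
  return node->id_ == 42 ? 0 : 1;  // zone memory is reclaimed wholesale
}
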
diff --git a/src/zone.h b/src/zone.h
index 13b55c4..9efe4f5 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -133,6 +133,7 @@
  public:
   // Allocate a new ZoneObject of 'size' bytes in the Zone.
   inline void* operator new(size_t size);
+  inline void* operator new(size_t size, Zone* zone);
 
   // Ideally, the delete operator should be private instead of
   // public, but unfortunately the compiler sometimes synthesizes